From 638a9e433ecd61e64761352dbec1fa4f5874c941 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 7 Aug 2024 15:18:06 +0200
Subject: Merging upstream version 6.10.3.

Signed-off-by: Daniel Baumann
---
 drivers/Makefile | 5 - drivers/accel/ivpu/ivpu_debugfs.c | 2 + drivers/accel/ivpu/ivpu_mmu_context.c | 1 + drivers/accel/qaic/Makefile | 5 +- drivers/accel/qaic/qaic.h | 9 + drivers/accel/qaic/qaic_data.c | 9 + drivers/accel/qaic/qaic_debugfs.c | 338 + drivers/accel/qaic/qaic_debugfs.h | 20 + drivers/accel/qaic/qaic_drv.c | 26 +- drivers/accel/qaic/sahara.c | 449 + drivers/accel/qaic/sahara.h | 10 + drivers/accessibility/speakup/devsynth.c | 59 +- drivers/accessibility/speakup/speakup.h | 2 + drivers/accessibility/speakup/synth.c | 92 +- drivers/acpi/Kconfig | 3 + drivers/acpi/Makefile | 8 +- drivers/acpi/acpi_cmos_rtc.c | 98 - drivers/acpi/acpi_ipmi.c | 23 +- drivers/acpi/acpi_lpss.c | 1357 --- drivers/acpi/acpica/aclocal.h | 2 +- drivers/acpi/acpica/acobject.h | 107 +- drivers/acpi/acpica/evgpeinit.c | 1 + drivers/acpi/acpica/tbfadt.c | 30 +- drivers/acpi/acpica/tbutils.c | 7 +- drivers/acpi/acpica/utdebug.c | 5 + drivers/acpi/apei/einj-core.c | 12 +- drivers/acpi/apei/ghes.c | 84 + drivers/acpi/arm64/amba.c | 8 - drivers/acpi/arm64/dma.c | 17 +- drivers/acpi/arm64/iort.c | 20 +- drivers/acpi/bgrt.c | 9 +- drivers/acpi/blacklist.c | 140 - drivers/acpi/bus.c | 20 +- drivers/acpi/cppc_acpi.c | 4 +- drivers/acpi/dock.c | 48 +- drivers/acpi/dptf/dptf_pch_fivr.c | 1 + drivers/acpi/dptf/dptf_power.c | 2 + drivers/acpi/dptf/int340x_thermal.c | 6 + drivers/acpi/fan.h | 1 + drivers/acpi/internal.h | 3 +- drivers/acpi/mipi-disco-img.c | 14 +- drivers/acpi/nhlt.c | 289 + drivers/acpi/numa/srat.c | 77 +- drivers/acpi/platform_profile.c | 39 + drivers/acpi/property.c | 11 +- drivers/acpi/scan.c | 35 +- drivers/acpi/x86/Makefile | 8 + drivers/acpi/x86/blacklist.c | 140 + drivers/acpi/x86/cmos_rtc.c | 98 + drivers/acpi/x86/lpss.c | 1355 +++ drivers/acpi/x86/utils.c | 9 + drivers/amba/bus.c | 11 +- drivers/android/binder.c | 4 +- drivers/ata/Kconfig | 28 +- drivers/ata/ahci.h | 2 +- drivers/ata/libata-core.c | 114 +- drivers/ata/libata-sata.c | 171 +- drivers/ata/libata-scsi.c | 195 +- drivers/ata/libata-sff.c | 4 + drivers/ata/libata.h | 11 +- drivers/ata/pata_cs5520.c | 6 - drivers/ata/pata_macio.c | 20 +- drivers/ata/sata_mv.c | 2 +- drivers/ata/sata_nv.c | 24 +- drivers/ata/sata_sil24.c | 2 +- drivers/atm/fore200e.c | 3 - drivers/atm/fore200e.h | 1 - drivers/auxdisplay/ht16k33.c | 16 +- drivers/base/arch_topology.c | 26 +- drivers/base/core.c | 62 +- drivers/base/cpu.c | 2 +- drivers/base/devcoredump.c | 23 + drivers/base/devres.c | 11 +- drivers/base/memory.c | 2 +- drivers/base/power/main.c | 6 +- drivers/base/power/wakeup.c | 11 +- drivers/base/property.c | 16 +- drivers/base/regmap/internal.h | 14 +- drivers/base/regmap/regcache-maple.c | 2 +- drivers/base/regmap/regmap-i3c.c | 2 +- drivers/base/regmap/regmap-kunit.c | 999 +- drivers/base/regmap/regmap-mdio.c | 2 +- drivers/base/regmap/regmap-ram.c | 5 +- drivers/base/regmap/regmap-raw-ram.c | 5 +- drivers/base/regmap/regmap-sdw-mbq.c | 2 +- drivers/base/regmap/regmap-sdw.c | 2 +- drivers/base/regmap/regmap-spi.c | 1 + drivers/base/regmap/trace.h | 18 +- drivers/base/trace.h | 2 +- drivers/bcma/host_soc.c | 6 +- drivers/block/brd.c | 66 +- drivers/block/loop.c | 4 +- drivers/block/nbd.c | 58 +- drivers/block/null_blk/main.c | 42 +- drivers/block/null_blk/null_blk.h | 2 + drivers/block/null_blk/trace.h | 7 +- drivers/block/null_blk/zoned.c | 358 
+- drivers/block/pktcdvd.c | 7 +- drivers/block/rbd.c | 35 +- drivers/block/rnbd/rnbd-srv-trace.h | 12 +- drivers/block/ublk_drv.c | 11 +- drivers/block/virtio_blk.c | 3 +- drivers/block/xen-blkfront.c | 16 +- drivers/block/zram/zram_drv.c | 60 +- drivers/block/zram/zram_drv.h | 2 +- drivers/bluetooth/Kconfig | 11 + drivers/bluetooth/Makefile | 1 + drivers/bluetooth/btintel.c | 207 +- drivers/bluetooth/btintel.h | 51 +- drivers/bluetooth/btintel_pcie.c | 1363 +++ drivers/bluetooth/btintel_pcie.h | 430 + drivers/bluetooth/btmrvl_sdio.c | 1 - drivers/bluetooth/btmtksdio.c | 1 - drivers/bluetooth/btnxpuart.c | 52 +- drivers/bluetooth/btqca.c | 43 +- drivers/bluetooth/btqca.h | 60 +- drivers/bluetooth/btqcomsmd.c | 6 +- drivers/bluetooth/btrtl.c | 7 + drivers/bluetooth/btusb.c | 53 +- drivers/bluetooth/hci_bcm.c | 8 +- drivers/bluetooth/hci_bcm4377.c | 2 +- drivers/bluetooth/hci_intel.c | 25 +- drivers/bluetooth/virtio_bt.c | 1 - drivers/bus/Kconfig | 10 + drivers/bus/Makefile | 1 + drivers/bus/brcmstb_gisb.c | 1 + drivers/bus/mhi/ep/main.c | 14 +- drivers/bus/mhi/host/init.c | 41 +- drivers/bus/mhi/host/internal.h | 4 +- drivers/bus/mhi/host/main.c | 16 + drivers/bus/mhi/host/pci_generic.c | 45 + drivers/bus/mhi/host/pm.c | 42 +- drivers/bus/mhi/host/trace.h | 12 +- drivers/bus/stm32_etzpc.c | 141 + drivers/bus/stm32_firewall.c | 294 + drivers/bus/stm32_firewall.h | 83 + drivers/bus/stm32_rifsc.c | 252 + drivers/bus/ti-sysc.c | 165 +- drivers/cdx/controller/cdx_controller.c | 6 +- drivers/char/agp/alpha-agp.c | 2 +- drivers/char/hw_random/amd-rng.c | 4 +- drivers/char/hw_random/core.c | 53 +- drivers/char/hw_random/mxc-rnga.c | 9 +- drivers/char/hw_random/nomadik-rng.c | 1 - drivers/char/hw_random/virtio-rng.c | 1 - drivers/char/ipmi/Makefile | 11 +- drivers/char/ipmi/bt-bmc.c | 5 +- drivers/char/ipmi/ipmi_msghandler.c | 29 +- drivers/char/ipmi/ipmi_powernv.c | 6 +- drivers/char/ipmi/ipmi_si_intf.c | 3 +- drivers/char/ipmi/ipmi_si_pci.c | 3 + drivers/char/ipmi/ipmi_si_platform.c | 6 +- drivers/char/ipmi/ipmi_ssif.c | 5 +- drivers/char/ipmi/kcs_bmc_aspeed.c | 6 +- drivers/char/ipmi/kcs_bmc_npcm7xx.c | 6 +- drivers/char/ipmi/ssif_bmc.c | 6 +- drivers/char/mem.c | 8 +- drivers/char/powernv-op-panel.c | 5 +- drivers/char/sonypi.c | 6 +- drivers/char/tpm/Kconfig | 17 +- drivers/char/tpm/Makefile | 2 + drivers/char/tpm/eventlog/acpi.c | 1 - drivers/char/tpm/eventlog/common.c | 2 + drivers/char/tpm/tpm-buf.c | 226 + drivers/char/tpm/tpm-chip.c | 6 + drivers/char/tpm/tpm-interface.c | 26 +- drivers/char/tpm/tpm-sysfs.c | 18 + drivers/char/tpm/tpm.h | 16 +- drivers/char/tpm/tpm2-cmd.c | 61 +- drivers/char/tpm/tpm2-sessions.c | 1365 +++ drivers/char/tpm/tpm2-space.c | 11 +- drivers/char/tpm/tpm_infineon.c | 14 +- drivers/char/tpm/tpm_tis_core.c | 19 +- drivers/char/tpm/tpm_tis_core.h | 2 +- drivers/char/tpm/tpm_tis_spi_main.c | 1 + drivers/char/virtio_console.c | 2 - drivers/clk/Kconfig | 5 +- drivers/clk/Makefile | 1 + drivers/clk/clk-en7523.c | 200 +- drivers/clk/clk-gemini.c | 2 - drivers/clk/clk-highbank.c | 1 - drivers/clk/clk-loongson2.c | 548 +- drivers/clk/clk-scmi.c | 249 +- drivers/clk/clkdev.c | 42 +- drivers/clk/davinci/da8xx-cfgchip.c | 4 +- drivers/clk/imx/Kconfig | 7 + drivers/clk/imx/Makefile | 1 + drivers/clk/imx/clk-imx8mp-audiomix.c | 155 +- drivers/clk/imx/clk-imx95-blk-ctl.c | 438 + drivers/clk/meson/Kconfig | 5 + drivers/clk/meson/Makefile | 1 + drivers/clk/meson/a1-peripherals.c | 1 + drivers/clk/meson/a1-pll.c | 1 + drivers/clk/meson/axg-aoclk.c | 2 +- 
drivers/clk/meson/axg-audio.c | 2 +- drivers/clk/meson/axg.c | 2 +- drivers/clk/meson/clk-cpu-dyndiv.c | 2 +- drivers/clk/meson/clk-dualdiv.c | 2 +- drivers/clk/meson/clk-mpll.c | 2 +- drivers/clk/meson/clk-phase.c | 2 +- drivers/clk/meson/clk-pll.c | 6 +- drivers/clk/meson/clk-regmap.c | 2 +- drivers/clk/meson/g12a-aoclk.c | 2 +- drivers/clk/meson/g12a.c | 78 +- drivers/clk/meson/gxbb-aoclk.c | 2 +- drivers/clk/meson/gxbb.c | 2 +- drivers/clk/meson/meson-aoclk.c | 2 +- drivers/clk/meson/meson-eeclk.c | 2 +- drivers/clk/meson/s4-peripherals.c | 4 +- drivers/clk/meson/s4-pll.c | 7 + drivers/clk/meson/sclk-div.c | 2 +- drivers/clk/meson/vclk.c | 141 + drivers/clk/meson/vclk.h | 51 + drivers/clk/meson/vid-pll-div.c | 2 +- drivers/clk/microchip/clk-mpfs.c | 92 +- drivers/clk/nxp/clk-lpc18xx-cgu.c | 1 - drivers/clk/qcom/apss-ipq-pll.c | 56 +- drivers/clk/qcom/camcc-sc7280.c | 5 + drivers/clk/qcom/clk-alpha-pll.c | 21 +- drivers/clk/qcom/clk-alpha-pll.h | 5 +- drivers/clk/qcom/clk-cbf-8996.c | 13 +- drivers/clk/qcom/clk-rcg.h | 24 +- drivers/clk/qcom/clk-rcg2.c | 198 + drivers/clk/qcom/clk-rpm.c | 1 - drivers/clk/qcom/common.c | 18 + drivers/clk/qcom/common.h | 2 + drivers/clk/qcom/gcc-ipq8074.c | 120 +- drivers/clk/qcom/gcc-msm8917.c | 1 + drivers/clk/qcom/gcc-msm8953.c | 1 + drivers/clk/qcom/gcc-sa8775p.c | 40 + drivers/clk/qcom/gcc-sc7280.c | 3 + drivers/clk/qcom/gcc-sm8150.c | 61 - drivers/clk/qcom/gcc-x1e80100.c | 46 +- drivers/clk/qcom/gpucc-sa8775p.c | 41 +- drivers/clk/qcom/gpucc-sm8350.c | 5 +- drivers/clk/qcom/hfpll.c | 6 +- drivers/clk/qcom/kpss-xcc.c | 4 +- drivers/clk/renesas/clk-r8a73a4.c | 2 - drivers/clk/renesas/clk-r8a7740.c | 27 +- drivers/clk/renesas/clk-sh73a0.c | 2 - drivers/clk/renesas/r8a779h0-cpg-mssr.c | 21 + drivers/clk/renesas/r9a07g043-cpg.c | 4 +- drivers/clk/renesas/r9a07g044-cpg.c | 2 +- drivers/clk/renesas/r9a08g045-cpg.c | 41 + drivers/clk/renesas/rzg2l-cpg.c | 199 +- drivers/clk/renesas/rzg2l-cpg.h | 67 + drivers/clk/rockchip/clk-mmc-phase.c | 1 - drivers/clk/rockchip/clk-rk3568.c | 5 + drivers/clk/rockchip/rst-rk3588.c | 1 + drivers/clk/samsung/clk-exynos-arm64.c | 56 +- drivers/clk/samsung/clk-exynos4.c | 13 +- drivers/clk/samsung/clk-exynos850.c | 440 +- drivers/clk/samsung/clk-gs101.c | 967 +- drivers/clk/samsung/clk.c | 11 +- drivers/clk/samsung/clk.h | 4 + drivers/clk/sophgo/Kconfig | 11 + drivers/clk/sophgo/Makefile | 7 + drivers/clk/sophgo/clk-cv1800.c | 1537 +++ drivers/clk/sophgo/clk-cv1800.h | 123 + drivers/clk/sophgo/clk-cv18xx-common.c | 66 + drivers/clk/sophgo/clk-cv18xx-common.h | 81 + drivers/clk/sophgo/clk-cv18xx-ip.c | 887 ++ drivers/clk/sophgo/clk-cv18xx-ip.h | 261 + drivers/clk/sophgo/clk-cv18xx-pll.c | 419 + drivers/clk/sophgo/clk-cv18xx-pll.h | 118 + drivers/clk/stm32/Kconfig | 7 + drivers/clk/stm32/Makefile | 1 + drivers/clk/stm32/clk-stm32-core.c | 11 +- drivers/clk/stm32/clk-stm32mp13.c | 72 +- drivers/clk/stm32/clk-stm32mp25.c | 1875 +++ drivers/clk/stm32/reset-stm32.c | 59 +- drivers/clk/stm32/reset-stm32.h | 7 + drivers/clk/stm32/stm32mp25_rcc.h | 712 ++ drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c | 1 + drivers/clk/sunxi-ng/ccu-sun20i-d1.c | 1 + drivers/clk/sunxi-ng/ccu-sun4i-a10.c | 1 + drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c | 1 + drivers/clk/sunxi-ng/ccu-sun50i-a100.c | 1 + drivers/clk/sunxi-ng/ccu-sun50i-a64.c | 13 +- drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c | 1 + drivers/clk/sunxi-ng/ccu-sun50i-h6.c | 1 + drivers/clk/sunxi-ng/ccu-sun50i-h616.c | 1 + drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 1 + drivers/clk/sunxi-ng/ccu-sun6i-rtc.c | 1 + 
drivers/clk/sunxi-ng/ccu-sun8i-a23.c | 1 + drivers/clk/sunxi-ng/ccu-sun8i-a33.c | 1 + drivers/clk/sunxi-ng/ccu-sun8i-a83t.c | 1 + drivers/clk/sunxi-ng/ccu-sun8i-de2.c | 1 + drivers/clk/sunxi-ng/ccu-sun8i-h3.c | 1 + drivers/clk/sunxi-ng/ccu-sun8i-r.c | 1 + drivers/clk/sunxi-ng/ccu-sun8i-r40.c | 1 + drivers/clk/sunxi-ng/ccu-sun8i-v3s.c | 1 + drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c | 1 + drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c | 1 + drivers/clk/sunxi-ng/ccu-sun9i-a80.c | 1 + drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c | 1 + drivers/clk/sunxi-ng/ccu_nkm.c | 21 + drivers/clk/sunxi-ng/ccu_nkm.h | 2 + drivers/clk/ti/dpll.c | 10 +- drivers/clocksource/arm_arch_timer.c | 2 +- drivers/clocksource/renesas-ostm.c | 3 +- drivers/clocksource/timer-clint.c | 2 +- drivers/clocksource/timer-ti-dm.c | 1 - drivers/comedi/drivers/cb_pcidas64.c | 5 - drivers/counter/counter-core.c | 4 +- drivers/counter/stm32-timer-cnt.c | 461 +- drivers/counter/ti-ecap-capture.c | 8 +- drivers/counter/ti-eqep.c | 6 +- drivers/cpufreq/amd-pstate-ut.c | 12 +- drivers/cpufreq/amd-pstate.c | 138 +- drivers/cpufreq/amd-pstate.h | 14 +- drivers/cpufreq/cpufreq-dt-platdev.c | 10 +- drivers/cpufreq/cpufreq-dt.c | 21 +- drivers/cpufreq/cpufreq.c | 36 + drivers/cpufreq/freq_table.c | 12 +- drivers/cpufreq/intel_pstate.c | 183 +- drivers/cpufreq/mediatek-cpufreq.c | 10 + drivers/cpufreq/pmac32-cpufreq.c | 8 +- drivers/cpufreq/qcom-cpufreq-hw.c | 4 +- drivers/cpufreq/qcom-cpufreq-nvmem.c | 13 +- drivers/cpufreq/sun50i-cpufreq-nvmem.c | 209 +- drivers/cpufreq/tegra124-cpufreq.c | 19 +- drivers/cpufreq/ti-cpufreq.c | 6 +- drivers/cpuidle/coupled.c | 13 +- drivers/cpuidle/cpuidle-kirkwood.c | 5 +- drivers/cpuidle/cpuidle-psci-domain.c | 3 +- drivers/cpuidle/cpuidle-psci.c | 5 +- drivers/cpuidle/cpuidle-psci.h | 20 - drivers/cpuidle/governors/ladder.c | 1 + drivers/crypto/Kconfig | 26 +- drivers/crypto/Makefile | 1 + drivers/crypto/atmel-i2c.c | 30 +- drivers/crypto/atmel-i2c.h | 8 +- drivers/crypto/atmel-sha204a.c | 68 + drivers/crypto/caam/ctrl.c | 19 +- drivers/crypto/ccp/sev-dev.c | 8 +- drivers/crypto/hisilicon/debugfs.c | 44 +- drivers/crypto/hisilicon/hpre/hpre_main.c | 23 +- drivers/crypto/hisilicon/qm.c | 3 + drivers/crypto/hisilicon/sec2/sec_main.c | 30 +- drivers/crypto/hisilicon/sgl.c | 5 +- drivers/crypto/hisilicon/zip/zip_main.c | 24 +- drivers/crypto/intel/iaa/iaa_crypto.h | 16 +- drivers/crypto/intel/iaa/iaa_crypto_main.c | 23 +- drivers/crypto/intel/iaa/iaa_crypto_stats.c | 183 +- drivers/crypto/intel/iaa/iaa_crypto_stats.h | 8 - drivers/crypto/intel/qat/qat_420xx/Makefile | 2 +- .../crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 3 + drivers/crypto/intel/qat/qat_4xxx/Makefile | 2 +- .../crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 5 + drivers/crypto/intel/qat/qat_c3xxx/Makefile | 2 +- .../crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 1 + drivers/crypto/intel/qat/qat_c3xxxvf/Makefile | 2 +- .../intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 1 + drivers/crypto/intel/qat/qat_c62x/Makefile | 2 +- .../crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c | 1 + drivers/crypto/intel/qat/qat_c62xvf/Makefile | 2 +- .../intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 1 + drivers/crypto/intel/qat/qat_common/Makefile | 7 +- .../intel/qat/qat_common/adf_accel_devices.h | 88 + drivers/crypto/intel/qat/qat_common/adf_cfg.c | 6 +- .../crypto/intel/qat/qat_common/adf_common_drv.h | 10 + .../intel/qat/qat_common/adf_gen2_hw_csr_data.c | 101 + .../intel/qat/qat_common/adf_gen2_hw_csr_data.h | 86 + .../crypto/intel/qat/qat_common/adf_gen2_hw_data.c | 97 
- .../crypto/intel/qat/qat_common/adf_gen2_hw_data.h | 76 - .../intel/qat/qat_common/adf_gen4_hw_csr_data.c | 231 + .../intel/qat/qat_common/adf_gen4_hw_csr_data.h | 188 + .../crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 380 +- .../crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 127 +- .../crypto/intel/qat/qat_common/adf_gen4_pfvf.c | 8 +- .../crypto/intel/qat/qat_common/adf_gen4_vf_mig.c | 1010 ++ .../crypto/intel/qat/qat_common/adf_gen4_vf_mig.h | 10 + .../crypto/intel/qat/qat_common/adf_mstate_mgr.c | 318 + .../crypto/intel/qat/qat_common/adf_mstate_mgr.h | 89 + .../intel/qat/qat_common/adf_pfvf_pf_proto.c | 8 +- .../crypto/intel/qat/qat_common/adf_pfvf_utils.h | 11 + drivers/crypto/intel/qat/qat_common/adf_rl.c | 10 +- drivers/crypto/intel/qat/qat_common/adf_rl.h | 2 + drivers/crypto/intel/qat/qat_common/adf_sriov.c | 7 +- .../crypto/intel/qat/qat_common/adf_transport.c | 4 +- .../crypto/intel/qat/qat_common/qat_asym_algs.c | 66 +- drivers/crypto/intel/qat/qat_common/qat_bl.c | 6 +- drivers/crypto/intel/qat/qat_common/qat_bl.h | 11 +- drivers/crypto/intel/qat/qat_common/qat_mig_dev.c | 130 + drivers/crypto/intel/qat/qat_dh895xcc/Makefile | 2 +- .../intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 1 + drivers/crypto/intel/qat/qat_dh895xccvf/Makefile | 2 +- .../qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 1 + .../crypto/marvell/octeontx2/otx2_cpt_devlink.c | 9 +- drivers/crypto/mxs-dcp.c | 107 +- drivers/crypto/nx/nx-842.c | 6 +- drivers/crypto/nx/nx-842.h | 10 +- drivers/crypto/sahara.c | 16 +- drivers/crypto/starfive/Kconfig | 4 + drivers/crypto/starfive/jh7110-aes.c | 597 +- drivers/crypto/starfive/jh7110-cryp.c | 43 - drivers/crypto/starfive/jh7110-cryp.h | 10 +- drivers/crypto/starfive/jh7110-hash.c | 275 +- drivers/crypto/starfive/jh7110-rsa.c | 13 +- drivers/crypto/stm32/stm32-hash.c | 570 +- drivers/crypto/tegra/Makefile | 9 + drivers/crypto/tegra/tegra-se-aes.c | 1933 +++ drivers/crypto/tegra/tegra-se-hash.c | 1060 ++ drivers/crypto/tegra/tegra-se-key.c | 156 + drivers/crypto/tegra/tegra-se-main.c | 436 + drivers/crypto/tegra/tegra-se.h | 560 + drivers/crypto/virtio/virtio_crypto_core.c | 1 - drivers/cxl/Kconfig | 1 + drivers/cxl/acpi.c | 93 +- drivers/cxl/core/core.h | 7 + drivers/cxl/core/hdm.c | 13 +- drivers/cxl/core/mbox.c | 48 +- drivers/cxl/core/pci.c | 6 +- drivers/cxl/core/region.c | 91 + drivers/cxl/core/regs.c | 2 +- drivers/cxl/core/trace.c | 91 - drivers/cxl/core/trace.h | 70 +- drivers/cxl/cxl.h | 7 + drivers/cxl/cxlmem.h | 14 +- drivers/cxl/cxlpci.h | 1 - drivers/cxl/pci.c | 73 +- drivers/cxl/pmem.c | 2 - drivers/dax/bus.c | 3 +- drivers/dax/device.c | 8 +- drivers/dax/kmem.c | 30 +- drivers/devfreq/event/exynos-nocp.c | 6 +- drivers/devfreq/event/exynos-ppmu.c | 6 +- drivers/devfreq/exynos-bus.c | 9 +- drivers/devfreq/mtk-cci-devfreq.c | 6 +- drivers/devfreq/rk3399_dmc.c | 6 +- drivers/devfreq/sun8i-a33-mbus.c | 6 +- drivers/dma-buf/dma-buf.c | 56 +- drivers/dma-buf/sync_trace.h | 2 +- drivers/dma/Makefile | 6 +- drivers/dma/amba-pl08x.c | 4 +- drivers/dma/dma-axi-dmac.c | 78 +- drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 32 +- drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 2 +- drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c | 14 +- drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h | 5 +- drivers/dma/fsl-dpaa2-qdma/dpdmai.c | 113 +- drivers/dma/fsl-dpaa2-qdma/dpdmai.h | 61 +- drivers/dma/fsl-edma-common.c | 28 +- drivers/dma/fsl-edma-common.h | 111 +- drivers/dma/fsl-edma-main.c | 52 +- drivers/dma/fsl-edma-trace.c | 4 + drivers/dma/fsl-edma-trace.h | 132 + 
drivers/dma/idxd/cdev.c | 17 +- drivers/dma/imx-sdma.c | 97 +- drivers/dma/mcf-edma-main.c | 4 +- drivers/dma/pch_dma.c | 5 - drivers/dma/pl330.c | 1 - drivers/dma/qcom/hidma.c | 11 - drivers/dma/qcom/hidma_mgmt.c | 109 +- drivers/dma/ti/k3-udma.c | 4 +- drivers/dma/virt-dma.h | 10 + drivers/dma/xilinx/xdma.c | 1 + drivers/dma/xilinx/xilinx_dpdma.c | 10 +- drivers/edac/Makefile | 10 +- drivers/edac/altera_edac.c | 8 +- drivers/edac/amd64_edac.h | 1 - drivers/edac/amd8111_edac.c | 3 +- drivers/edac/armada_xp_edac.c | 2 +- drivers/edac/cpc925_edac.c | 2 +- drivers/edac/edac_device.c | 53 +- drivers/edac/edac_device.h | 22 +- drivers/edac/edac_device_sysfs.c | 22 +- drivers/edac/edac_mc_sysfs.c | 47 +- drivers/edac/edac_pci.h | 5 - drivers/edac/highbank_l2_edac.c | 2 +- drivers/edac/mpc85xx_edac.c | 2 +- drivers/edac/octeon_edac-l2c.c | 2 +- drivers/edac/octeon_edac-pc.c | 2 +- drivers/edac/qcom_edac.c | 1 - drivers/edac/sifive_edac.c | 3 +- drivers/edac/skx_common.c | 21 +- drivers/edac/skx_common.h | 4 +- drivers/edac/thunderx_edac.c | 6 +- drivers/edac/xgene_edac.c | 10 +- drivers/edac/zynqmp_edac.c | 2 +- drivers/eisa/Kconfig | 9 +- drivers/eisa/virtual_root.c | 2 +- drivers/extcon/extcon-adc-jack.c | 8 +- drivers/extcon/extcon-intel-cht-wc.c | 6 +- drivers/extcon/extcon-intel-mrfld.c | 26 +- drivers/extcon/extcon-max3355.c | 6 +- drivers/extcon/extcon-max77843.c | 6 +- drivers/extcon/extcon-rtk-type-c.c | 1 - drivers/extcon/extcon-usb-gpio.c | 6 +- drivers/extcon/extcon-usbc-cros-ec.c | 6 +- drivers/firewire/.kunitconfig | 1 + drivers/firewire/Kconfig | 18 +- drivers/firewire/Makefile | 6 +- drivers/firewire/core-card.c | 7 + drivers/firewire/core-cdev.c | 7 + drivers/firewire/core-topology.c | 3 + drivers/firewire/core-trace.c | 5 + drivers/firewire/core-transaction.c | 253 +- drivers/firewire/core.h | 21 +- drivers/firewire/ohci.c | 131 +- drivers/firewire/packet-header-definitions.h | 234 + drivers/firewire/packet-serdes-test.c | 583 + drivers/firewire/sbp2.c | 13 +- drivers/firewire/uapi-test.c | 1 + drivers/firmware/arm_ffa/driver.c | 187 +- drivers/firmware/arm_scmi/Makefile | 3 +- drivers/firmware/arm_scmi/common.h | 11 + drivers/firmware/arm_scmi/driver.c | 269 +- drivers/firmware/arm_scmi/mailbox.c | 3 + drivers/firmware/arm_scmi/notify.c | 30 +- drivers/firmware/arm_scmi/perf.c | 15 +- drivers/firmware/arm_scmi/pinctrl.c | 916 ++ drivers/firmware/arm_scmi/protocols.h | 18 + drivers/firmware/arm_scmi/virtio.c | 1 - drivers/firmware/cirrus/cs_dsp.c | 54 + drivers/firmware/dmi_scan.c | 18 +- drivers/firmware/efi/efi-pstore.c | 18 +- drivers/firmware/efi/libstub/Makefile | 11 - drivers/firmware/efi/libstub/screen_info.c | 2 + drivers/firmware/efi/libstub/x86-stub.c | 25 +- drivers/firmware/efi/rci2-table.c | 10 +- drivers/firmware/efi/runtime-wrappers.c | 13 +- drivers/firmware/efi/vars.c | 2 +- drivers/firmware/google/cbmem.c | 1 - drivers/firmware/google/coreboot_table.c | 6 +- drivers/firmware/google/coreboot_table.h | 6 +- drivers/firmware/qcom/qcom_scm.c | 51 +- drivers/firmware/smccc/smccc.c | 1 + drivers/firmware/ti_sci.c | 24 +- drivers/firmware/turris-mox-rwtm.c | 23 +- drivers/fpga/Kconfig | 12 + drivers/fpga/Makefile | 2 + drivers/fpga/altera-cvp.c | 1 - drivers/fpga/altera-ps-spi.c | 1 - drivers/fpga/dfl-afu-main.c | 2 - drivers/fpga/dfl-afu.h | 3 - drivers/fpga/dfl-fme-main.c | 2 - drivers/fpga/dfl-fme.h | 2 - drivers/fpga/dfl.h | 5 - drivers/fpga/ice40-spi.c | 4 +- drivers/fpga/intel-m10-bmc-sec-update.c | 3 +- drivers/fpga/tests/fpga-bridge-test.c | 33 +- 
drivers/fpga/tests/fpga-mgr-test.c | 16 +- drivers/fpga/tests/fpga-region-test.c | 41 +- drivers/fpga/xilinx-core.c | 229 + drivers/fpga/xilinx-core.h | 27 + drivers/fpga/xilinx-selectmap.c | 95 + drivers/fpga/xilinx-spi.c | 224 +- drivers/gpio/Kconfig | 27 + drivers/gpio/Makefile | 1 + drivers/gpio/gpio-brcmstb.c | 21 +- drivers/gpio/gpio-cros-ec.c | 8 + drivers/gpio/gpio-graniterapids.c | 385 + drivers/gpio/gpio-gw-pld.c | 1 + drivers/gpio/gpio-mc33880.c | 1 + drivers/gpio/gpio-pca953x.c | 2 +- drivers/gpio/gpio-pcf857x.c | 1 + drivers/gpio/gpio-pcie-idio-24.c | 2 +- drivers/gpio/gpio-pl061.c | 1 + drivers/gpio/gpio-regmap.c | 4 +- drivers/gpio/gpio-sch.c | 35 +- drivers/gpio/gpio-virtio.c | 1 - drivers/gpio/gpiolib-acpi.c | 69 +- drivers/gpio/gpiolib-legacy.c | 49 +- drivers/gpio/gpiolib-of.c | 23 +- drivers/gpio/gpiolib-swnode.c | 44 + drivers/gpio/gpiolib-sysfs.c | 2 +- drivers/gpio/gpiolib.c | 26 +- drivers/gpio/gpiolib.h | 2 +- drivers/gpu/drm/Kconfig | 59 +- drivers/gpu/drm/Makefile | 30 + drivers/gpu/drm/amd/amdgpu/Makefile | 10 +- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 14 +- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 169 +- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 33 +- drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 29 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 10 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 71 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 16 +- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 5 + drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c | 360 + drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h | 47 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 161 +- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 23 +- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 34 - drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 9 +- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2 - drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 47 +- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 33 +- drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 32 +- drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 46 +- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 24 + drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 25 +- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 51 +- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 9 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 506 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 77 + drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 22 +- drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 25 + drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 105 - drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 17 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 29 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 88 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 9 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 132 +- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 25 +- drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 7 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 10 + 
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 41 +- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 7 + drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 82 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 17 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 76 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 4 + drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c | 112 + drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 32 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h | 10 + drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 32 +- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 13 +- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 8 +- drivers/gpu/drm/amd/amdgpu/atom.c | 1 + drivers/gpu/drm/amd/amdgpu/cik.c | 6 +- drivers/gpu/drm/amd/amdgpu/cik_ih.c | 2 + drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 6 +- drivers/gpu/drm/amd/amdgpu/cz_ih.c | 2 + drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2 + drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2 + drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 2 + drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2 + drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 151 +- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 19 +- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 2 + drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 2 + drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 2 + drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 12 - drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 58 +- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 17 + drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 15 + drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 2 + drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2 + drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2 + drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 12 +- drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 2 + drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 28 +- drivers/gpu/drm/amd/amdgpu/ih_v6_1.c | 28 +- drivers/gpu/drm/amd/amdgpu/ih_v7_0.c | 2 + drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 2 + drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 4 + drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 2 + drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 2 + drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 2 + drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 2 + drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c | 2 + drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 2 + drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 188 +- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 57 +- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2 + drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 3 +- drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 2 + drivers/gpu/drm/amd/amdgpu/nv.c | 8 +- drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 5 +- drivers/gpu/drm/amd/amdgpu/psp_v14_0.c | 8 +- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 6 +- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 6 +- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 6 +- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 46 +- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 9 +- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 21 +- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 16 +- drivers/gpu/drm/amd/amdgpu/si.c | 6 +- drivers/gpu/drm/amd/amdgpu/si_dma.c | 6 +- drivers/gpu/drm/amd/amdgpu/si_ih.c | 2 + drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c | 2 +- drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c | 62 + drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h | 30 + drivers/gpu/drm/amd/amdgpu/soc15.c | 16 +- drivers/gpu/drm/amd/amdgpu/soc15.h | 2 + drivers/gpu/drm/amd/amdgpu/soc21.c | 6 +- drivers/gpu/drm/amd/amdgpu/ta_ras_if.h | 1 + drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 2 + drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 416 +- 
drivers/gpu/drm/amd/amdgpu/umc_v12_0.h | 77 +- drivers/gpu/drm/amd/amdgpu/umc_v8_10.c | 6 - drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c | 2 + drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 2 + drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 2 + drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2 + drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 2 + drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 2 + drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2 + drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 2 + drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 4 + drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 3 + drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 9 + drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 2 + drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 9 + drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 23 +- drivers/gpu/drm/amd/amdgpu/vi.c | 10 +- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 5 + drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 +- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c | 32 +- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 9 +- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 53 +- drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 2 + drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 18 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 27 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 1 - drivers/gpu/drm/amd/display/Kconfig | 2 +- drivers/gpu/drm/amd/display/Makefile | 1 + drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 61 +- .../drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 42 +- .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 66 +- .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 3 - .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 2 +- .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 8 + .../drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h | 2 +- drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 35 +- drivers/gpu/drm/amd/display/dc/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 3 +- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2 - .../gpu/drm/amd/display/dc/bios/command_table.c | 2 +- .../gpu/drm/amd/display/dc/bios/command_table2.c | 2 +- drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 17 +- .../amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c | 4 +- .../drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c | 1 - .../dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c | 1 - .../amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 2 +- .../amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c | 11 + .../drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 5 +- .../amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 2 +- .../dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c | 6 +- .../drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 3 +- .../amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 3 +- .../drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c | 1 - .../drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h | 42 +- .../drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c | 2 +- .../amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 3 +- .../amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 4 +- .../dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c | 9 + .../dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h | 3 +- .../amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 41 +- 
.../drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 21 - .../drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h | 2 - drivers/gpu/drm/amd/display/dc/core/dc.c | 1028 +- .../gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 13 +- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 108 +- drivers/gpu/drm/amd/display/dc/core/dc_stat.c | 2 +- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 62 +- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 22 +- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 68 +- drivers/gpu/drm/amd/display/dc/dc.h | 226 +- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 156 +- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 3 + drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 18 +- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 8 + drivers/gpu/drm/amd/display/dc/dc_plane.h | 2 +- drivers/gpu/drm/amd/display/dc/dc_plane_priv.h | 1 + drivers/gpu/drm/amd/display/dc/dc_state.h | 8 +- drivers/gpu/drm/amd/display/dc/dc_state_priv.h | 12 +- drivers/gpu/drm/amd/display/dc/dc_stream.h | 10 +- drivers/gpu/drm/amd/display/dc/dc_types.h | 20 +- drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 6 - drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h | 1 + drivers/gpu/drm/amd/display/dc/dce/dce_opp.h | 1 + drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 2 +- drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 4 - drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 4 +- .../gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 585 - drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 1527 --- .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 884 -- .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 696 -- .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 10 +- .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 10 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 1 + .../display/dc/dcn10/dcn10_hw_sequencer_debug.c | 2 +- .../drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 1 - .../drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 6 - drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 9 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h | 2 + .../amd/display/dc/dcn10/dcn10_stream_encoder.h | 10 +- drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c | 435 - drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h | 781 -- .../gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c | 1202 -- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c | 11 + .../gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 2 +- .../gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h | 2 +- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 6 + drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h | 2 +- .../drm/amd/display/dc/dcn20/dcn20_link_encoder.h | 5 +- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 10 +- drivers/gpu/drm/amd/display/dc/dcn201/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c | 313 - drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h | 83 - .../gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c | 4 +- .../gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c | 5 + .../amd/display/dc/dcn201/dcn201_link_encoder.h | 14 +- .../gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c | 8 +- .../gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h | 8 +- drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 2 - .../gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h | 21 +- .../amd/display/dc/dcn30/dcn30_dio_link_encoder.c | 2 - 
.../amd/display/dc/dcn30/dcn30_dio_link_encoder.h | 3 +- .../display/dc/dcn30/dcn30_dio_stream_encoder.c | 3 - drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 1527 --- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h | 642 - .../gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c | 461 - drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c | 2 - drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h | 14 +- .../gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c | 2 +- .../gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h | 2 +- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c | 3 + drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h | 23 +- .../gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h | 6 - .../gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c | 1 + drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h | 6 + .../amd/display/dc/dcn31/dcn31_dio_link_encoder.c | 2 +- .../amd/display/dc/dcn31/dcn31_dio_link_encoder.h | 4 + .../display/dc/dcn31/dcn31_hpo_dp_link_encoder.c | 2 +- .../display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c | 4 + .../gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c | 8 +- .../drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 9 +- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c | 7 +- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h | 1 + drivers/gpu/drm/amd/display/dc/dcn32/Makefile | 2 +- .../amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 6 +- .../amd/display/dc/dcn32/dcn32_dio_link_encoder.h | 10 +- .../display/dc/dcn32/dcn32_dio_stream_encoder.h | 5 +- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c | 165 - drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h | 38 - .../gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c | 10 +- .../gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h | 8 +- .../amd/display/dc/dcn32/dcn32_resource_helpers.c | 25 +- .../display/dc/dcn321/dcn321_dio_link_encoder.c | 8 +- drivers/gpu/drm/amd/display/dc/dcn35/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c | 207 +- .../amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 3 +- .../amd/display/dc/dcn35/dcn35_dio_link_encoder.h | 12 +- .../display/dc/dcn35/dcn35_dio_stream_encoder.h | 1 - drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c | 53 - drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h | 57 - .../gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c | 4 +- drivers/gpu/drm/amd/display/dc/dm_helpers.h | 3 +- drivers/gpu/drm/amd/display/dc/dm_services.h | 10 + drivers/gpu/drm/amd/display/dc/dml/Makefile | 36 +- .../gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 11 +- .../gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c | 2 +- .../amd/display/dc/dml/dcn30/display_mode_vba_30.c | 1 - .../gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 10 +- .../gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 1 + .../amd/display/dc/dml/dcn31/display_mode_vba_31.c | 1 - .../gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 2 +- .../display/dc/dml/dcn314/display_mode_vba_314.c | 1 - .../gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 269 +- .../dc/dml/dcn32/display_mode_vba_util_32.c | 18 +- .../gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 8 +- .../gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 3 +- drivers/gpu/drm/amd/display/dc/dml2/Makefile | 37 +- .../drm/amd/display/dc/dml2/display_mode_core.c | 15 +- .../drm/amd/display/dc/dml2/display_mode_core.h | 1 + .../amd/display/dc/dml2/display_mode_lib_defines.h | 2 + .../amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 155 +- .../amd/display/dc/dml2/dml2_dc_resource_mgmt.h | 2 + .../drm/amd/display/dc/dml2/dml2_internal_types.h | 11 + 
.../drm/amd/display/dc/dml2/dml2_mall_phantom.c | 2 + .../amd/display/dc/dml2/dml2_translation_helper.c | 120 +- drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 73 +- drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h | 5 + drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 16 +- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 34 +- drivers/gpu/drm/amd/display/dc/dpp/Makefile | 77 + .../drm/amd/display/dc/dpp/dcn10/CMakeLists.txt | 6 + .../gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c | 585 + .../gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h | 1528 +++ .../drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c | 884 ++ .../drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c | 696 ++ .../drm/amd/display/dc/dpp/dcn20/CMakeLists.txt | 5 + .../gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c | 435 + .../gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h | 781 ++ .../drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c | 1202 ++ .../drm/amd/display/dc/dpp/dcn201/CMakeLists.txt | 4 + .../gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c | 313 + .../gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h | 83 + .../drm/amd/display/dc/dpp/dcn30/CMakeLists.txt | 5 + .../gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 1531 +++ .../gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h | 646 + .../drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c | 461 + .../drm/amd/display/dc/dpp/dcn32/CMakeLists.txt | 4 + .../gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c | 165 + .../gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h | 38 + .../drm/amd/display/dc/dpp/dcn35/CMakeLists.txt | 4 + .../gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c | 112 + .../gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h | 64 + drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 8 +- drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c | 7 +- .../amd/display/dc/gpio/dcn21/hw_translate_dcn21.c | 13 - drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 6 +- drivers/gpu/drm/amd/display/dc/hwss/Makefile | 2 +- .../drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 54 +- .../drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 48 +- .../drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 126 +- .../gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c | 1 + .../drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c | 2 +- .../drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c | 2 +- .../drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 74 +- .../drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h | 5 +- .../drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 4 +- .../drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c | 2 +- .../drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 89 +- .../drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 106 +- .../drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 8 +- .../gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 4 +- .../gpu/drm/amd/display/dc/hwss/dcn351/Makefile | 25 +- .../drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c | 182 + .../drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h | 41 + .../drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 4 +- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 13 +- .../drm/amd/display/dc/hwss/hw_sequencer_private.h | 4 + drivers/gpu/drm/amd/display/dc/inc/core_types.h | 30 +- drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 2 - .../drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 5 + drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 30 +- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 3 +- 
.../gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 3 +- drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 12 +- drivers/gpu/drm/amd/display/dc/inc/hw/optc.h | 2 + .../gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 4 - .../drm/amd/display/dc/inc/hw/timing_generator.h | 7 + drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h | 53 + drivers/gpu/drm/amd/display/dc/inc/link.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/resource.h | 24 +- .../amd/display/dc/link/accessories/link_dp_cts.c | 2 +- .../display/dc/link/accessories/link_dp_trace.c | 1 - .../gpu/drm/amd/display/dc/link/link_detection.c | 4 +- drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 16 +- .../display/dc/link/protocols/link_dp_capability.c | 16 +- .../display/dc/link/protocols/link_dp_dpia_bw.c | 4 +- .../dc/link/protocols/link_dp_irq_handler.c | 10 +- .../display/dc/link/protocols/link_dp_training.c | 18 +- .../dc/link/protocols/link_dp_training_dpia.c | 4 +- .../drm/amd/display/dc/link/protocols/link_dpcd.c | 2 +- .../dc/link/protocols/link_edp_panel_control.c | 75 +- .../dc/link/protocols/link_edp_panel_control.h | 4 +- .../drm/amd/display/dc/link/protocols/link_hpd.c | 2 +- .../gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c | 3 + .../gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h | 9 +- .../gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c | 165 +- .../gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h | 6 +- drivers/gpu/drm/amd/display/dc/resource/Makefile | 6 - .../display/dc/resource/dce112/dce112_resource.c | 2 - .../display/dc/resource/dce120/dce120_resource.c | 2 +- .../amd/display/dc/resource/dce80/dce80_resource.c | 1 - .../amd/display/dc/resource/dcn10/dcn10_resource.c | 4 +- .../amd/display/dc/resource/dcn20/dcn20_resource.c | 18 +- .../display/dc/resource/dcn201/dcn201_resource.c | 6 +- .../amd/display/dc/resource/dcn21/dcn21_resource.c | 33 +- .../amd/display/dc/resource/dcn30/dcn30_resource.c | 2 +- .../display/dc/resource/dcn303/dcn303_resource.c | 2 +- .../amd/display/dc/resource/dcn31/dcn31_resource.c | 5 +- .../display/dc/resource/dcn314/dcn314_resource.c | 21 +- .../display/dc/resource/dcn316/dcn316_resource.c | 3 +- .../amd/display/dc/resource/dcn32/dcn32_resource.c | 122 +- .../amd/display/dc/resource/dcn32/dcn32_resource.h | 12 +- .../display/dc/resource/dcn321/dcn321_resource.c | 43 +- .../amd/display/dc/resource/dcn35/dcn35_resource.c | 22 +- .../amd/display/dc/resource/dcn35/dcn35_resource.h | 2 + .../display/dc/resource/dcn351/dcn351_resource.c | 24 +- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 18 +- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 221 +- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 1 + drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 1 + drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 2 + drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 2 + drivers/gpu/drm/amd/display/include/dal_types.h | 1 - .../gpu/drm/amd/display/include/grph_object_id.h | 4 +- .../drm/amd/display/include/link_service_types.h | 1 - drivers/gpu/drm/amd/display/include/logger_types.h | 1 + drivers/gpu/drm/amd/display/include/signal_types.h | 13 + .../drm/amd/display/modules/color/color_gamma.c | 2 +- .../drm/amd/display/modules/freesync/freesync.c | 8 +- .../drm/amd/display/modules/hdcp/hdcp2_execution.c | 2 +- .../amd/display/modules/info_packet/info_packet.c | 2 - drivers/gpu/drm/amd/include/amd_shared.h | 3 + .../amd/include/asic_reg/dcn/dcn_3_0_0_offset.h | 28 + .../amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h | 14 + .../amd/include/asic_reg/dcn/dcn_3_0_2_offset.h | 20 + 
.../amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h | 8 + .../amd/include/asic_reg/dcn/dcn_3_0_3_offset.h | 28 + .../amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h | 18 + .../amd/include/asic_reg/dcn/dcn_3_1_2_offset.h | 4 + .../amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h | 19 + .../amd/include/asic_reg/dcn/dcn_3_1_5_offset.h | 4 + .../amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h | 10 + .../amd/include/asic_reg/dcn/dcn_3_2_0_offset.h | 60 + .../amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h | 27 + .../amd/include/asic_reg/dcn/dcn_3_2_1_offset.h | 37 +- .../amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h | 16 + .../amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h | 24 + .../amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h | 4 +- .../amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h | 4 +- .../amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h | 10 + .../drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h | 12 + .../drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h | 4 + .../amd/include/asic_reg/oss/osssys_6_0_0_offset.h | 4 + .../include/asic_reg/oss/osssys_6_0_0_sh_mask.h | 10 + .../include/asic_reg/smuio/smuio_14_0_2_offset.h | 511 + .../include/asic_reg/smuio/smuio_14_0_2_sh_mask.h | 1106 ++ drivers/gpu/drm/amd/include/kgd_pp_interface.h | 3 +- drivers/gpu/drm/amd/include/mes_v11_api_def.h | 38 +- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 8 +- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 226 +- drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 6 +- drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h | 41 + drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 2 + drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 2 + drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 8 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c | 6 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h | 2 +- .../gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 2 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c | 8 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h | 2 +- .../gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 2 +- .../gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c | 2 +- .../gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c | 8 +- .../gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h | 2 +- .../gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c | 2 +- drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 12 +- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 13 +- .../pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h | 1836 +++ .../amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h | 6 +- .../amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h | 140 + drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 7 + drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 7 +- .../gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h | 164 + drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2 +- .../drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 18 +- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 8 +- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 22 +- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 25 +- .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 2 +- .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 8 +- .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 8 +- .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 96 +- .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 2 +- .../gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 8 +- 
drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 135 +- .../gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 6 + .../gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 1795 +++ .../gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h | 28 + drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 67 +- drivers/gpu/drm/arm/display/komeda/Makefile | 4 +- .../gpu/drm/arm/display/komeda/d71/d71_component.c | 2 + .../gpu/drm/arm/display/komeda/komeda_color_mgmt.c | 5 - drivers/gpu/drm/arm/display/komeda/komeda_crtc.c | 43 +- drivers/gpu/drm/arm/display/komeda/komeda_dev.c | 8 - .../gpu/drm/arm/display/komeda/komeda_pipeline.c | 1 + drivers/gpu/drm/armada/armada_debugfs.c | 1 + drivers/gpu/drm/ast/Makefile | 10 +- drivers/gpu/drm/ast/ast_ddc.c | 187 + drivers/gpu/drm/ast/ast_ddc.h | 11 + drivers/gpu/drm/ast/ast_drv.c | 1 + drivers/gpu/drm/ast/ast_drv.h | 39 +- drivers/gpu/drm/ast/ast_i2c.c | 151 - drivers/gpu/drm/ast/ast_main.c | 1 + drivers/gpu/drm/ast/ast_mode.c | 165 +- drivers/gpu/drm/bridge/Kconfig | 14 +- drivers/gpu/drm/bridge/Makefile | 1 + drivers/gpu/drm/bridge/adv7511/adv7511.h | 3 +- drivers/gpu/drm/bridge/adv7511/adv7511_cec.c | 13 +- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 33 +- drivers/gpu/drm/bridge/analogix/Kconfig | 2 +- drivers/gpu/drm/bridge/chipone-icn6211.c | 1 - drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c | 6 +- drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c | 6 +- drivers/gpu/drm/bridge/ite-it6505.c | 74 +- drivers/gpu/drm/bridge/ite-it66121.c | 25 +- drivers/gpu/drm/bridge/microchip-lvds.c | 229 + drivers/gpu/drm/bridge/panel.c | 2 + drivers/gpu/drm/bridge/samsung-dsim.c | 4 +- drivers/gpu/drm/bridge/sii902x.c | 2 +- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 31 +- drivers/gpu/drm/bridge/tc358764.c | 1 - drivers/gpu/drm/bridge/tc358775.c | 77 +- drivers/gpu/drm/bridge/thc63lvd1024.c | 21 +- drivers/gpu/drm/ci/arm64.config | 4 +- drivers/gpu/drm/display/Kconfig | 52 +- drivers/gpu/drm/display/Makefile | 6 +- drivers/gpu/drm/display/drm_dp_helper.c | 41 +- drivers/gpu/drm/display/drm_dp_helper_internal.h | 2 +- drivers/gpu/drm/display/drm_dp_mst_topology.c | 46 +- .../gpu/drm/display/drm_dp_mst_topology_internal.h | 4 +- drivers/gpu/drm/display/drm_dp_tunnel.c | 17 +- drivers/gpu/drm/drm_atomic_helper.c | 4 + drivers/gpu/drm/drm_atomic_uapi.c | 6 +- drivers/gpu/drm/drm_bridge.c | 14 + drivers/gpu/drm/drm_buddy.c | 430 +- drivers/gpu/drm/drm_client.c | 105 +- drivers/gpu/drm/drm_client_modeset.c | 129 +- drivers/gpu/drm/drm_crtc.c | 38 +- drivers/gpu/drm/drm_crtc_helper.c | 100 +- drivers/gpu/drm/drm_crtc_helper_internal.h | 15 +- drivers/gpu/drm/drm_crtc_internal.h | 13 + drivers/gpu/drm/drm_displayid.c | 7 +- drivers/gpu/drm/drm_displayid_internal.h | 170 + drivers/gpu/drm/drm_drv.c | 5 + drivers/gpu/drm/drm_edid.c | 266 +- drivers/gpu/drm/drm_eld.c | 4 +- drivers/gpu/drm/drm_fb_dma_helper.c | 45 + drivers/gpu/drm/drm_fbdev_dma.c | 3 +- drivers/gpu/drm/drm_fbdev_generic.c | 4 +- drivers/gpu/drm/drm_gem.c | 34 +- drivers/gpu/drm/drm_gem_shmem_helper.c | 9 +- drivers/gpu/drm/drm_gem_vram_helper.c | 101 +- drivers/gpu/drm/drm_internal.h | 10 +- drivers/gpu/drm/drm_mipi_dsi.c | 41 +- drivers/gpu/drm/drm_mode_config.c | 7 + drivers/gpu/drm/drm_modes.c | 40 +- drivers/gpu/drm/drm_panel_orientation_quirks.c | 4 +- drivers/gpu/drm/drm_panic.c | 579 + drivers/gpu/drm/drm_plane.c | 56 + drivers/gpu/drm/drm_print.c | 6 +- drivers/gpu/drm/drm_probe_helper.c | 95 +- drivers/gpu/drm/drm_sysfs.c | 20 +- drivers/gpu/drm/drm_vblank.c | 58 +- 
drivers/gpu/drm/drm_vblank_work.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 6 +- drivers/gpu/drm/etnaviv/etnaviv_sched.c | 9 +- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 1 - drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1 - drivers/gpu/drm/exynos/exynos_drm_fimd.c | 1 - drivers/gpu/drm/exynos/exynos_drm_g2d.c | 1 - drivers/gpu/drm/exynos/exynos_drm_gsc.c | 1 - drivers/gpu/drm/exynos/exynos_drm_mic.c | 1 - drivers/gpu/drm/exynos/exynos_drm_rotator.c | 1 - drivers/gpu/drm/exynos/exynos_drm_scaler.c | 1 - drivers/gpu/drm/exynos/exynos_drm_vidi.c | 1 - drivers/gpu/drm/exynos/exynos_hdmi.c | 16 +- drivers/gpu/drm/exynos/exynos_mixer.c | 1 - drivers/gpu/drm/gma500/cdv_intel_lvds.c | 3 + drivers/gpu/drm/gma500/mmu.c | 1 + drivers/gpu/drm/gma500/oaktrail_lvds.c | 2 - drivers/gpu/drm/gma500/psb_intel_lvds.c | 3 + drivers/gpu/drm/gud/gud_connector.c | 12 +- drivers/gpu/drm/i915/Kconfig.debug | 4 +- drivers/gpu/drm/i915/Makefile | 10 +- drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h | 273 + drivers/gpu/drm/i915/display/icl_dsi.c | 3 +- drivers/gpu/drm/i915/display/intel_atomic.c | 2 +- drivers/gpu/drm/i915/display/intel_audio_regs.h | 16 + drivers/gpu/drm/i915/display/intel_backlight.c | 50 +- drivers/gpu/drm/i915/display/intel_bios.c | 247 +- drivers/gpu/drm/i915/display/intel_bw.c | 160 +- drivers/gpu/drm/i915/display/intel_bw.h | 9 +- drivers/gpu/drm/i915/display/intel_cdclk.c | 204 +- drivers/gpu/drm/i915/display/intel_cdclk.h | 12 +- drivers/gpu/drm/i915/display/intel_color.c | 53 +- drivers/gpu/drm/i915/display/intel_color_regs.h | 42 +- .../gpu/drm/i915/display/intel_combo_phy_regs.h | 117 +- drivers/gpu/drm/i915/display/intel_crt.c | 5 +- .../gpu/drm/i915/display/intel_crtc_state_dump.c | 354 +- drivers/gpu/drm/i915/display/intel_cursor.c | 48 +- drivers/gpu/drm/i915/display/intel_cx0_phy.c | 389 +- drivers/gpu/drm/i915/display/intel_cx0_phy.h | 3 +- drivers/gpu/drm/i915/display/intel_ddi.c | 255 +- drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 18 +- drivers/gpu/drm/i915/display/intel_de.h | 186 +- drivers/gpu/drm/i915/display/intel_display.c | 706 +- drivers/gpu/drm/i915/display/intel_display.h | 22 + .../drm/i915/display/intel_display_conversion.h | 22 + drivers/gpu/drm/i915/display/intel_display_core.h | 17 + .../gpu/drm/i915/display/intel_display_debugfs.c | 126 +- .../gpu/drm/i915/display/intel_display_device.c | 8 + .../gpu/drm/i915/display/intel_display_device.h | 6 +- .../gpu/drm/i915/display/intel_display_driver.c | 28 +- drivers/gpu/drm/i915/display/intel_display_irq.c | 57 +- .../gpu/drm/i915/display/intel_display_params.c | 9 + .../gpu/drm/i915/display/intel_display_params.h | 2 + drivers/gpu/drm/i915/display/intel_display_power.c | 8 +- .../drm/i915/display/intel_display_power_well.c | 107 +- .../gpu/drm/i915/display/intel_display_reg_defs.h | 22 +- drivers/gpu/drm/i915/display/intel_display_trace.h | 56 +- drivers/gpu/drm/i915/display/intel_display_types.h | 101 +- drivers/gpu/drm/i915/display/intel_display_wa.c | 8 - drivers/gpu/drm/i915/display/intel_dmc.c | 185 +- drivers/gpu/drm/i915/display/intel_dmc_regs.h | 6 + drivers/gpu/drm/i915/display/intel_dmc_wl.c | 264 + drivers/gpu/drm/i915/display/intel_dmc_wl.h | 31 + drivers/gpu/drm/i915/display/intel_dp.c | 315 +- drivers/gpu/drm/i915/display/intel_dp.h | 5 +- drivers/gpu/drm/i915/display/intel_dp_aux.c | 15 +- drivers/gpu/drm/i915/display/intel_dp_aux.h | 1 + drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 17 +- .../gpu/drm/i915/display/intel_dp_link_training.c | 57 +- drivers/gpu/drm/i915/display/intel_dp_mst.c | 
246 +- drivers/gpu/drm/i915/display/intel_dp_tunnel.c | 2 +- drivers/gpu/drm/i915/display/intel_dpio_phy.c | 368 +- drivers/gpu/drm/i915/display/intel_dpio_phy.h | 48 +- drivers/gpu/drm/i915/display/intel_dpll.c | 596 +- drivers/gpu/drm/i915/display/intel_dpll.h | 12 +- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 611 +- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 82 +- drivers/gpu/drm/i915/display/intel_dsb.c | 5 +- drivers/gpu/drm/i915/display/intel_dsi.c | 5 +- drivers/gpu/drm/i915/display/intel_dvo.c | 5 +- drivers/gpu/drm/i915/display/intel_fb.c | 6 +- drivers/gpu/drm/i915/display/intel_fbc.c | 35 +- drivers/gpu/drm/i915/display/intel_fbc_regs.h | 120 + drivers/gpu/drm/i915/display/intel_fbdev.c | 270 +- drivers/gpu/drm/i915/display/intel_fbdev.h | 29 +- drivers/gpu/drm/i915/display/intel_fixed.h | 148 + drivers/gpu/drm/i915/display/intel_gmbus.c | 2 +- drivers/gpu/drm/i915/display/intel_hdcp.c | 6 +- drivers/gpu/drm/i915/display/intel_hdcp_gsc.c | 6 + drivers/gpu/drm/i915/display/intel_hdcp_gsc.h | 7 +- drivers/gpu/drm/i915/display/intel_hdmi.c | 96 +- drivers/gpu/drm/i915/display/intel_hotplug_irq.c | 2 +- drivers/gpu/drm/i915/display/intel_lpe_audio.c | 2 +- drivers/gpu/drm/i915/display/intel_lvds.c | 5 +- drivers/gpu/drm/i915/display/intel_opregion.c | 58 +- drivers/gpu/drm/i915/display/intel_opregion.h | 6 + drivers/gpu/drm/i915/display/intel_overlay.c | 7 +- drivers/gpu/drm/i915/display/intel_panel.c | 10 +- drivers/gpu/drm/i915/display/intel_pch_display.c | 4 +- drivers/gpu/drm/i915/display/intel_pmdemand.c | 14 +- drivers/gpu/drm/i915/display/intel_pmdemand.h | 5 +- drivers/gpu/drm/i915/display/intel_pps.c | 40 +- drivers/gpu/drm/i915/display/intel_pps.h | 2 + drivers/gpu/drm/i915/display/intel_psr.c | 486 +- drivers/gpu/drm/i915/display/intel_psr.h | 5 + drivers/gpu/drm/i915/display/intel_psr_regs.h | 50 +- drivers/gpu/drm/i915/display/intel_quirks.c | 56 +- drivers/gpu/drm/i915/display/intel_quirks.h | 6 +- drivers/gpu/drm/i915/display/intel_sdvo.c | 9 +- drivers/gpu/drm/i915/display/intel_snps_phy.c | 22 +- drivers/gpu/drm/i915/display/intel_snps_phy.h | 4 +- drivers/gpu/drm/i915/display/intel_sprite.c | 1 + drivers/gpu/drm/i915/display/intel_sprite_regs.h | 348 + drivers/gpu/drm/i915/display/intel_tc.c | 33 +- drivers/gpu/drm/i915/display/intel_tv.c | 8 +- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 36 +- drivers/gpu/drm/i915/display/intel_vrr.c | 33 +- drivers/gpu/drm/i915/display/skl_scaler.c | 7 +- drivers/gpu/drm/i915/display/skl_watermark.c | 322 +- drivers/gpu/drm/i915/display/skl_watermark.h | 14 +- drivers/gpu/drm/i915/display/skl_watermark_regs.h | 18 +- drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h | 309 + drivers/gpu/drm/i915/display/vlv_dsi.c | 470 +- drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 22 +- drivers/gpu/drm/i915/display/vlv_dsi_regs.h | 327 +- drivers/gpu/drm/i915/gem/i915_gem_context.c | 16 +- drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 1 + drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 2 +- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 6 +- drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 8 +- drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 18 +- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 6 +- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 18 +- .../drm/i915/gem/selftests/i915_gem_client_blt.c | 8 +- .../gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 5 +- drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c | 1 + 
drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 27 +- drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 40 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 1 + drivers/gpu/drm/i915/gt/intel_engine_cs.c | 49 +- drivers/gpu/drm/i915/gt/intel_engine_types.h | 8 +- .../gpu/drm/i915/gt/intel_execlists_submission.c | 18 +- drivers/gpu/drm/i915/gt/intel_ggtt.c | 9 +- drivers/gpu/drm/i915/gt/intel_gsc.c | 15 - drivers/gpu/drm/i915/gt/intel_gt.c | 6 +- drivers/gpu/drm/i915/gt/intel_gt.h | 5 + drivers/gpu/drm/i915/gt/intel_gt_irq.c | 6 +- drivers/gpu/drm/i915/gt/intel_gt_mcr.c | 52 +- drivers/gpu/drm/i915/gt/intel_gt_mcr.h | 2 +- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 2 +- drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 6 +- drivers/gpu/drm/i915/gt/intel_gt_regs.h | 60 +- drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c | 27 +- drivers/gpu/drm/i915/gt/intel_gtt.c | 2 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 51 +- drivers/gpu/drm/i915/gt/intel_migrate.c | 22 +- drivers/gpu/drm/i915/gt/intel_mocs.c | 52 +- drivers/gpu/drm/i915/gt/intel_rc6.c | 4 +- drivers/gpu/drm/i915/gt/intel_reset.c | 51 +- drivers/gpu/drm/i915/gt/intel_reset.h | 3 +- drivers/gpu/drm/i915/gt/intel_rps.c | 12 +- drivers/gpu/drm/i915/gt/intel_sseu.c | 13 +- drivers/gpu/drm/i915/gt/intel_tlb.c | 2 +- drivers/gpu/drm/i915/gt/intel_workarounds.c | 194 +- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 2 +- drivers/gpu/drm/i915/gt/selftest_reset.c | 2 +- drivers/gpu/drm/i915/gt/selftest_slpc.c | 6 +- drivers/gpu/drm/i915/gt/shmem_utils.c | 1 + .../gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h | 21 + drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h | 1 + drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h | 7 + drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c | 4 +- drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c | 3 +- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 22 +- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2 + drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 95 +- drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c | 12 +- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 8 +- drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 8 +- drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 17 + drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h | 1 + drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 +- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 4 +- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 4 - drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 4 +- drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 2 +- drivers/gpu/drm/i915/gvt/cmd_parser.c | 1 + drivers/gpu/drm/i915/gvt/display.c | 2 + drivers/gpu/drm/i915/gvt/fb_decoder.c | 5 +- drivers/gpu/drm/i915/gvt/firmware.c | 27 +- drivers/gpu/drm/i915/gvt/gtt.c | 1 + drivers/gpu/drm/i915/gvt/handlers.c | 9 +- drivers/gpu/drm/i915/gvt/mmio.c | 2 + drivers/gpu/drm/i915/gvt/vgpu.c | 1 + drivers/gpu/drm/i915/i915_debugfs.c | 13 +- drivers/gpu/drm/i915/i915_debugfs_params.c | 1 + drivers/gpu/drm/i915/i915_driver.c | 24 +- drivers/gpu/drm/i915/i915_drv.h | 26 +- drivers/gpu/drm/i915/i915_fixed.h | 148 - drivers/gpu/drm/i915/i915_getparam.c | 10 +- drivers/gpu/drm/i915/i915_gpu_error.c | 6 +- drivers/gpu/drm/i915/i915_hwmon.c | 6 - drivers/gpu/drm/i915/i915_irq.c | 8 +- drivers/gpu/drm/i915/i915_params.c | 3 - drivers/gpu/drm/i915/i915_params.h | 1 - drivers/gpu/drm/i915/i915_pci.c | 66 +- drivers/gpu/drm/i915/i915_perf.c | 19 +- drivers/gpu/drm/i915/i915_query.c | 2 +- drivers/gpu/drm/i915/i915_reg.h | 1405 +-- drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 6 +- drivers/gpu/drm/i915/i915_utils.h | 14 - drivers/gpu/drm/i915/i915_vma.c 
| 2 - drivers/gpu/drm/i915/intel_clock_gating.c | 60 +- drivers/gpu/drm/i915/intel_device_info.c | 2 - drivers/gpu/drm/i915/intel_device_info.h | 2 - drivers/gpu/drm/i915/intel_gvt.c | 1 + drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 21 +- drivers/gpu/drm/i915/intel_runtime_pm.c | 14 +- drivers/gpu/drm/i915/intel_step.c | 80 +- drivers/gpu/drm/i915/intel_uncore.c | 380 +- drivers/gpu/drm/i915/selftests/i915_selftest.c | 36 +- drivers/gpu/drm/i915/selftests/intel_uncore.c | 3 - drivers/gpu/drm/i915/soc/intel_dram.c | 2 + drivers/gpu/drm/i915/vlv_sideband.c | 1 - drivers/gpu/drm/imagination/Makefile | 2 +- drivers/gpu/drm/imagination/pvr_fw_trace.c | 1 + drivers/gpu/drm/imagination/pvr_vm_mips.c | 1 + drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c | 12 +- drivers/gpu/drm/lima/lima_drv.c | 21 +- drivers/gpu/drm/lima/lima_drv.h | 5 + drivers/gpu/drm/lima/lima_gem.c | 2 +- drivers/gpu/drm/lima/lima_trace.h | 2 +- drivers/gpu/drm/loongson/lsdc_crtc.c | 1 + drivers/gpu/drm/loongson/lsdc_gem.c | 13 +- drivers/gpu/drm/mediatek/Kconfig | 2 +- drivers/gpu/drm/mediatek/Makefile | 12 +- drivers/gpu/drm/mediatek/mtk_crtc.c | 1138 ++ drivers/gpu/drm/mediatek/mtk_crtc.h | 28 + drivers/gpu/drm/mediatek/mtk_ddp_comp.c | 684 ++ drivers/gpu/drm/mediatek/mtk_ddp_comp.h | 346 + drivers/gpu/drm/mediatek/mtk_disp_aal.c | 5 +- drivers/gpu/drm/mediatek/mtk_disp_ccorr.c | 5 +- drivers/gpu/drm/mediatek/mtk_disp_color.c | 5 +- drivers/gpu/drm/mediatek/mtk_disp_drv.h | 2 +- drivers/gpu/drm/mediatek/mtk_disp_gamma.c | 5 +- drivers/gpu/drm/mediatek/mtk_disp_merge.c | 3 +- drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 46 +- drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c | 7 +- drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 5 +- drivers/gpu/drm/mediatek/mtk_dp.c | 10 +- drivers/gpu/drm/mediatek/mtk_dpi.c | 7 +- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 1146 -- drivers/gpu/drm/mediatek/mtk_drm_crtc.h | 30 - drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 644 - drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 343 - drivers/gpu/drm/mediatek/mtk_drm_drv.c | 58 +- drivers/gpu/drm/mediatek/mtk_drm_drv.h | 8 +- drivers/gpu/drm/mediatek/mtk_drm_gem.c | 288 - drivers/gpu/drm/mediatek/mtk_drm_gem.h | 49 - drivers/gpu/drm/mediatek/mtk_drm_plane.c | 350 - drivers/gpu/drm/mediatek/mtk_drm_plane.h | 54 - drivers/gpu/drm/mediatek/mtk_dsi.c | 36 +- drivers/gpu/drm/mediatek/mtk_ethdr.c | 26 +- drivers/gpu/drm/mediatek/mtk_gem.c | 288 + drivers/gpu/drm/mediatek/mtk_gem.h | 48 + drivers/gpu/drm/mediatek/mtk_hdmi.c | 14 +- drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c | 3 +- drivers/gpu/drm/mediatek/mtk_mdp_rdma.c | 1 - drivers/gpu/drm/mediatek/mtk_padding.c | 5 +- drivers/gpu/drm/mediatek/mtk_plane.c | 352 + drivers/gpu/drm/mediatek/mtk_plane.h | 54 + drivers/gpu/drm/meson/meson_drv.c | 37 +- drivers/gpu/drm/mgag200/mgag200_drv.h | 7 +- drivers/gpu/drm/mgag200/mgag200_mode.c | 18 + drivers/gpu/drm/msm/.gitignore | 1 + drivers/gpu/drm/msm/Kconfig | 10 +- drivers/gpu/drm/msm/Makefile | 114 +- drivers/gpu/drm/msm/adreno/a2xx.xml.h | 3251 ----- drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 4 +- drivers/gpu/drm/msm/adreno/a2xx_gpu.h | 4 + drivers/gpu/drm/msm/adreno/a2xx_gpummu.c | 124 + drivers/gpu/drm/msm/adreno/a3xx.xml.h | 3268 ----- drivers/gpu/drm/msm/adreno/a4xx.xml.h | 4379 ------- drivers/gpu/drm/msm/adreno/a5xx.xml.h | 5572 --------- drivers/gpu/drm/msm/adreno/a6xx.xml.h | 11858 ------------------- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 2 +- drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 12 +- drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h | 422 - 
drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 10 +- drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 4 +- drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 94 +- drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 14 +- drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 539 - .../gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h | 1446 +++ drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 2803 ----- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 12 +- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 31 +- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 7 +- .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 32 +- .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 21 +- .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 26 +- drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 660 +- drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h | 25 +- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 6 - drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c | 6 +- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h | 4 +- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 3 +- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 30 +- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 2 +- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 124 +- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 42 +- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 6 +- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | 14 +- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 4 +- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c | 22 +- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h | 2 +- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 13 +- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 2 + drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 91 +- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 56 + drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 8 + drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 12 +- drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h | 1181 -- drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 4 +- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 1 - drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h | 4 +- drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c | 129 +- drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h | 1979 ---- drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h | 11 + drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 7 +- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 1 - drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h | 4 +- drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 125 +- drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c | 4 +- drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h | 2 +- drivers/gpu/drm/msm/disp/mdp_common.xml.h | 111 - drivers/gpu/drm/msm/disp/mdp_format.c | 630 +- drivers/gpu/drm/msm/disp/mdp_format.h | 77 + drivers/gpu/drm/msm/disp/mdp_kms.h | 18 +- drivers/gpu/drm/msm/dp/dp_audio.c | 25 +- drivers/gpu/drm/msm/dp/dp_aux.c | 31 +- drivers/gpu/drm/msm/dp/dp_catalog.c | 64 +- drivers/gpu/drm/msm/dp/dp_catalog.h | 49 +- drivers/gpu/drm/msm/dp/dp_ctrl.c | 17 +- drivers/gpu/drm/msm/dp/dp_ctrl.h | 1 - drivers/gpu/drm/msm/dp/dp_debug.c | 59 +- drivers/gpu/drm/msm/dp/dp_debug.h | 38 +- drivers/gpu/drm/msm/dp/dp_display.c | 97 +- drivers/gpu/drm/msm/dp/dp_display.h | 3 - drivers/gpu/drm/msm/dp/dp_drm.c | 2 - drivers/gpu/drm/msm/dp/dp_link.c | 4 - drivers/gpu/drm/msm/dp/dp_link.h | 1 - drivers/gpu/drm/msm/dp/dp_panel.c | 14 +- drivers/gpu/drm/msm/dp/dp_panel.h | 3 - drivers/gpu/drm/msm/dsi/dsi.c | 26 +- drivers/gpu/drm/msm/dsi/dsi.h | 7 +- drivers/gpu/drm/msm/dsi/dsi.xml.h | 790 -- drivers/gpu/drm/msm/dsi/dsi_host.c | 16 +- drivers/gpu/drm/msm/dsi/dsi_manager.c | 79 +- drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h | 227 - drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h | 309 - drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h | 237 - 
drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h | 384 - drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h | 286 - drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h | 483 - drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 131 - drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 8 +- drivers/gpu/drm/msm/dsi/sfpb.xml.h | 70 - drivers/gpu/drm/msm/hdmi/hdmi.c | 2 +- drivers/gpu/drm/msm/hdmi/hdmi.h | 10 +- drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 1399 --- drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c | 6 +- drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c | 4 +- drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 61 - drivers/gpu/drm/msm/msm_drv.c | 3 +- drivers/gpu/drm/msm/msm_drv.h | 11 +- drivers/gpu/drm/msm/msm_fb.c | 12 +- drivers/gpu/drm/msm/msm_gem.c | 20 +- drivers/gpu/drm/msm/msm_gem.h | 4 +- drivers/gpu/drm/msm/msm_gem_prime.c | 20 +- drivers/gpu/drm/msm/msm_gpu.c | 2 +- drivers/gpu/drm/msm/msm_gpu.h | 12 +- drivers/gpu/drm/msm/msm_gpummu.c | 121 - drivers/gpu/drm/msm/msm_kms.h | 4 - drivers/gpu/drm/msm/msm_mmu.h | 5 - drivers/gpu/drm/msm/registers/.gitignore | 4 + drivers/gpu/drm/msm/registers/adreno/a2xx.xml | 1865 +++ drivers/gpu/drm/msm/registers/adreno/a3xx.xml | 1751 +++ drivers/gpu/drm/msm/registers/adreno/a4xx.xml | 2409 ++++ drivers/gpu/drm/msm/registers/adreno/a5xx.xml | 3039 +++++ drivers/gpu/drm/msm/registers/adreno/a6xx.xml | 5011 ++++++++ drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml | 228 + .../gpu/drm/msm/registers/adreno/adreno_common.xml | 400 + .../gpu/drm/msm/registers/adreno/adreno_pm4.xml | 2268 ++++ drivers/gpu/drm/msm/registers/display/dsi.xml | 390 + .../gpu/drm/msm/registers/display/dsi_phy_10nm.xml | 102 + .../gpu/drm/msm/registers/display/dsi_phy_14nm.xml | 135 + .../gpu/drm/msm/registers/display/dsi_phy_20nm.xml | 100 + .../gpu/drm/msm/registers/display/dsi_phy_28nm.xml | 180 + .../msm/registers/display/dsi_phy_28nm_8960.xml | 134 + .../gpu/drm/msm/registers/display/dsi_phy_7nm.xml | 230 + drivers/gpu/drm/msm/registers/display/edp.xml | 239 + drivers/gpu/drm/msm/registers/display/hdmi.xml | 1015 ++ drivers/gpu/drm/msm/registers/display/mdp4.xml | 504 + drivers/gpu/drm/msm/registers/display/mdp5.xml | 806 ++ .../gpu/drm/msm/registers/display/mdp_common.xml | 90 + drivers/gpu/drm/msm/registers/display/msm.xml | 32 + drivers/gpu/drm/msm/registers/display/sfpb.xml | 17 + .../gpu/drm/msm/registers/freedreno_copyright.xml | 40 + drivers/gpu/drm/msm/registers/gen_header.py | 971 ++ drivers/gpu/drm/msm/registers/rules-fd.xsd | 404 + drivers/gpu/drm/nouveau/Kbuild | 10 +- drivers/gpu/drm/nouveau/dispnv50/crc.c | 2 + drivers/gpu/drm/nouveau/nouveau_abi16.c | 20 +- drivers/gpu/drm/nouveau/nouveau_abi16.h | 12 - drivers/gpu/drm/nouveau/nouveau_bios.c | 5 - drivers/gpu/drm/nouveau/nouveau_bo.c | 43 +- drivers/gpu/drm/nouveau/nouveau_bo.h | 2 + drivers/gpu/drm/nouveau/nouveau_display.c | 2 +- drivers/gpu/drm/nouveau/nouveau_dp.c | 2 +- drivers/gpu/drm/nouveau/nouveau_prime.c | 8 +- drivers/gpu/drm/nouveau/nvif/object.c | 24 +- drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 1 - drivers/gpu/drm/omapdrm/omap_fb.c | 1 + drivers/gpu/drm/omapdrm/omap_gem.c | 1 + drivers/gpu/drm/panel/Kconfig | 38 +- drivers/gpu/drm/panel/Makefile | 3 + drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 8 +- drivers/gpu/drm/panel/panel-edp.c | 155 +- drivers/gpu/drm/panel/panel-himax-hx8394.c | 3 +- drivers/gpu/drm/panel/panel-ilitek-ili9881c.c | 222 + drivers/gpu/drm/panel/panel-ilitek-ili9882t.c | 8 +- drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c | 1 + drivers/gpu/drm/panel/panel-khadas-ts050.c | 1112 +- drivers/gpu/drm/panel/panel-lg-sw43408.c | 341 + 
drivers/gpu/drm/panel/panel-novatek-nt36672a.c | 11 +- drivers/gpu/drm/panel/panel-novatek-nt36672e.c | 33 +- drivers/gpu/drm/panel/panel-raydium-rm69380.c | 344 + drivers/gpu/drm/panel/panel-samsung-atna33xc20.c | 22 +- drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c | 285 + drivers/gpu/drm/panel/panel-simple.c | 120 +- drivers/gpu/drm/panel/panel-sitronix-st7703.c | 87 + drivers/gpu/drm/panel/panel-truly-nt35597.c | 6 +- drivers/gpu/drm/panel/panel-visionox-rm69299.c | 16 +- drivers/gpu/drm/panfrost/Makefile | 2 - drivers/gpu/drm/panfrost/panfrost_debugfs.c | 21 - drivers/gpu/drm/panfrost/panfrost_debugfs.h | 14 - drivers/gpu/drm/panfrost/panfrost_device.h | 2 +- drivers/gpu/drm/panfrost/panfrost_drv.c | 51 +- drivers/gpu/drm/panfrost/panfrost_gem.c | 2 +- drivers/gpu/drm/panfrost/panfrost_job.c | 2 +- drivers/gpu/drm/panthor/Kconfig | 23 + drivers/gpu/drm/panthor/Makefile | 14 + drivers/gpu/drm/panthor/panthor_devfreq.c | 283 + drivers/gpu/drm/panthor/panthor_devfreq.h | 21 + drivers/gpu/drm/panthor/panthor_device.c | 557 + drivers/gpu/drm/panthor/panthor_device.h | 358 + drivers/gpu/drm/panthor/panthor_drv.c | 1488 +++ drivers/gpu/drm/panthor/panthor_fw.c | 1364 +++ drivers/gpu/drm/panthor/panthor_fw.h | 503 + drivers/gpu/drm/panthor/panthor_gem.c | 232 + drivers/gpu/drm/panthor/panthor_gem.h | 146 + drivers/gpu/drm/panthor/panthor_gpu.c | 482 + drivers/gpu/drm/panthor/panthor_gpu.h | 52 + drivers/gpu/drm/panthor/panthor_heap.c | 605 + drivers/gpu/drm/panthor/panthor_heap.h | 39 + drivers/gpu/drm/panthor/panthor_mmu.c | 2774 +++++ drivers/gpu/drm/panthor/panthor_mmu.h | 102 + drivers/gpu/drm/panthor/panthor_regs.h | 239 + drivers/gpu/drm/panthor/panthor_sched.c | 3573 ++++++ drivers/gpu/drm/panthor/panthor_sched.h | 50 + drivers/gpu/drm/qxl/qxl_display.c | 17 +- drivers/gpu/drm/qxl/qxl_object.c | 39 +- drivers/gpu/drm/qxl/qxl_object.h | 6 +- drivers/gpu/drm/qxl/qxl_prime.c | 4 +- drivers/gpu/drm/radeon/r100.c | 1 + drivers/gpu/drm/radeon/r300.c | 1 + drivers/gpu/drm/radeon/r420.c | 1 + drivers/gpu/drm/radeon/r600.c | 3 +- drivers/gpu/drm/radeon/radeon_fence.c | 1 + drivers/gpu/drm/radeon/radeon_gem.c | 1 + drivers/gpu/drm/radeon/radeon_ib.c | 2 + drivers/gpu/drm/radeon/radeon_pm.c | 1 + drivers/gpu/drm/radeon/radeon_prime.c | 11 - drivers/gpu/drm/radeon/radeon_ring.c | 2 + drivers/gpu/drm/radeon/radeon_ttm.c | 1 + drivers/gpu/drm/radeon/rs400.c | 1 + drivers/gpu/drm/radeon/rv515.c | 1 + drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c | 3 - drivers/gpu/drm/rockchip/cdn-dp-core.c | 34 +- drivers/gpu/drm/rockchip/cdn-dp-core.h | 2 +- drivers/gpu/drm/rockchip/inno_hdmi.c | 12 +- drivers/gpu/drm/rockchip/rk3066_hdmi.c | 12 +- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 2 +- drivers/gpu/drm/rockchip/rockchip_lvds.c | 1 - drivers/gpu/drm/scheduler/gpu_scheduler_trace.h | 4 +- drivers/gpu/drm/sti/sti_drv.c | 1 + drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | 18 +- drivers/gpu/drm/tegra/Kconfig | 2 +- drivers/gpu/drm/tests/drm_buddy_test.c | 249 +- drivers/gpu/drm/tidss/tidss_kms.c | 3 +- drivers/gpu/drm/tilcdc/tilcdc_panel.c | 6 - drivers/gpu/drm/tiny/simpledrm.c | 16 + drivers/gpu/drm/ttm/tests/ttm_bo_test.c | 48 +- drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c | 7 +- drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h | 3 +- drivers/gpu/drm/ttm/tests/ttm_pool_test.c | 4 +- drivers/gpu/drm/ttm/tests/ttm_resource_test.c | 2 +- drivers/gpu/drm/ttm/tests/ttm_tt_test.c | 20 +- drivers/gpu/drm/ttm/ttm_bo.c | 235 +- drivers/gpu/drm/ttm/ttm_device.c | 1 + drivers/gpu/drm/ttm/ttm_resource.c | 20 +- 
drivers/gpu/drm/ttm/ttm_tt.c | 5 +- drivers/gpu/drm/udl/udl_modeset.c | 3 +- drivers/gpu/drm/v3d/v3d_bo.c | 1 + drivers/gpu/drm/v3d/v3d_drv.c | 33 +- drivers/gpu/drm/v3d/v3d_drv.h | 30 +- drivers/gpu/drm/v3d/v3d_gem.c | 9 +- drivers/gpu/drm/v3d/v3d_irq.c | 48 +- drivers/gpu/drm/v3d/v3d_sched.c | 94 +- drivers/gpu/drm/v3d/v3d_sysfs.c | 13 +- drivers/gpu/drm/vc4/vc4_drv.h | 1 + drivers/gpu/drm/vc4/vc4_hdmi.c | 46 +- drivers/gpu/drm/virtio/virtgpu_drv.c | 1 - drivers/gpu/drm/virtio/virtgpu_trace.h | 2 +- drivers/gpu/drm/vkms/vkms_crtc.c | 7 +- drivers/gpu/drm/vmwgfx/Makefile | 2 +- drivers/gpu/drm/vmwgfx/ttm_object.c | 4 - drivers/gpu/drm/vmwgfx/vmwgfx_binding.c | 1 + drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 1 + drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c | 1 + drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 17 +- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 31 +- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 1 + drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 27 +- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 4 +- drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 1 + drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 52 +- drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 36 +- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 39 +- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 32 +- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 31 +- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 57 +- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 110 - drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 19 +- drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | 7 - drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c | 632 + drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h | 75 + drivers/gpu/drm/xe/Kconfig | 2 + drivers/gpu/drm/xe/Makefile | 27 +- drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h | 200 +- drivers/gpu/drm/xe/abi/guc_klvs_abi.h | 10 + drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h | 57 +- .../gpu/drm/xe/compat-i915-headers/i915_fixed.h | 6 - drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h | 9 - .../drm/xe/compat-i915-headers/i915_gem_stolen.h | 9 +- drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h | 26 - .../gpu/drm/xe/compat-i915-headers/intel_uc_fw.h | 11 - .../gpu/drm/xe/compat-i915-headers/intel_uncore.h | 6 +- drivers/gpu/drm/xe/display/intel_fb_bo.c | 8 +- drivers/gpu/drm/xe/display/intel_fbdev_fb.c | 16 +- drivers/gpu/drm/xe/display/xe_display.c | 19 +- drivers/gpu/drm/xe/display/xe_dsb_buffer.c | 4 +- drivers/gpu/drm/xe/display/xe_fb_pin.c | 39 +- drivers/gpu/drm/xe/display/xe_hdcp_gsc.c | 244 +- drivers/gpu/drm/xe/display/xe_plane_initial.c | 7 +- .../drm/xe/instructions/xe_gfx_state_commands.h | 18 + .../gpu/drm/xe/instructions/xe_gfxpipe_commands.h | 3 + drivers/gpu/drm/xe/instructions/xe_instr_defs.h | 1 + drivers/gpu/drm/xe/regs/xe_engine_regs.h | 3 - drivers/gpu/drm/xe/regs/xe_gsc_regs.h | 7 + drivers/gpu/drm/xe/regs/xe_gt_regs.h | 65 +- drivers/gpu/drm/xe/regs/xe_gtt_defs.h | 37 + drivers/gpu/drm/xe/regs/xe_guc_regs.h | 15 +- drivers/gpu/drm/xe/regs/xe_reg_defs.h | 19 + drivers/gpu/drm/xe/regs/xe_regs.h | 2 +- drivers/gpu/drm/xe/regs/xe_sriov_regs.h | 3 + drivers/gpu/drm/xe/tests/Makefile | 3 +- drivers/gpu/drm/xe/tests/xe_bo.c | 12 +- drivers/gpu/drm/xe/tests/xe_bo_test.c | 5 - drivers/gpu/drm/xe/tests/xe_dma_buf.c | 54 +- drivers/gpu/drm/xe/tests/xe_dma_buf_test.c | 5 - drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c | 136 + drivers/gpu/drm/xe/tests/xe_live_test_mod.c | 10 + drivers/gpu/drm/xe/tests/xe_migrate.c | 27 +- drivers/gpu/drm/xe/tests/xe_migrate_test.c | 5 - drivers/gpu/drm/xe/tests/xe_mocs.c | 96 +- drivers/gpu/drm/xe/tests/xe_mocs_test.c | 5 - drivers/gpu/drm/xe/tests/xe_wa_test.c | 1 + 
drivers/gpu/drm/xe/xe_bb.c | 3 +- drivers/gpu/drm/xe/xe_bo.c | 178 +- drivers/gpu/drm/xe/xe_bo.h | 74 +- drivers/gpu/drm/xe/xe_bo_evict.c | 4 +- drivers/gpu/drm/xe/xe_bo_types.h | 3 +- drivers/gpu/drm/xe/xe_debugfs.c | 24 +- drivers/gpu/drm/xe/xe_devcoredump.c | 37 +- drivers/gpu/drm/xe/xe_devcoredump.h | 6 + drivers/gpu/drm/xe/xe_device.c | 209 +- drivers/gpu/drm/xe/xe_device.h | 9 - drivers/gpu/drm/xe/xe_device_sysfs.c | 16 +- drivers/gpu/drm/xe/xe_device_sysfs.h | 2 +- drivers/gpu/drm/xe/xe_device_types.h | 26 +- drivers/gpu/drm/xe/xe_dma_buf.c | 7 +- drivers/gpu/drm/xe/xe_drm_client.c | 8 +- drivers/gpu/drm/xe/xe_exec.c | 28 +- drivers/gpu/drm/xe/xe_exec_queue.c | 74 +- drivers/gpu/drm/xe/xe_exec_queue_types.h | 8 +- drivers/gpu/drm/xe/xe_ggtt.c | 136 +- drivers/gpu/drm/xe/xe_ggtt.h | 8 +- drivers/gpu/drm/xe/xe_gsc.c | 100 +- drivers/gpu/drm/xe/xe_gsc.h | 2 + drivers/gpu/drm/xe/xe_gsc_proxy.c | 15 +- drivers/gpu/drm/xe/xe_gsc_proxy.h | 1 + drivers/gpu/drm/xe/xe_gsc_submit.c | 15 + drivers/gpu/drm/xe/xe_gsc_submit.h | 1 + drivers/gpu/drm/xe/xe_gsc_types.h | 1 + drivers/gpu/drm/xe/xe_gt.c | 59 +- drivers/gpu/drm/xe/xe_gt_clock.c | 5 - drivers/gpu/drm/xe/xe_gt_clock.h | 2 +- drivers/gpu/drm/xe/xe_gt_debugfs.c | 242 +- drivers/gpu/drm/xe/xe_gt_debugfs.h | 2 + drivers/gpu/drm/xe/xe_gt_freq.c | 63 +- drivers/gpu/drm/xe/xe_gt_freq.h | 2 +- drivers/gpu/drm/xe/xe_gt_idle.c | 43 +- drivers/gpu/drm/xe/xe_gt_idle.h | 2 +- drivers/gpu/drm/xe/xe_gt_mcr.c | 39 +- drivers/gpu/drm/xe/xe_gt_mcr.h | 14 + drivers/gpu/drm/xe/xe_gt_sriov_pf.c | 52 + drivers/gpu/drm/xe/xe_gt_sriov_pf.h | 20 + drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 1990 ++++ drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h | 56 + drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h | 54 + drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 257 + drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h | 27 + drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h | 35 + drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c | 418 + drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.h | 25 + drivers/gpu/drm/xe/xe_gt_sriov_pf_policy_types.h | 31 + drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h | 34 + drivers/gpu/drm/xe/xe_gt_sysfs.c | 14 +- drivers/gpu/drm/xe/xe_gt_sysfs.h | 2 +- drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c | 16 +- drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h | 2 +- drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 43 +- drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h | 2 +- drivers/gpu/drm/xe/xe_gt_topology.c | 115 +- drivers/gpu/drm/xe/xe_gt_topology.h | 11 + drivers/gpu/drm/xe/xe_gt_types.h | 22 +- drivers/gpu/drm/xe/xe_guc.c | 122 +- drivers/gpu/drm/xe/xe_guc_ads.c | 137 +- drivers/gpu/drm/xe/xe_guc_ads_types.h | 2 + drivers/gpu/drm/xe/xe_guc_ct.c | 126 +- drivers/gpu/drm/xe/xe_guc_debugfs.c | 9 +- drivers/gpu/drm/xe/xe_guc_fwif.h | 7 +- drivers/gpu/drm/xe/xe_guc_hwconfig.c | 7 +- drivers/gpu/drm/xe/xe_guc_id_mgr.c | 279 + drivers/gpu/drm/xe/xe_guc_id_mgr.h | 22 + drivers/gpu/drm/xe/xe_guc_klv_helpers.c | 134 + drivers/gpu/drm/xe/xe_guc_klv_helpers.h | 51 + drivers/gpu/drm/xe/xe_guc_log.c | 5 +- drivers/gpu/drm/xe/xe_guc_pc.c | 56 +- drivers/gpu/drm/xe/xe_guc_submit.c | 230 +- drivers/gpu/drm/xe/xe_guc_submit.h | 6 +- drivers/gpu/drm/xe/xe_guc_submit_types.h | 13 +- drivers/gpu/drm/xe/xe_guc_types.h | 21 +- drivers/gpu/drm/xe/xe_hmm.c | 253 + drivers/gpu/drm/xe/xe_hmm.h | 11 + drivers/gpu/drm/xe/xe_huc.c | 4 +- drivers/gpu/drm/xe/xe_huc_debugfs.c | 5 +- drivers/gpu/drm/xe/xe_hw_engine.c | 46 +- drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c | 155 +- drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h | 7 
+ drivers/gpu/drm/xe/xe_hw_fence.c | 2 +- drivers/gpu/drm/xe/xe_hwmon.c | 266 +- drivers/gpu/drm/xe/xe_irq.c | 3 +- drivers/gpu/drm/xe/xe_lmtt.c | 6 +- drivers/gpu/drm/xe/xe_lrc.c | 169 +- drivers/gpu/drm/xe/xe_lrc.h | 5 + drivers/gpu/drm/xe/xe_lrc_types.h | 2 + drivers/gpu/drm/xe/xe_memirq.c | 9 +- drivers/gpu/drm/xe/xe_migrate.c | 8 +- drivers/gpu/drm/xe/xe_mmio.c | 144 +- drivers/gpu/drm/xe/xe_mmio.h | 82 +- drivers/gpu/drm/xe/xe_mocs.c | 66 +- drivers/gpu/drm/xe/xe_module.c | 7 + drivers/gpu/drm/xe/xe_module.h | 3 + drivers/gpu/drm/xe/xe_pat.c | 21 +- drivers/gpu/drm/xe/xe_pci.c | 44 +- drivers/gpu/drm/xe/xe_pcode.c | 2 +- drivers/gpu/drm/xe/xe_platform_types.h | 1 + drivers/gpu/drm/xe/xe_pm.c | 291 +- drivers/gpu/drm/xe/xe_pm.h | 13 +- drivers/gpu/drm/xe/xe_pt.c | 13 +- drivers/gpu/drm/xe/xe_query.c | 53 +- drivers/gpu/drm/xe/xe_ring_ops.c | 11 +- drivers/gpu/drm/xe/xe_sa.c | 5 +- drivers/gpu/drm/xe/xe_sched_job.c | 23 +- drivers/gpu/drm/xe/xe_sched_job.h | 3 + drivers/gpu/drm/xe/xe_sriov.c | 62 +- drivers/gpu/drm/xe/xe_sriov.h | 6 +- drivers/gpu/drm/xe/xe_sriov_pf.c | 104 + drivers/gpu/drm/xe/xe_sriov_pf.h | 30 + drivers/gpu/drm/xe/xe_sriov_pf_helpers.h | 46 + drivers/gpu/drm/xe/xe_sriov_types.h | 19 + drivers/gpu/drm/xe/xe_sync.c | 7 +- drivers/gpu/drm/xe/xe_sync.h | 1 - drivers/gpu/drm/xe/xe_tile.c | 17 +- drivers/gpu/drm/xe/xe_tile_sysfs.c | 17 +- drivers/gpu/drm/xe/xe_tile_sysfs.h | 2 +- drivers/gpu/drm/xe/xe_trace.h | 6 +- drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c | 10 +- drivers/gpu/drm/xe/xe_ttm_sys_mgr.c | 5 +- drivers/gpu/drm/xe/xe_ttm_vram_mgr.c | 16 +- drivers/gpu/drm/xe/xe_ttm_vram_mgr.h | 1 + drivers/gpu/drm/xe/xe_tuning.c | 10 +- drivers/gpu/drm/xe/xe_uc.c | 11 - drivers/gpu/drm/xe/xe_uc_debugfs.c | 2 + drivers/gpu/drm/xe/xe_uc_fw.c | 53 +- drivers/gpu/drm/xe/xe_uc_fw.h | 8 +- drivers/gpu/drm/xe/xe_uc_fw_types.h | 3 +- drivers/gpu/drm/xe/xe_vm.c | 213 +- drivers/gpu/drm/xe/xe_vm_types.h | 11 +- drivers/gpu/drm/xe/xe_vram_freq.c | 20 +- drivers/gpu/drm/xe/xe_vram_freq.h | 2 +- drivers/gpu/drm/xe/xe_wa.c | 134 +- drivers/gpu/drm/xe/xe_wa_oob.rules | 11 +- drivers/gpu/drm/xen/xen_drm_front_gem.c | 1 + drivers/gpu/drm/xlnx/zynqmp_disp.c | 231 +- drivers/gpu/drm/xlnx/zynqmp_disp.h | 17 +- drivers/gpu/drm/xlnx/zynqmp_disp_regs.h | 8 +- drivers/gpu/drm/xlnx/zynqmp_dp.c | 85 +- drivers/gpu/drm/xlnx/zynqmp_dpsub.c | 1 + drivers/gpu/drm/xlnx/zynqmp_kms.c | 14 +- drivers/gpu/host1x/dev.c | 24 + drivers/hid/Kconfig | 16 + drivers/hid/Makefile | 1 + drivers/hid/amd-sfh-hid/Makefile | 2 +- drivers/hid/amd-sfh-hid/amd_sfh_pcie.c | 5 +- drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c | 7 +- drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c | 2 +- drivers/hid/bpf/hid_bpf_dispatch.c | 226 +- drivers/hid/bpf/progs/FR-TEC__Raptor-Mach-2.bpf.c | 185 + drivers/hid/bpf/progs/HP__Elite-Presenter.bpf.c | 58 + drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c | 290 + .../hid/bpf/progs/IOGEAR__Kaliber-MMOmentum.bpf.c | 59 + drivers/hid/bpf/progs/Makefile | 91 + .../hid/bpf/progs/Microsoft__XBox-Elite-2.bpf.c | 133 + drivers/hid/bpf/progs/README | 102 + drivers/hid/bpf/progs/Wacom__ArtPen.bpf.c | 173 + drivers/hid/bpf/progs/XPPen__Artist24.bpf.c | 229 + drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c | 274 + drivers/hid/bpf/progs/hid_bpf.h | 15 + drivers/hid/bpf/progs/hid_bpf_helpers.h | 168 + drivers/hid/hid-asus.c | 81 +- drivers/hid/hid-core.c | 2 + drivers/hid/hid-corsair.c | 4 +- drivers/hid/hid-debug.c | 3437 +++++- drivers/hid/hid-google-hammer.c | 5 +- drivers/hid/hid-ids.h | 2 + 
drivers/hid/hid-kye.c | 75 +- drivers/hid/hid-lenovo.c | 23 +- drivers/hid/hid-logitech-hidpp.c | 15 +- drivers/hid/hid-nintendo.c | 63 +- drivers/hid/hid-picolcd_backlight.c | 7 - drivers/hid/hid-picolcd_core.c | 20 +- drivers/hid/hid-picolcd_fb.c | 14 +- drivers/hid/hid-picolcd_lcd.c | 2 +- drivers/hid/hid-playstation.c | 138 +- drivers/hid/hid-roccat-isku.c | 2 +- drivers/hid/hid-roccat-kone.c | 12 +- drivers/hid/hid-roccat-koneplus.c | 4 +- drivers/hid/hid-roccat-kovaplus.c | 10 +- drivers/hid/hid-roccat-pyra.c | 6 +- drivers/hid/hid-sensor-custom.c | 17 +- drivers/hid/hid-sony.c | 7 +- drivers/hid/hid-steam.c | 155 +- drivers/hid/hid-uclogic-params.c | 3 + drivers/hid/hid-winwing.c | 226 + drivers/hid/i2c-hid/i2c-hid-core.c | 44 +- drivers/hid/intel-ish-hid/Makefile | 3 +- drivers/hid/intel-ish-hid/ipc/hw-ish.h | 45 +- drivers/hid/intel-ish-hid/ipc/ipc.c | 21 +- drivers/hid/intel-ish-hid/ipc/pci-ish.c | 75 +- drivers/hid/intel-ish-hid/ishtp/hbm.c | 21 + drivers/hid/intel-ish-hid/ishtp/init.c | 8 + drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h | 28 + drivers/hid/intel-ish-hid/ishtp/loader.c | 284 + drivers/hid/intel-ish-hid/ishtp/loader.h | 231 + drivers/hid/surface-hid/surface_kbd.c | 5 +- drivers/hsi/controllers/omap_ssi_core.c | 6 +- drivers/hsi/controllers/omap_ssi_port.c | 6 +- drivers/hte/hte-tegra194-test.c | 6 +- drivers/hv/Makefile | 2 +- drivers/hv/channel_mgmt.c | 15 +- drivers/hv/hv.c | 37 +- drivers/hv/hv_balloon.c | 190 +- drivers/hv/hv_common.c | 6 +- drivers/hv/hv_fcopy.c | 427 - drivers/hv/hv_util.c | 12 - drivers/hv/hyperv_vmbus.h | 5 + drivers/hwmon/Kconfig | 38 +- drivers/hwmon/Makefile | 1 + drivers/hwmon/acpi_power_meter.c | 16 + drivers/hwmon/ad7414.c | 2 +- drivers/hwmon/adc128d818.c | 59 +- drivers/hwmon/adm1026.c | 2 +- drivers/hwmon/adm1029.c | 2 +- drivers/hwmon/adm1177.c | 2 +- drivers/hwmon/adt7410.c | 4 +- drivers/hwmon/adt7411.c | 2 +- drivers/hwmon/adt7462.c | 2 +- drivers/hwmon/adt7470.c | 2 +- drivers/hwmon/adt7475.c | 2 +- drivers/hwmon/aquacomputer_d5next.c | 51 +- drivers/hwmon/asb100.c | 2 +- drivers/hwmon/aspeed-g6-pwm-tach.c | 27 +- drivers/hwmon/atxp1.c | 2 +- drivers/hwmon/coretemp.c | 2 +- drivers/hwmon/da9052-hwmon.c | 38 +- drivers/hwmon/dell-smm-hwmon.c | 15 + drivers/hwmon/ds620.c | 2 +- drivers/hwmon/emc1403.c | 838 +- drivers/hwmon/emc2103.c | 2 +- drivers/hwmon/emc2305.c | 8 +- drivers/hwmon/emc6w201.c | 2 +- drivers/hwmon/ftsteutates.c | 2 +- drivers/hwmon/g760a.c | 2 +- drivers/hwmon/g762.c | 4 +- drivers/hwmon/gl518sm.c | 2 +- drivers/hwmon/gl520sm.c | 2 +- drivers/hwmon/hih6130.c | 2 +- drivers/hwmon/hs3001.c | 2 +- drivers/hwmon/i5k_amb.c | 15 +- drivers/hwmon/ibmpex.c | 14 +- drivers/hwmon/ina209.c | 2 +- drivers/hwmon/ina238.c | 2 +- drivers/hwmon/ina3221.c | 2 +- drivers/hwmon/it87.c | 127 +- drivers/hwmon/jc42.c | 4 +- drivers/hwmon/lenovo-ec-sensors.c | 602 + drivers/hwmon/lineage-pem.c | 2 +- drivers/hwmon/lm70.c | 4 +- drivers/hwmon/lm73.c | 2 +- drivers/hwmon/lm77.c | 2 +- drivers/hwmon/lm87.c | 4 +- drivers/hwmon/lm93.c | 4 +- drivers/hwmon/lm95241.c | 4 +- drivers/hwmon/lm95245.c | 4 +- drivers/hwmon/ltc2945.c | 2 +- drivers/hwmon/ltc2947-i2c.c | 2 +- drivers/hwmon/ltc2990.c | 2 +- drivers/hwmon/ltc2991.c | 6 +- drivers/hwmon/ltc2992.c | 2 +- drivers/hwmon/ltc4151.c | 2 +- drivers/hwmon/ltc4215.c | 2 +- drivers/hwmon/ltc4222.c | 2 +- drivers/hwmon/ltc4245.c | 2 +- drivers/hwmon/ltc4260.c | 2 +- drivers/hwmon/ltc4261.c | 2 +- drivers/hwmon/max127.c | 2 +- drivers/hwmon/max1619.c | 2 +- drivers/hwmon/max31730.c | 2 +- 
drivers/hwmon/max31790.c | 10 +- drivers/hwmon/max6620.c | 2 +- drivers/hwmon/max6621.c | 2 +- drivers/hwmon/max6639.c | 341 +- drivers/hwmon/max6642.c | 2 +- drivers/hwmon/max6697.c | 5 +- drivers/hwmon/mc34vr500.c | 2 +- drivers/hwmon/nct7802.c | 2 +- drivers/hwmon/nct7904.c | 2 +- drivers/hwmon/npcm750-pwm-fan.c | 7 +- drivers/hwmon/nzxt-kraken3.c | 58 +- drivers/hwmon/pcf8591.c | 2 +- drivers/hwmon/pmbus/Kconfig | 23 +- drivers/hwmon/pmbus/Makefile | 2 + drivers/hwmon/pmbus/adm1266.c | 2 +- drivers/hwmon/pmbus/adm1275.c | 7 +- drivers/hwmon/pmbus/adp1050.c | 56 + drivers/hwmon/pmbus/inspur-ipsps.c | 2 +- drivers/hwmon/pmbus/ir35221.c | 2 +- drivers/hwmon/pmbus/ir36021.c | 2 +- drivers/hwmon/pmbus/ir38064.c | 8 +- drivers/hwmon/pmbus/irps5401.c | 2 +- drivers/hwmon/pmbus/lt7182s.c | 2 +- drivers/hwmon/pmbus/ltc3815.c | 2 +- drivers/hwmon/pmbus/max15301.c | 4 +- drivers/hwmon/pmbus/max16064.c | 2 +- drivers/hwmon/pmbus/max20751.c | 2 +- drivers/hwmon/pmbus/max31785.c | 6 +- drivers/hwmon/pmbus/max8688.c | 2 +- drivers/hwmon/pmbus/mp2888.c | 2 +- drivers/hwmon/pmbus/mp2975.c | 136 +- drivers/hwmon/pmbus/mp5990.c | 2 +- drivers/hwmon/pmbus/mpq8785.c | 2 +- drivers/hwmon/pmbus/pli1209bc.c | 2 +- drivers/hwmon/pmbus/pm6764tr.c | 2 +- drivers/hwmon/pmbus/pxe1610.c | 6 +- drivers/hwmon/pmbus/stpddc60.c | 4 +- drivers/hwmon/pmbus/tda38640.c | 2 +- drivers/hwmon/pmbus/tps40422.c | 2 +- drivers/hwmon/pmbus/tps546d24.c | 2 +- drivers/hwmon/pmbus/xdp710.c | 131 + drivers/hwmon/pmbus/xdpe12284.c | 6 +- drivers/hwmon/pmbus/xdpe152c4.c | 4 +- drivers/hwmon/pt5161l.c | 2 +- drivers/hwmon/pwm-fan.c | 45 +- drivers/hwmon/sbrmi.c | 2 +- drivers/hwmon/sbtsi_temp.c | 2 +- drivers/hwmon/sht21.c | 2 +- drivers/hwmon/sht4x.c | 2 +- drivers/hwmon/smsc47m192.c | 2 +- drivers/hwmon/stts751.c | 3 +- drivers/hwmon/tc654.c | 4 +- drivers/hwmon/tc74.c | 2 +- drivers/hwmon/tmp102.c | 2 +- drivers/hwmon/tmp103.c | 2 +- drivers/hwmon/tmp108.c | 2 +- drivers/hwmon/w83791d.c | 2 +- drivers/hwmon/w83792d.c | 2 +- drivers/hwmon/w83793.c | 2 +- drivers/hwmon/w83l785ts.c | 2 +- drivers/hwmon/w83l786ng.c | 2 +- drivers/hwtracing/coresight/coresight-catu.c | 138 +- drivers/hwtracing/coresight/coresight-catu.h | 1 + drivers/hwtracing/coresight/coresight-core.c | 29 + drivers/hwtracing/coresight/coresight-cpu-debug.c | 137 +- drivers/hwtracing/coresight/coresight-cti-core.c | 1 - drivers/hwtracing/coresight/coresight-etb10.c | 1 - drivers/hwtracing/coresight/coresight-etm3x-core.c | 1 - drivers/hwtracing/coresight/coresight-etm4x-core.c | 1 - drivers/hwtracing/coresight/coresight-funnel.c | 88 +- drivers/hwtracing/coresight/coresight-platform.c | 4 +- drivers/hwtracing/coresight/coresight-priv.h | 10 + drivers/hwtracing/coresight/coresight-replicator.c | 83 +- drivers/hwtracing/coresight/coresight-stm.c | 115 +- drivers/hwtracing/coresight/coresight-tmc-core.c | 182 +- drivers/hwtracing/coresight/coresight-tmc.h | 2 + drivers/hwtracing/coresight/coresight-tpda.c | 1 - drivers/hwtracing/coresight/coresight-tpdm.c | 1 - drivers/hwtracing/coresight/coresight-tpiu.c | 117 +- drivers/hwtracing/coresight/coresight-trbe.c | 1 + drivers/hwtracing/intel_th/acpi.c | 6 +- drivers/hwtracing/intel_th/core.c | 14 +- drivers/hwtracing/intel_th/gth.c | 8 +- drivers/hwtracing/intel_th/msu.c | 12 +- drivers/hwtracing/intel_th/pci.c | 8 +- drivers/hwtracing/intel_th/sth.c | 2 +- drivers/hwtracing/ptt/hisi_ptt.c | 1 + drivers/hwtracing/stm/console.c | 1 + drivers/hwtracing/stm/core.c | 8 +- drivers/hwtracing/stm/ftrace.c | 1 + 
drivers/hwtracing/stm/heartbeat.c | 1 + drivers/hwtracing/stm/p_basic.c | 3 +- drivers/hwtracing/stm/p_sys-t.c | 93 +- drivers/hwtracing/stm/stm.h | 2 +- drivers/i2c/Kconfig | 2 +- drivers/i2c/busses/Kconfig | 49 +- drivers/i2c/busses/Makefile | 3 +- drivers/i2c/busses/i2c-ali1535.c | 8 +- drivers/i2c/busses/i2c-ali1563.c | 1 - drivers/i2c/busses/i2c-ali15x3.c | 4 +- drivers/i2c/busses/i2c-amd-mp2-plat.c | 8 +- drivers/i2c/busses/i2c-at91-master.c | 1 - drivers/i2c/busses/i2c-bcm-iproc.c | 2 - drivers/i2c/busses/i2c-bcm2835.c | 1 - drivers/i2c/busses/i2c-cadence.c | 2 - drivers/i2c/busses/i2c-davinci.c | 1 - drivers/i2c/busses/i2c-designware-pcidrv.c | 2 - drivers/i2c/busses/i2c-designware-platdrv.c | 9 +- drivers/i2c/busses/i2c-digicolor.c | 6 +- drivers/i2c/busses/i2c-exynos5.c | 12 +- drivers/i2c/busses/i2c-hix5hd2.c | 8 +- drivers/i2c/busses/i2c-i801.c | 39 +- drivers/i2c/busses/i2c-img-scb.c | 5 +- drivers/i2c/busses/i2c-imx-lpi2c.c | 6 +- drivers/i2c/busses/i2c-ismt.c | 1 - drivers/i2c/busses/i2c-jz4780.c | 22 +- drivers/i2c/busses/i2c-mpc.c | 11 +- drivers/i2c/busses/i2c-nomadik.c | 8 +- drivers/i2c/busses/i2c-ocores.c | 21 +- drivers/i2c/busses/i2c-octeon-core.c | 141 +- drivers/i2c/busses/i2c-octeon-core.h | 53 +- drivers/i2c/busses/i2c-omap.c | 11 +- drivers/i2c/busses/i2c-pxa.c | 14 +- drivers/i2c/busses/i2c-qcom-geni.c | 10 +- drivers/i2c/busses/i2c-qup.c | 4 +- drivers/i2c/busses/i2c-riic.c | 125 +- drivers/i2c/busses/i2c-rk3x.c | 14 +- drivers/i2c/busses/i2c-s3c2410.c | 6 +- drivers/i2c/busses/i2c-sh_mobile.c | 1 - drivers/i2c/busses/i2c-st.c | 11 +- drivers/i2c/busses/i2c-stm32f4.c | 8 +- drivers/i2c/busses/i2c-stm32f7.c | 8 +- drivers/i2c/busses/i2c-synquacer.c | 19 +- drivers/i2c/busses/i2c-tegra.c | 2 - drivers/i2c/busses/i2c-thunderx-pcidrv.c | 13 +- drivers/i2c/busses/i2c-uniphier-f.c | 1 - drivers/i2c/busses/i2c-uniphier.c | 4 +- drivers/i2c/busses/i2c-viai2c-common.c | 203 + drivers/i2c/busses/i2c-viai2c-common.h | 85 + drivers/i2c/busses/i2c-viai2c-wmt.c | 184 + drivers/i2c/busses/i2c-viai2c-zhaoxin.c | 367 + drivers/i2c/busses/i2c-viperboard.c | 1 - drivers/i2c/busses/i2c-wmt.c | 421 - drivers/i2c/i2c-mux.c | 24 +- drivers/i2c/muxes/i2c-arb-gpio-challenge.c | 2 +- drivers/i2c/muxes/i2c-mux-gpio.c | 3 +- drivers/i2c/muxes/i2c-mux-gpmux.c | 2 +- drivers/i2c/muxes/i2c-mux-ltc4306.c | 2 +- drivers/i2c/muxes/i2c-mux-mlxcpld.c | 2 +- drivers/i2c/muxes/i2c-mux-pca9541.c | 2 +- drivers/i2c/muxes/i2c-mux-pca954x.c | 2 +- drivers/i2c/muxes/i2c-mux-pinctrl.c | 2 +- drivers/i2c/muxes/i2c-mux-reg.c | 2 +- drivers/i3c/device.c | 4 + drivers/i3c/master.c | 6 + drivers/i3c/master/dw-i3c-master.c | 67 +- drivers/i3c/master/dw-i3c-master.h | 2 + drivers/i3c/master/mipi-i3c-hci/core.c | 8 + drivers/iio/Makefile | 1 + drivers/iio/accel/adxl345.h | 36 +- drivers/iio/accel/adxl345_core.c | 92 +- drivers/iio/accel/adxl345_i2c.c | 2 +- drivers/iio/accel/adxl345_spi.c | 10 +- drivers/iio/accel/adxl367.c | 2 +- drivers/iio/accel/bmc150-accel-core.c | 44 +- drivers/iio/accel/fxls8962af-core.c | 10 +- drivers/iio/accel/kxcjk-1013.c | 80 +- drivers/iio/accel/mma8452.c | 6 +- drivers/iio/accel/mxc4005.c | 22 + drivers/iio/adc/Kconfig | 27 + drivers/iio/adc/Makefile | 2 + drivers/iio/adc/ab8500-gpadc.c | 8 +- drivers/iio/adc/ad4130.c | 7 +- drivers/iio/adc/ad7124.c | 55 +- drivers/iio/adc/ad7173.c | 1181 ++ drivers/iio/adc/ad7192.c | 38 +- drivers/iio/adc/ad7266.c | 1 - drivers/iio/adc/ad7292.c | 13 +- drivers/iio/adc/ad7944.c | 690 ++ drivers/iio/adc/ad799x.c | 7 +- drivers/iio/adc/ad9467.c | 
405 +- drivers/iio/adc/ad_sigma_delta.c | 29 +- drivers/iio/adc/adi-axi-adc.c | 140 +- drivers/iio/adc/exynos_adc.c | 16 +- drivers/iio/adc/fsl-imx25-gcq.c | 150 +- drivers/iio/adc/hx711.c | 5 +- drivers/iio/adc/intel_mrfld_adc.c | 12 +- drivers/iio/adc/max11410.c | 27 +- drivers/iio/adc/mcp3564.c | 16 +- drivers/iio/adc/mxs-lradc-adc.c | 1 - drivers/iio/adc/pac1934.c | 77 +- drivers/iio/adc/qcom-spmi-adc5.c | 7 +- drivers/iio/adc/rcar-gyroadc.c | 21 +- drivers/iio/adc/rtq6056.c | 34 +- drivers/iio/adc/rzg2l_adc.c | 11 +- drivers/iio/adc/spear_adc.c | 25 +- drivers/iio/adc/stm32-adc.c | 72 +- drivers/iio/adc/stm32-dfsdm-adc.c | 12 +- drivers/iio/adc/ti-ads1015.c | 5 +- drivers/iio/adc/ti-ads131e08.c | 12 +- drivers/iio/adc/twl4030-madc.c | 19 +- drivers/iio/adc/twl6030-gpadc.c | 8 +- drivers/iio/addac/ad74115.c | 40 +- drivers/iio/addac/ad74413r.c | 10 +- drivers/iio/buffer/industrialio-buffer-dma.c | 100 +- drivers/iio/buffer/industrialio-buffer-dmaengine.c | 86 +- .../iio/common/inv_sensors/inv_sensors_timestamp.c | 24 +- drivers/iio/dac/Kconfig | 37 + drivers/iio/dac/Makefile | 2 + drivers/iio/dac/ad3552r.c | 110 +- drivers/iio/dac/ad5755.c | 24 +- drivers/iio/dac/ad5770r.c | 19 +- drivers/iio/dac/ad9739a.c | 464 + drivers/iio/dac/adi-axi-dac.c | 635 + drivers/iio/dac/ltc2688.c | 28 +- drivers/iio/dac/ti-dac5571.c | 3 + drivers/iio/frequency/admfm2000.c | 24 +- drivers/iio/frequency/admv1013.c | 40 +- drivers/iio/frequency/adrf6780.c | 1 - drivers/iio/gyro/mpu3050-i2c.c | 2 +- drivers/iio/health/max30102.c | 2 + drivers/iio/humidity/hdc3020.c | 111 +- drivers/iio/humidity/hts221_core.c | 2 +- drivers/iio/imu/inv_icm42600/inv_icm42600.h | 37 + drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c | 75 +- drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c | 46 +- drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h | 2 + drivers/iio/imu/inv_icm42600/inv_icm42600_core.c | 27 + drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c | 84 +- drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c | 6 + drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c | 6 + drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 542 +- drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c | 2 +- drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h | 36 +- drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | 21 +- drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c | 84 +- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c | 2 +- drivers/iio/industrialio-acpi.c | 85 + drivers/iio/industrialio-backend.c | 305 +- drivers/iio/industrialio-buffer.c | 122 +- drivers/iio/industrialio-core.c | 43 +- drivers/iio/industrialio-gts-helper.c | 7 +- drivers/iio/industrialio-trigger.c | 71 +- drivers/iio/inkern.c | 263 +- drivers/iio/light/Kconfig | 12 + drivers/iio/light/Makefile | 1 + drivers/iio/light/apds9306.c | 1361 +++ drivers/iio/light/st_uvis25_core.c | 2 +- drivers/iio/light/stk3310.c | 1 - drivers/iio/pressure/bmp280-core.c | 16 +- drivers/iio/pressure/dps310.c | 127 +- drivers/iio/pressure/hsc030pa_spi.c | 7 +- drivers/iio/pressure/zpa2326.c | 10 +- drivers/iio/temperature/ltc2983.c | 142 +- drivers/infiniband/core/cache.c | 14 +- drivers/infiniband/core/cma_trace.h | 4 +- drivers/infiniband/core/device.c | 22 +- drivers/infiniband/core/iwcm.c | 11 +- drivers/infiniband/core/lag.c | 3 +- drivers/infiniband/core/nldev.c | 23 +- drivers/infiniband/core/restrack.c | 12 +- drivers/infiniband/core/roce_gid_mgmt.c | 3 +- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 8 +- drivers/infiniband/hw/bnxt_re/qplib_fp.h | 6 +- drivers/infiniband/hw/efa/efa_admin_cmds_defs.h | 11 +- 
drivers/infiniband/hw/efa/efa_com_cmd.c | 3 + drivers/infiniband/hw/efa/efa_com_cmd.h | 1 + drivers/infiniband/hw/efa/efa_main.c | 11 + drivers/infiniband/hw/efa/efa_verbs.c | 19 +- drivers/infiniband/hw/erdma/erdma.h | 13 +- drivers/infiniband/hw/erdma/erdma_cmdq.c | 99 +- drivers/infiniband/hw/erdma/erdma_cq.c | 2 +- drivers/infiniband/hw/erdma/erdma_eq.c | 54 +- drivers/infiniband/hw/erdma/erdma_hw.h | 6 +- drivers/infiniband/hw/erdma/erdma_main.c | 15 +- drivers/infiniband/hw/erdma/erdma_qp.c | 4 +- drivers/infiniband/hw/erdma/erdma_verbs.c | 105 +- drivers/infiniband/hw/erdma/erdma_verbs.h | 16 +- drivers/infiniband/hw/hfi1/hfi.h | 2 +- drivers/infiniband/hw/hfi1/ipoib_main.c | 20 +- drivers/infiniband/hw/hfi1/netdev.h | 2 +- drivers/infiniband/hw/hfi1/netdev_rx.c | 9 +- drivers/infiniband/hw/hfi1/pcie.c | 30 +- drivers/infiniband/hw/hfi1/trace_dbg.h | 2 +- drivers/infiniband/hw/hfi1/trace_rx.h | 2 +- drivers/infiniband/hw/hfi1/trace_tid.h | 4 +- drivers/infiniband/hw/hfi1/trace_tx.h | 4 +- drivers/infiniband/hw/hns/hns_roce_ah.c | 33 +- drivers/infiniband/hw/hns/hns_roce_alloc.c | 3 +- drivers/infiniband/hw/hns/hns_roce_cq.c | 1 + drivers/infiniband/hw/hns/hns_roce_device.h | 18 +- drivers/infiniband/hw/hns/hns_roce_hem.c | 15 +- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 309 +- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 19 + drivers/infiniband/hw/hns/hns_roce_main.c | 24 +- drivers/infiniband/hw/hns/hns_roce_mr.c | 9 +- drivers/infiniband/hw/hns/hns_roce_qp.c | 33 +- drivers/infiniband/hw/hns/hns_roce_srq.c | 8 +- drivers/infiniband/hw/irdma/cm.c | 3 +- drivers/infiniband/hw/mana/cq.c | 75 +- drivers/infiniband/hw/mana/device.c | 53 +- drivers/infiniband/hw/mana/main.c | 291 +- drivers/infiniband/hw/mana/mana_ib.h | 133 +- drivers/infiniband/hw/mana/mr.c | 2 +- drivers/infiniband/hw/mana/qp.c | 110 +- drivers/infiniband/hw/mana/wq.c | 31 +- drivers/infiniband/hw/mlx4/alias_GUID.c | 2 +- drivers/infiniband/hw/mlx4/mad.c | 2 +- drivers/infiniband/hw/mlx5/main.c | 3 +- drivers/infiniband/hw/mlx5/mlx5_ib.h | 13 + drivers/infiniband/hw/mlx5/odp.c | 6 +- drivers/infiniband/hw/mlx5/qp.c | 3 +- drivers/infiniband/hw/mlx5/restrack.c | 29 + drivers/infiniband/hw/qedr/qedr_iw_cm.c | 3 +- drivers/infiniband/hw/qib/qib.h | 1 - drivers/infiniband/hw/qib/qib_driver.c | 6 - drivers/infiniband/hw/qib/qib_iba7220.c | 2 +- drivers/infiniband/hw/qib/qib_iba7322.c | 5 +- drivers/infiniband/hw/qib/qib_pcie.c | 2 +- drivers/infiniband/hw/qib/qib_sysfs.c | 10 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 2 +- drivers/infiniband/sw/rdmavt/trace.h | 2 +- drivers/infiniband/sw/rdmavt/trace_rvt.h | 2 +- drivers/infiniband/sw/rxe/rxe_comp.c | 32 +- drivers/infiniband/sw/rxe/rxe_hw_counters.c | 2 +- drivers/infiniband/sw/rxe/rxe_hw_counters.h | 2 +- drivers/infiniband/sw/rxe/rxe_loc.h | 3 +- drivers/infiniband/sw/rxe/rxe_net.c | 57 +- drivers/infiniband/sw/rxe/rxe_pool.c | 4 +- drivers/infiniband/sw/rxe/rxe_qp.c | 46 +- drivers/infiniband/sw/rxe/rxe_req.c | 89 +- drivers/infiniband/sw/rxe/rxe_resp.c | 14 +- drivers/infiniband/sw/rxe/rxe_verbs.c | 11 +- drivers/infiniband/sw/rxe/rxe_verbs.h | 7 +- drivers/infiniband/ulp/ipoib/ipoib_main.c | 7 +- drivers/input/joystick/adafruit-seesaw.c | 21 +- drivers/input/joystick/as5011.c | 2 +- drivers/input/joystick/qwiic-joystick.c | 4 +- drivers/input/joystick/xpad.c | 2 + drivers/input/keyboard/adp5588-keys.c | 4 +- drivers/input/keyboard/cros_ec_keyb.c | 2 - drivers/input/keyboard/cypress-sf.c | 2 +- drivers/input/keyboard/dlink-dir685-touchkeys.c | 2 +- 
drivers/input/keyboard/lm8323.c | 2 +- drivers/input/keyboard/lm8333.c | 2 +- drivers/input/keyboard/lpc32xx-keys.c | 3 +- drivers/input/keyboard/matrix_keypad.c | 1 - drivers/input/keyboard/max7359_keypad.c | 2 +- drivers/input/keyboard/mpr121_touchkey.c | 2 +- drivers/input/keyboard/qt1050.c | 7 +- drivers/input/keyboard/qt1070.c | 4 +- drivers/input/keyboard/qt2160.c | 2 +- drivers/input/keyboard/stmpe-keypad.c | 1 - drivers/input/keyboard/tca6416-keypad.c | 6 - drivers/input/keyboard/tm2-touchkey.c | 4 +- drivers/input/misc/ad714x-i2c.c | 10 +- drivers/input/misc/adxl34x-i2c.c | 2 +- drivers/input/misc/apanel.c | 2 +- drivers/input/misc/atlas_btns.c | 1 - drivers/input/misc/atmel_captouch.c | 2 +- drivers/input/misc/bma150.c | 6 +- drivers/input/misc/cma3000_d0x_i2c.c | 4 +- drivers/input/misc/da7280.c | 1 - drivers/input/misc/drv260x.c | 2 +- drivers/input/misc/drv2665.c | 2 +- drivers/input/misc/drv2667.c | 2 +- drivers/input/misc/kxtj9.c | 4 +- drivers/input/misc/mma8450.c | 4 +- drivers/input/misc/pcf8574_keypad.c | 2 +- drivers/input/misc/pm8xxx-vibrator.c | 93 +- drivers/input/mouse/cyapa.c | 4 +- drivers/input/mouse/elan_i2c_core.c | 6 +- drivers/input/mouse/synaptics_i2c.c | 4 +- drivers/input/rmi4/rmi_i2c.c | 2 +- drivers/input/rmi4/rmi_smbus.c | 2 +- drivers/input/serio/ambakmi.c | 1 - drivers/input/serio/i8042-io.h | 5 +- drivers/input/touchscreen/ad7879-i2c.c | 4 +- drivers/input/touchscreen/ar1021_i2c.c | 4 +- drivers/input/touchscreen/atmel_mxt_ts.c | 10 +- drivers/input/touchscreen/auo-pixcir-ts.c | 2 +- drivers/input/touchscreen/bu21013_ts.c | 2 +- drivers/input/touchscreen/bu21029_ts.c | 2 +- drivers/input/touchscreen/chipone_icn8505.c | 1 - drivers/input/touchscreen/cy8ctma140.c | 2 +- drivers/input/touchscreen/cyttsp4_i2c.c | 2 +- drivers/input/touchscreen/cyttsp5.c | 2 +- drivers/input/touchscreen/cyttsp_i2c.c | 2 +- drivers/input/touchscreen/edt-ft5x06.c | 12 + drivers/input/touchscreen/eeti_ts.c | 2 +- drivers/input/touchscreen/egalax_ts.c | 2 +- drivers/input/touchscreen/ektf2127.c | 4 +- drivers/input/touchscreen/goodix.c | 2 +- drivers/input/touchscreen/goodix_berlin_i2c.c | 2 +- drivers/input/touchscreen/hideep.c | 2 +- drivers/input/touchscreen/himax_hx83112b.c | 2 +- drivers/input/touchscreen/ilitek_ts_i2c.c | 4 +- drivers/input/touchscreen/max11801_ts.c | 2 +- drivers/input/touchscreen/mcs5000_ts.c | 2 +- drivers/input/touchscreen/melfas_mip4.c | 4 +- drivers/input/touchscreen/migor_ts.c | 2 +- drivers/input/touchscreen/mms114.c | 2 +- drivers/input/touchscreen/raydium_i2c_ts.c | 4 +- drivers/input/touchscreen/rohm_bu21023.c | 2 +- drivers/input/touchscreen/s6sy761.c | 4 +- drivers/input/touchscreen/silead.c | 12 +- drivers/input/touchscreen/sis_i2c.c | 4 +- drivers/input/touchscreen/stmfts.c | 4 +- drivers/input/touchscreen/sur40.c | 2 +- drivers/input/touchscreen/tsc2004.c | 2 +- drivers/input/touchscreen/tsc2007_core.c | 2 +- drivers/input/touchscreen/wacom_i2c.c | 4 +- drivers/input/touchscreen/wdt87xx_i2c.c | 2 +- drivers/input/touchscreen/zet6223.c | 4 +- drivers/input/touchscreen/zforce_ts.c | 2 +- drivers/interconnect/qcom/qcm2290.c | 2 +- drivers/interconnect/qcom/sm6115.c | 33 +- drivers/interconnect/trace.h | 10 +- drivers/iommu/Kconfig | 25 +- drivers/iommu/amd/Kconfig | 3 + drivers/iommu/amd/Makefile | 2 +- drivers/iommu/amd/amd_iommu.h | 62 +- drivers/iommu/amd/amd_iommu_types.h | 33 + drivers/iommu/amd/init.c | 173 +- drivers/iommu/amd/io_pgtable.c | 13 +- drivers/iommu/amd/io_pgtable_v2.c | 18 +- drivers/iommu/amd/iommu.c | 311 +- 
drivers/iommu/amd/pasid.c | 198 + drivers/iommu/amd/ppr.c | 273 + drivers/iommu/arm/arm-smmu-v3/Makefile | 2 + drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 165 +- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c | 468 + drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 568 +- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 60 +- drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c | 481 + drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 47 + drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h | 4 + drivers/iommu/arm/arm-smmu/arm-smmu.c | 20 +- drivers/iommu/arm/arm-smmu/arm-smmu.h | 3 + drivers/iommu/dma-iommu.c | 86 +- drivers/iommu/dma-iommu.h | 14 +- drivers/iommu/exynos-iommu.c | 14 +- drivers/iommu/intel/Makefile | 2 +- drivers/iommu/intel/cache.c | 420 + drivers/iommu/intel/debugfs.c | 7 - drivers/iommu/intel/dmar.c | 26 +- drivers/iommu/intel/iommu.c | 374 +- drivers/iommu/intel/iommu.h | 88 +- drivers/iommu/intel/irq_remapping.c | 148 +- drivers/iommu/intel/nested.c | 69 +- drivers/iommu/intel/pasid.c | 18 +- drivers/iommu/intel/perf.h | 1 - drivers/iommu/intel/svm.c | 383 +- drivers/iommu/intel/trace.h | 103 +- drivers/iommu/io-pgtable-arm.c | 15 +- drivers/iommu/io-pgtable-dart.c | 37 +- drivers/iommu/iommu-pages.h | 186 + drivers/iommu/iommu-sva.c | 16 +- drivers/iommu/iommu.c | 29 +- drivers/iommu/iommufd/iova_bitmap.c | 5 +- drivers/iommu/iommufd/selftest.c | 2 +- drivers/iommu/irq_remapping.c | 10 +- drivers/iommu/irq_remapping.h | 2 +- drivers/iommu/rockchip-iommu.c | 14 +- drivers/iommu/s390-iommu.c | 6 - drivers/iommu/sprd-iommu.c | 2 +- drivers/iommu/sun50i-iommu.c | 7 +- drivers/iommu/tegra-smmu.c | 18 +- drivers/iommu/virtio-iommu.c | 11 - drivers/irqchip/Kconfig | 25 + drivers/irqchip/Makefile | 3 + drivers/irqchip/irq-bcm6345-l1.c | 6 +- drivers/irqchip/irq-brcmstb-l2.c | 17 +- drivers/irqchip/irq-gic-v3-its.c | 17 +- drivers/irqchip/irq-imx-irqsteer.c | 24 +- drivers/irqchip/irq-loongson-eiointc.c | 12 +- drivers/irqchip/irq-loongson-pch-pic.c | 76 +- drivers/irqchip/irq-mxs.c | 2 +- drivers/irqchip/irq-renesas-rzg2l.c | 28 +- drivers/irqchip/irq-riscv-aplic-direct.c | 323 + drivers/irqchip/irq-riscv-aplic-main.c | 211 + drivers/irqchip/irq-riscv-aplic-main.h | 52 + drivers/irqchip/irq-riscv-aplic-msi.c | 257 + drivers/irqchip/irq-riscv-imsic-early.c | 201 + drivers/irqchip/irq-riscv-imsic-platform.c | 375 + drivers/irqchip/irq-riscv-imsic-state.c | 865 ++ drivers/irqchip/irq-riscv-imsic-state.h | 108 + drivers/irqchip/irq-sifive-plic.c | 7 +- drivers/irqchip/irq-stm32-exti.c | 139 +- drivers/irqchip/irq-sunxi-nmi.c | 1 - drivers/irqchip/irq-tb10x.c | 1 - drivers/isdn/capi/Makefile | 3 +- drivers/isdn/capi/kcapi.c | 7 +- drivers/isdn/hardware/mISDN/hfcmulti.c | 7 +- drivers/leds/flash/leds-aat1290.c | 2 - drivers/leds/flash/leds-mt6360.c | 17 +- drivers/leds/flash/leds-qcom-flash.c | 10 +- drivers/leds/led-class.c | 1 - drivers/leds/led-triggers.c | 14 +- drivers/leds/leds-apu.c | 3 +- drivers/leds/leds-aw200xx.c | 32 +- drivers/leds/leds-aw2013.c | 25 +- drivers/leds/leds-lm3532.c | 29 +- drivers/leds/leds-lp3952.c | 21 +- drivers/leds/leds-lp50xx.c | 5 - drivers/leds/leds-nic78bx.c | 23 +- drivers/leds/leds-ss4200.c | 7 +- drivers/leds/leds-sun50i-a100.c | 14 +- drivers/leds/rgb/leds-mt6370-rgb.c | 1 - drivers/leds/rgb/leds-qcom-lpg.c | 16 +- drivers/leds/simple/simatic-ipc-leds-gpio-core.c | 1 + drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c | 52 +- drivers/leds/trigger/Kconfig | 7 - drivers/leds/trigger/Makefile | 1 - drivers/leds/trigger/ledtrig-audio.c | 67 - 
drivers/leds/trigger/ledtrig-netdev.c | 2 - drivers/leds/trigger/ledtrig-pattern.c | 126 +- drivers/leds/trigger/ledtrig-timer.c | 5 - drivers/macintosh/Kconfig | 2 +- drivers/macintosh/macio-adb.c | 24 +- drivers/macintosh/therm_windtunnel.c | 2 +- drivers/mailbox/Kconfig | 21 +- drivers/mailbox/Makefile | 2 + drivers/mailbox/arm_mhuv3.c | 1103 ++ drivers/mailbox/bcm-pdc-mailbox.c | 21 +- drivers/mailbox/imx-mailbox.c | 26 +- drivers/mailbox/mtk-cmdq-mailbox.c | 13 +- drivers/mailbox/omap-mailbox.c | 520 +- drivers/mailbox/zynqmp-ipi-mailbox.c | 411 +- drivers/mcb/mcb-lpc.c | 6 +- drivers/md/bcache/alloc.c | 21 +- drivers/md/bcache/bcache.h | 1 + drivers/md/bcache/btree.c | 7 +- drivers/md/bcache/request.c | 16 +- drivers/md/bcache/super.c | 16 +- drivers/md/dm-bio-prison-v2.c | 3 +- drivers/md/dm-cache-target.c | 17 +- drivers/md/dm-clone-metadata.c | 5 - drivers/md/dm-clone-target.c | 18 +- drivers/md/dm-core.h | 2 +- drivers/md/dm-crypt.c | 73 +- drivers/md/dm-delay.c | 46 +- drivers/md/dm-era-target.c | 3 +- drivers/md/dm-log-writes.c | 2 +- drivers/md/dm-mpath.c | 3 +- drivers/md/dm-raid.c | 5 +- drivers/md/dm-snap.c | 2 +- drivers/md/dm-table.c | 44 +- drivers/md/dm-target.c | 1 - drivers/md/dm-thin.c | 16 +- drivers/md/dm-vdo/Makefile | 2 +- drivers/md/dm-vdo/data-vio.c | 3 +- drivers/md/dm-vdo/dm-vdo-target.c | 6 +- drivers/md/dm-vdo/flush.c | 3 +- drivers/md/dm-vdo/indexer/io-factory.c | 2 +- drivers/md/dm-verity-target.c | 16 +- drivers/md/dm-zero.c | 1 - drivers/md/dm-zone.c | 510 +- drivers/md/dm-zoned-target.c | 1 - drivers/md/dm.c | 74 +- drivers/md/dm.h | 6 +- drivers/md/md-bitmap.c | 6 +- drivers/md/md-cluster.c | 22 +- drivers/md/md.c | 39 +- drivers/md/md.h | 3 +- drivers/md/raid0.c | 21 +- drivers/md/raid1.c | 15 +- drivers/md/raid5.c | 76 +- drivers/media/cec/core/cec-core.c | 4 +- drivers/media/cec/platform/cros-ec/cros-ec-cec.c | 9 +- drivers/media/cec/platform/sti/stih-cec.c | 1 + drivers/media/common/saa7146/saa7146_hlp.c | 8 +- drivers/media/common/videobuf2/videobuf2-core.c | 231 +- drivers/media/common/videobuf2/videobuf2-v4l2.c | 34 +- drivers/media/dvb-frontends/af9013.c | 2 +- drivers/media/dvb-frontends/cxd2880/Kconfig | 2 +- drivers/media/dvb-frontends/drx39xyj/drx_driver.h | 2 - drivers/media/dvb-frontends/drx39xyj/drxj.c | 58 +- drivers/media/dvb-frontends/lgdt3306a.c | 2 +- drivers/media/dvb-frontends/m88ds3103.c | 9 +- drivers/media/dvb-frontends/rtl2830.c | 2 +- drivers/media/dvb-frontends/rtl2832.c | 2 +- drivers/media/dvb-frontends/si2165.c | 6 +- drivers/media/dvb-frontends/si2168.c | 2 +- drivers/media/dvb-frontends/stb0899_drv.c | 2 +- drivers/media/i2c/Kconfig | 3 + drivers/media/i2c/adv7180.c | 2 +- drivers/media/i2c/adv748x/adv748x-hdmi.c | 16 +- drivers/media/i2c/adv7511-v4l2.c | 16 +- drivers/media/i2c/adv7604.c | 20 +- drivers/media/i2c/adv7842.c | 25 +- drivers/media/i2c/alvium-csi2.c | 6 +- drivers/media/i2c/dw9714.c | 6 +- drivers/media/i2c/hi556.c | 105 +- drivers/media/i2c/hi846.c | 2 +- drivers/media/i2c/imx214.c | 1 + drivers/media/i2c/imx219.c | 79 +- drivers/media/i2c/imx335.c | 637 +- drivers/media/i2c/imx412.c | 9 +- drivers/media/i2c/max9271.h | 5 + drivers/media/i2c/max9286.c | 2 +- drivers/media/i2c/ov2680.c | 90 +- drivers/media/i2c/ov2740.c | 9 +- drivers/media/i2c/ov4689.c | 673 +- drivers/media/i2c/rdacm20.c | 4 +- drivers/media/i2c/tc358743.c | 25 +- drivers/media/i2c/tda1997x.c | 14 +- drivers/media/i2c/ths7303.c | 10 +- drivers/media/i2c/ths8200.c | 14 +- drivers/media/i2c/tvp7002.c | 32 +- 
drivers/media/mmc/siano/smssdio.c | 25 +- drivers/media/pci/cobalt/cobalt-v4l2.c | 12 +- drivers/media/pci/intel/Kconfig | 4 +- drivers/media/pci/intel/Makefile | 1 + drivers/media/pci/intel/ipu3/ipu3-cio2.c | 19 +- drivers/media/pci/intel/ipu3/ipu3-cio2.h | 4 - drivers/media/pci/intel/ipu6/Kconfig | 18 + drivers/media/pci/intel/ipu6/Makefile | 23 + drivers/media/pci/intel/ipu6/ipu6-bus.c | 165 + drivers/media/pci/intel/ipu6/ipu6-bus.h | 58 + drivers/media/pci/intel/ipu6/ipu6-buttress.c | 917 ++ drivers/media/pci/intel/ipu6/ipu6-buttress.h | 92 + drivers/media/pci/intel/ipu6/ipu6-cpd.c | 362 + drivers/media/pci/intel/ipu6/ipu6-cpd.h | 105 + drivers/media/pci/intel/ipu6/ipu6-dma.c | 502 + drivers/media/pci/intel/ipu6/ipu6-dma.h | 19 + drivers/media/pci/intel/ipu6/ipu6-fw-com.c | 413 + drivers/media/pci/intel/ipu6/ipu6-fw-com.h | 47 + drivers/media/pci/intel/ipu6/ipu6-fw-isys.c | 487 + drivers/media/pci/intel/ipu6/ipu6-fw-isys.h | 596 + drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c | 663 ++ drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h | 82 + drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c | 536 + drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c | 242 + drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c | 720 ++ drivers/media/pci/intel/ipu6/ipu6-isys-queue.c | 810 ++ drivers/media/pci/intel/ipu6/ipu6-isys-queue.h | 78 + drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c | 403 + drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h | 59 + drivers/media/pci/intel/ipu6/ipu6-isys-video.c | 1420 +++ drivers/media/pci/intel/ipu6/ipu6-isys-video.h | 141 + drivers/media/pci/intel/ipu6/ipu6-isys.c | 1382 +++ drivers/media/pci/intel/ipu6/ipu6-isys.h | 206 + drivers/media/pci/intel/ipu6/ipu6-mmu.c | 846 ++ drivers/media/pci/intel/ipu6/ipu6-mmu.h | 73 + .../pci/intel/ipu6/ipu6-platform-buttress-regs.h | 226 + .../pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h | 172 + drivers/media/pci/intel/ipu6/ipu6-platform-regs.h | 179 + drivers/media/pci/intel/ipu6/ipu6.c | 853 ++ drivers/media/pci/intel/ipu6/ipu6.h | 342 + drivers/media/pci/intel/ivsc/Kconfig | 1 + drivers/media/pci/intel/ivsc/mei_csi.c | 37 +- drivers/media/pci/ivtv/ivtv-udma.c | 8 + drivers/media/pci/ivtv/ivtv-yuv.c | 6 + drivers/media/pci/ivtv/ivtvfb.c | 6 +- drivers/media/pci/mgb4/mgb4_core.c | 4 +- drivers/media/pci/mgb4/mgb4_regs.c | 2 +- drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c | 2 +- drivers/media/pci/saa7134/saa7134-alsa.c | 9 +- drivers/media/pci/saa7134/saa7134-cards.c | 2 +- drivers/media/pci/saa7134/saa7134-dvb.c | 8 +- drivers/media/pci/solo6x10/solo6x10-core.c | 16 +- drivers/media/pci/ttpci/budget-av.c | 573 +- drivers/media/pci/ttpci/budget-ci.c | 495 +- drivers/media/pci/ttpci/budget-core.c | 38 +- drivers/media/pci/ttpci/budget.c | 173 +- drivers/media/pci/ttpci/budget.h | 21 +- drivers/media/platform/Kconfig | 1 + drivers/media/platform/Makefile | 1 + drivers/media/platform/broadcom/Kconfig | 23 + drivers/media/platform/broadcom/Makefile | 3 + .../media/platform/broadcom/bcm2835-unicam-regs.h | 246 + drivers/media/platform/broadcom/bcm2835-unicam.c | 2739 +++++ .../platform/chips-media/wave5/wave5-helper.c | 17 +- .../platform/chips-media/wave5/wave5-vpu-dec.c | 13 +- .../platform/chips-media/wave5/wave5-vpu-enc.c | 13 +- .../media/platform/chips-media/wave5/wave5-vpu.c | 125 +- .../platform/chips-media/wave5/wave5-vpuapi.h | 4 + .../media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c | 10 +- .../media/platform/mediatek/mdp3/mtk-mdp3-core.c | 6 +- .../media/platform/mediatek/mdp3/mtk-mdp3-core.h | 2 +- 
.../media/platform/mediatek/mdp3/mtk-mdp3-m2m.c | 6 +- .../mediatek/vcodec/common/mtk_vcodec_util.c | 23 +- .../mediatek/vcodec/decoder/mtk_vcodec_dec.c | 2 +- .../mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h | 4 +- .../vcodec/decoder/vdec/vdec_av1_req_lat_if.c | 20 +- .../mediatek/vcodec/decoder/vdec/vdec_h264_if.c | 12 +- .../vcodec/decoder/vdec/vdec_h264_req_common.h | 15 +- .../vcodec/decoder/vdec/vdec_h264_req_if.c | 14 +- .../vcodec/decoder/vdec/vdec_h264_req_multi_if.c | 6 +- .../vcodec/decoder/vdec/vdec_hevc_req_multi_if.c | 4 +- .../mediatek/vcodec/decoder/vdec/vdec_vp8_if.c | 6 +- .../mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c | 2 +- .../mediatek/vcodec/decoder/vdec/vdec_vp9_if.c | 4 +- .../mediatek/vcodec/decoder/vdec_msg_queue.h | 4 +- .../platform/mediatek/vcodec/decoder/vdec_vpu_if.c | 6 + .../platform/mediatek/vcodec/decoder/vdec_vpu_if.h | 4 +- .../mediatek/vcodec/encoder/mtk_vcodec_enc.c | 2 +- .../platform/mediatek/vcodec/encoder/venc_drv_if.h | 2 +- drivers/media/platform/nvidia/tegra-vde/h264.c | 6 +- drivers/media/platform/nvidia/tegra-vde/trace.h | 2 +- drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c | 3 + drivers/media/platform/nxp/imx-mipi-csis.c | 34 +- drivers/media/platform/nxp/imx-pxp.c | 3 + drivers/media/platform/qcom/camss/Makefile | 2 +- .../platform/qcom/camss/camss-csiphy-3ph-1-0.c | 108 +- drivers/media/platform/qcom/camss/camss-csiphy.c | 1 + drivers/media/platform/qcom/camss/camss-vfe-170.c | 699 -- drivers/media/platform/qcom/camss/camss-vfe-17x.c | 699 ++ drivers/media/platform/qcom/camss/camss-vfe.c | 25 +- drivers/media/platform/qcom/camss/camss-video.c | 1 + drivers/media/platform/qcom/camss/camss.c | 307 + drivers/media/platform/qcom/camss/camss.h | 1 + drivers/media/platform/qcom/venus/vdec.c | 3 +- drivers/media/platform/renesas/rcar-csi2.c | 5 +- drivers/media/platform/renesas/rcar-vin/rcar-dma.c | 16 +- .../media/platform/renesas/rcar-vin/rcar-v4l2.c | 9 +- drivers/media/platform/renesas/vsp1/vsp1_histo.c | 20 +- drivers/media/platform/renesas/vsp1/vsp1_pipe.h | 2 +- drivers/media/platform/renesas/vsp1/vsp1_rpf.c | 8 +- drivers/media/platform/st/sti/c8sectpfe/Kconfig | 1 - drivers/media/platform/st/sti/c8sectpfe/Makefile | 7 +- .../platform/st/sti/c8sectpfe/c8sectpfe-core.c | 3 +- .../platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h | 5 + drivers/media/platform/st/sti/hva/hva-hw.c | 3 +- .../platform/st/stm32/stm32-dcmipp/dcmipp-core.c | 11 +- drivers/media/platform/ti/davinci/vpif_capture.c | 4 +- drivers/media/platform/ti/davinci/vpif_display.c | 2 +- .../media/platform/verisilicon/hantro_h1_regs.h | 4 +- drivers/media/platform/verisilicon/hantro_v4l2.c | 1 + drivers/media/rc/gpio-ir-recv.c | 1 - drivers/media/rc/imon.c | 17 +- drivers/media/rc/ir-spi.c | 41 +- drivers/media/rc/lirc_dev.c | 22 +- drivers/media/rc/mtk-cir.c | 1 + drivers/media/rc/serial_ir.c | 1 + drivers/media/rc/st_rc.c | 1 + drivers/media/rc/sunxi-cir.c | 1 + drivers/media/spi/cxd2880-spi.c | 2 +- drivers/media/spi/gs1662.c | 27 +- drivers/media/test-drivers/vicodec/vicodec-core.c | 1 + drivers/media/test-drivers/vimc/vimc-capture.c | 3 +- drivers/media/test-drivers/visl/visl-video.c | 1 + drivers/media/test-drivers/vivid/vivid-core.c | 9 +- drivers/media/test-drivers/vivid/vivid-meta-out.c | 4 - drivers/media/test-drivers/vivid/vivid-touch-cap.c | 4 - drivers/media/tuners/xc5000.c | 39 +- drivers/media/usb/as102/as102_usb_drv.c | 2 +- drivers/media/usb/au0828/au0828-video.c | 5 +- drivers/media/usb/b2c2/flexcop-usb.c | 15 +- drivers/media/usb/cx231xx/cx231xx-i2c.c | 5 +- 
drivers/media/usb/dvb-usb-v2/af9035.c | 10 +- drivers/media/usb/dvb-usb-v2/anysee.c | 4 +- drivers/media/usb/dvb-usb/dvb-usb-init.c | 35 +- drivers/media/usb/dvb-usb/dw2102.c | 360 +- drivers/media/usb/go7007/go7007-fw.c | 4 +- drivers/media/usb/gspca/cpia1.c | 6 +- drivers/media/usb/siano/smsusb.c | 20 +- drivers/media/usb/stk1160/stk1160-video.c | 10 +- drivers/media/usb/uvc/uvc_ctrl.c | 35 +- drivers/media/usb/uvc/uvc_driver.c | 35 +- drivers/media/usb/uvc/uvc_video.c | 21 +- drivers/media/usb/uvc/uvcvideo.h | 2 + drivers/media/v4l2-core/v4l2-async.c | 21 +- drivers/media/v4l2-core/v4l2-common.c | 2 + drivers/media/v4l2-core/v4l2-ctrls-api.c | 33 +- drivers/media/v4l2-core/v4l2-ctrls-core.c | 6 + drivers/media/v4l2-core/v4l2-dev.c | 3 + drivers/media/v4l2-core/v4l2-device.c | 12 +- drivers/media/v4l2-core/v4l2-i2c.c | 2 +- drivers/media/v4l2-core/v4l2-ioctl.c | 73 +- drivers/media/v4l2-core/v4l2-mem2mem.c | 15 + drivers/media/v4l2-core/v4l2-spi.c | 2 +- drivers/media/v4l2-core/v4l2-subdev.c | 89 +- drivers/memory/Kconfig | 2 +- drivers/memory/brcmstb_memc.c | 1 + drivers/memory/mtk-smi.c | 2 + drivers/memory/pl353-smc.c | 1 - drivers/memstick/host/rtsx_pci_ms.c | 9 +- drivers/memstick/host/rtsx_usb_ms.c | 6 +- drivers/message/fusion/mptfc.c | 1 + drivers/message/fusion/mptsas.c | 15 +- drivers/message/fusion/mptscsih.c | 2 - drivers/message/fusion/mptspi.c | 1 + drivers/mfd/Kconfig | 16 +- drivers/mfd/Makefile | 6 +- drivers/mfd/axp20x-i2c.c | 2 + drivers/mfd/axp20x-rsb.c | 1 + drivers/mfd/axp20x.c | 91 + drivers/mfd/cs42l43.c | 36 +- drivers/mfd/intel-lpss-pci.c | 2 +- drivers/mfd/intel-m10-bmc-pmci.c | 1 + drivers/mfd/intel-m10-bmc-spi.c | 1 + drivers/mfd/kempld-core.c | 227 +- drivers/mfd/ocelot-spi.c | 5 +- drivers/mfd/omap-usb-tll.c | 3 +- drivers/mfd/rk8xx-core.c | 104 + drivers/mfd/rk8xx-i2c.c | 45 +- drivers/mfd/rohm-bd71828.c | 36 +- drivers/mfd/rsmu_core.c | 2 + drivers/mfd/rsmu_i2c.c | 107 +- drivers/mfd/rsmu_spi.c | 8 +- drivers/mfd/ssbi.c | 1 - drivers/mfd/timberdale.c | 1 - drivers/mfd/tps6594-core.c | 253 +- drivers/mfd/tps6594-i2c.c | 20 +- drivers/mfd/tps6594-spi.c | 20 +- drivers/misc/Kconfig | 31 +- drivers/misc/Makefile | 1 + drivers/misc/cardreader/rtsx_pcr.c | 12 +- drivers/misc/ds1682.c | 37 + drivers/misc/eeprom/at25.c | 1 - drivers/misc/eeprom/ee1004.c | 6 +- drivers/misc/eeprom/eeprom_93xx46.c | 2 - drivers/misc/lkdtm/Makefile | 4 - drivers/misc/mei/bus.c | 2 +- drivers/misc/mei/hw.h | 2 - drivers/misc/mei/mei-trace.h | 6 +- drivers/misc/mei/vsc-fw-loader.c | 2 +- drivers/misc/mei/vsc-tp.c | 2 +- drivers/misc/nsm.c | 1 - drivers/misc/ntsync.c | 249 + drivers/misc/pvpanic/pvpanic.c | 43 +- drivers/misc/ti-st/st_kim.c | 4 +- drivers/misc/tifm_core.c | 2 +- drivers/misc/tps6594-pfsm.c | 48 +- drivers/misc/vmw_vmci/vmci_guest.c | 3 +- drivers/mmc/core/block.c | 8 +- drivers/mmc/core/debugfs.c | 7 +- drivers/mmc/core/host.c | 1 - drivers/mmc/core/sd_ops.c | 83 +- drivers/mmc/core/sdio_bus.c | 9 +- drivers/mmc/core/slot-gpio.c | 5 +- drivers/mmc/host/Kconfig | 1 + drivers/mmc/host/atmel-mci.c | 309 +- drivers/mmc/host/cqhci-core.c | 11 +- drivers/mmc/host/cqhci.h | 4 + drivers/mmc/host/dw_mmc-hi3798cv200.c | 1 - drivers/mmc/host/dw_mmc-hi3798mv200.c | 1 - drivers/mmc/host/mtk-sd.c | 1 - drivers/mmc/host/renesas_sdhi_core.c | 3 + drivers/mmc/host/renesas_sdhi_internal_dmac.c | 9 +- drivers/mmc/host/sdhci-esdhc-mcf.c | 2 +- drivers/mmc/host/sdhci-of-dwcmshc.c | 305 +- drivers/mmc/host/sdhci-omap.c | 2 +- drivers/mmc/host/sdhci-pci-core.c | 2 +- 
drivers/mmc/host/sdhci-pci-gli.c | 46 +- drivers/mmc/host/sdhci-s3c.c | 35 +- drivers/mmc/host/sdhci-sprd.c | 3 +- drivers/mmc/host/sdhci_am654.c | 14 +- drivers/mtd/devices/block2mtd.c | 6 +- drivers/mtd/devices/mchp23k256.c | 1 - drivers/mtd/maps/sa1100-flash.c | 6 +- drivers/mtd/mtdcore.c | 3 + drivers/mtd/nand/raw/Kconfig | 3 +- drivers/mtd/nand/raw/davinci_nand.c | 5 +- drivers/mtd/spi-nor/core.c | 4 +- drivers/mtd/spi-nor/winbond.c | 2 + drivers/mtd/tests/Makefile | 34 +- drivers/mtd/tests/mtd_test.c | 9 + drivers/mtd/ubi/eba.c | 3 +- drivers/mux/core.c | 4 +- drivers/net/Kconfig | 16 +- drivers/net/Makefile | 1 + drivers/net/arcnet/Kconfig | 2 +- drivers/net/arcnet/arcdevice.h | 3 +- drivers/net/arcnet/arcnet.c | 11 +- drivers/net/bareudp.c | 19 +- drivers/net/bonding/bond_main.c | 19 +- drivers/net/bonding/bond_netlink.c | 3 +- drivers/net/bonding/bond_options.c | 2 +- drivers/net/bonding/bond_procfs.c | 2 +- drivers/net/bonding/bond_sysfs.c | 25 +- drivers/net/bonding/bond_sysfs_slave.c | 2 +- drivers/net/caif/caif_virtio.c | 1 - drivers/net/can/cc770/Kconfig | 1 + drivers/net/can/dev/dev.c | 2 +- drivers/net/can/sja1000/Kconfig | 1 + drivers/net/can/vcan.c | 2 +- drivers/net/can/vxcan.c | 2 +- drivers/net/dsa/b53/b53_common.c | 211 +- drivers/net/dsa/b53/b53_priv.h | 12 - drivers/net/dsa/bcm_sf2.c | 49 +- drivers/net/dsa/hirschmann/hellcreek_ptp.c | 25 +- drivers/net/dsa/lan9303-core.c | 38 +- drivers/net/dsa/lantiq_gswip.c | 39 +- drivers/net/dsa/microchip/Kconfig | 2 + drivers/net/dsa/microchip/Makefile | 2 +- drivers/net/dsa/microchip/ksz8.h | 9 +- drivers/net/dsa/microchip/ksz8795.c | 251 +- drivers/net/dsa/microchip/ksz8795_reg.h | 10 +- drivers/net/dsa/microchip/ksz9477.c | 57 +- drivers/net/dsa/microchip/ksz9477.h | 2 + drivers/net/dsa/microchip/ksz9477_reg.h | 10 +- drivers/net/dsa/microchip/ksz9477_tc_flower.c | 3 + drivers/net/dsa/microchip/ksz_common.c | 242 +- drivers/net/dsa/microchip/ksz_common.h | 17 +- drivers/net/dsa/microchip/ksz_dcb.c | 819 ++ drivers/net/dsa/microchip/ksz_dcb.h | 23 + drivers/net/dsa/microchip/ksz_spi.c | 8 - drivers/net/dsa/mt7530-mdio.c | 28 +- drivers/net/dsa/mt7530.c | 467 +- drivers/net/dsa/mt7530.h | 293 +- drivers/net/dsa/mv88e6xxx/chip.c | 66 +- drivers/net/dsa/mv88e6xxx/trace.h | 4 +- drivers/net/dsa/ocelot/felix_vsc9959.c | 3 + drivers/net/dsa/qca/ar9331.c | 37 +- drivers/net/dsa/qca/qca8k-8xxx.c | 49 +- drivers/net/dsa/realtek/realtek.h | 2 + drivers/net/dsa/realtek/rtl8365mb.c | 32 +- drivers/net/dsa/realtek/rtl8366rb.c | 333 +- drivers/net/dsa/realtek/rtl83xx.c | 1 + drivers/net/dsa/rzn1_a5psw.c | 47 +- drivers/net/dsa/sja1105/sja1105_flower.c | 3 + drivers/net/dsa/sja1105/sja1105_main.c | 39 +- drivers/net/dsa/vitesse-vsc73xx-core.c | 255 +- drivers/net/dsa/vitesse-vsc73xx.h | 27 +- drivers/net/dsa/xrs700x/xrs700x.c | 26 +- drivers/net/ethernet/3com/3c515.c | 3 - drivers/net/ethernet/3com/3c589_cs.c | 2 +- drivers/net/ethernet/3com/Kconfig | 4 +- drivers/net/ethernet/8390/Kconfig | 6 +- drivers/net/ethernet/8390/etherh.c | 2 +- drivers/net/ethernet/8390/pcnet_cs.c | 2 +- drivers/net/ethernet/adi/adin1110.c | 2 +- drivers/net/ethernet/agere/et131x.c | 2 +- drivers/net/ethernet/alteon/acenic.c | 2 +- drivers/net/ethernet/altera/altera_tse_main.c | 2 +- drivers/net/ethernet/amazon/ena/ena_com.h | 6 +- drivers/net/ethernet/amazon/ena/ena_eth_com.h | 2 +- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 17 +- drivers/net/ethernet/amazon/ena/ena_netdev.c | 37 +- drivers/net/ethernet/amazon/ena/ena_netdev.h | 1 + 
drivers/net/ethernet/amd/Kconfig | 4 +- drivers/net/ethernet/amd/amd8111e.c | 7 +- drivers/net/ethernet/amd/amd8111e.h | 1 - drivers/net/ethernet/amd/nmclan_cs.c | 2 +- drivers/net/ethernet/amd/pds_core/core.h | 3 +- drivers/net/ethernet/amd/pds_core/devlink.c | 3 +- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2 +- drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 2 +- drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 8 - drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2 +- drivers/net/ethernet/aquantia/atlantic/Makefile | 2 +- drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 2 +- drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 2 +- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 2 +- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 2 +- .../net/ethernet/aquantia/atlantic/aq_pci_func.c | 9 +- .../ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 2 +- .../ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 2 +- .../ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c | 2 +- drivers/net/ethernet/atheros/ag71xx.c | 2 +- drivers/net/ethernet/atheros/alx/main.c | 4 +- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2 +- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 2 +- drivers/net/ethernet/atheros/atlx/atl1.c | 2 +- drivers/net/ethernet/atheros/atlx/atl2.c | 2 +- drivers/net/ethernet/broadcom/b44.c | 4 +- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 2 +- drivers/net/ethernet/broadcom/bnx2.c | 2 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 725 +- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 47 +- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 13 +- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 247 +- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 491 +- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 30 +- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 5 + drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 4 + drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 169 +- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 17 +- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 30 +- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 2 +- drivers/net/ethernet/broadcom/cnic.c | 3 +- drivers/net/ethernet/broadcom/tg3.c | 32 +- drivers/net/ethernet/brocade/bna/bna_types.h | 2 +- drivers/net/ethernet/brocade/bna/bnad.c | 13 +- drivers/net/ethernet/cadence/macb_main.c | 2 +- drivers/net/ethernet/calxeda/xgmac.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_core.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | 2 +- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 2 +- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2 +- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 2 +- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 67 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 2 +- .../net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2 +- .../chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 1 + drivers/net/ethernet/chelsio/libcxgb/Makefile | 2 +- drivers/net/ethernet/cisco/enic/enic_main.c | 2 +- drivers/net/ethernet/cisco/enic/vnic_dev.c | 20 +- drivers/net/ethernet/cisco/enic/vnic_dev.h | 5 + drivers/net/ethernet/cortina/gemini.c | 25 +- drivers/net/ethernet/dlink/sundance.c | 2 +- drivers/net/ethernet/emulex/benet/be_main.c | 5 +- drivers/net/ethernet/engleder/tsnep_main.c | 2 +- drivers/net/ethernet/faraday/ftmac100.c | 2 +- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2 +- .../net/ethernet/freescale/dpaa/dpaa_eth_trace.h | 2 +- 
.../net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h | 4 +- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2 +- .../ethernet/freescale/dpaa2/dpaa2-switch-flower.c | 6 + .../net/ethernet/freescale/dpaa2/dpaa2-switch.c | 2 +- drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c | 2 +- drivers/net/ethernet/freescale/fec_main.c | 6 + drivers/net/ethernet/freescale/fman/fman_memac.c | 1 - drivers/net/ethernet/freescale/fman/fman_muram.c | 1 - drivers/net/ethernet/freescale/gianfar.c | 2 +- drivers/net/ethernet/fujitsu/Kconfig | 2 +- drivers/net/ethernet/fungible/funeth/Makefile | 2 +- drivers/net/ethernet/fungible/funeth/funeth_main.c | 2 +- .../net/ethernet/fungible/funeth/funeth_trace.h | 6 +- drivers/net/ethernet/google/gve/gve.h | 97 +- drivers/net/ethernet/google/gve/gve_adminq.c | 229 +- drivers/net/ethernet/google/gve/gve_adminq.h | 50 +- drivers/net/ethernet/google/gve/gve_dqo.h | 6 + drivers/net/ethernet/google/gve/gve_ethtool.c | 121 +- drivers/net/ethernet/google/gve/gve_main.c | 619 +- drivers/net/ethernet/google/gve/gve_rx.c | 138 +- drivers/net/ethernet/google/gve/gve_rx_dqo.c | 140 +- drivers/net/ethernet/google/gve/gve_tx.c | 36 +- drivers/net/ethernet/google/gve/gve_tx_dqo.c | 44 +- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2 +- drivers/net/ethernet/hisilicon/hns3/Makefile | 13 +- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 13 + .../hisilicon/hns3/hns3_common/hclge_comm_cmd.c | 30 + .../hisilicon/hns3/hns3_common/hclge_comm_cmd.h | 24 +- .../hisilicon/hns3/hns3_common/hclge_comm_rss.c | 14 + .../hns3/hns3_common/hclge_comm_tqp_stats.c | 5 + drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 6 +- drivers/net/ethernet/hisilicon/hns3/hns3_trace.h | 4 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 646 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 643 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c | 44 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h | 2 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 433 +- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 36 + .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 81 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1 + .../ethernet/hisilicon/hns3/hns3pf/hclge_trace.h | 102 +- .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 40 + .../ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h | 58 +- drivers/net/ethernet/huawei/hinic/hinic_main.c | 2 +- drivers/net/ethernet/ibm/emac/core.c | 4 +- drivers/net/ethernet/ibm/emac/mal.c | 14 +- drivers/net/ethernet/ibm/emac/mal.h | 2 +- drivers/net/ethernet/ibm/ibmveth.c | 2 +- drivers/net/ethernet/ibm/ibmvnic.c | 10 +- drivers/net/ethernet/intel/Kconfig | 9 +- drivers/net/ethernet/intel/Makefile | 3 + drivers/net/ethernet/intel/e100.c | 8 +- drivers/net/ethernet/intel/e1000/e1000_main.c | 16 +- drivers/net/ethernet/intel/e1000e/defines.h | 2 - drivers/net/ethernet/intel/e1000e/ethtool.c | 62 +- drivers/net/ethernet/intel/e1000e/ich8lan.c | 55 + drivers/net/ethernet/intel/e1000e/netdev.c | 42 +- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 10 +- drivers/net/ethernet/intel/i40e/i40e.h | 29 +- drivers/net/ethernet/intel/i40e/i40e_client.c | 28 +- drivers/net/ethernet/intel/i40e/i40e_common.c | 253 - drivers/net/ethernet/intel/i40e/i40e_ddp.c | 3 +- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 36 +- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 29 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 475 +- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 1050 +- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 7 - 
drivers/net/ethernet/intel/i40e/i40e_ptp.c | 6 +- drivers/net/ethernet/intel/i40e/i40e_trace.h | 10 +- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 92 +- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2 +- drivers/net/ethernet/intel/i40e/i40e_type.h | 88 - drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 14 +- drivers/net/ethernet/intel/i40e/i40e_xsk.c | 5 +- drivers/net/ethernet/intel/iavf/iavf.h | 2 +- drivers/net/ethernet/intel/iavf/iavf_common.c | 253 - drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 140 - drivers/net/ethernet/intel/iavf/iavf_main.c | 54 +- drivers/net/ethernet/intel/iavf/iavf_prototype.h | 7 - drivers/net/ethernet/intel/iavf/iavf_trace.h | 6 +- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 553 +- drivers/net/ethernet/intel/iavf/iavf_txrx.h | 146 +- drivers/net/ethernet/intel/iavf/iavf_type.h | 90 - drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 17 +- drivers/net/ethernet/intel/ice/Makefile | 7 +- drivers/net/ethernet/intel/ice/devlink/devlink.c | 1778 +++ drivers/net/ethernet/intel/ice/devlink/devlink.h | 25 + .../net/ethernet/intel/ice/devlink/devlink_port.c | 430 + .../net/ethernet/intel/ice/devlink/devlink_port.h | 12 + drivers/net/ethernet/intel/ice/ice.h | 26 +- drivers/net/ethernet/intel/ice/ice_adapter.c | 116 + drivers/net/ethernet/intel/ice/ice_adapter.h | 28 + drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 34 + drivers/net/ethernet/intel/ice/ice_base.c | 47 +- drivers/net/ethernet/intel/ice/ice_common.c | 21 +- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 6 +- drivers/net/ethernet/intel/ice/ice_ddp.c | 218 + drivers/net/ethernet/intel/ice/ice_ddp.h | 2 + drivers/net/ethernet/intel/ice/ice_devids.h | 22 +- drivers/net/ethernet/intel/ice/ice_devlink.c | 2022 ---- drivers/net/ethernet/intel/ice/ice_devlink.h | 25 - drivers/net/ethernet/intel/ice/ice_eswitch.c | 369 +- drivers/net/ethernet/intel/ice/ice_eswitch.h | 13 +- drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 142 +- drivers/net/ethernet/intel/ice/ice_fdir.c | 111 +- drivers/net/ethernet/intel/ice/ice_fdir.h | 8 + drivers/net/ethernet/intel/ice/ice_flex_type.h | 4 +- drivers/net/ethernet/intel/ice/ice_fw_update.c | 7 +- drivers/net/ethernet/intel/ice/ice_fw_update.h | 3 + drivers/net/ethernet/intel/ice/ice_lag.c | 53 +- drivers/net/ethernet/intel/ice/ice_lag.h | 3 +- drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 320 - drivers/net/ethernet/intel/ice/ice_lib.c | 83 +- drivers/net/ethernet/intel/ice/ice_lib.h | 39 +- drivers/net/ethernet/intel/ice/ice_main.c | 237 +- drivers/net/ethernet/intel/ice/ice_nvm.c | 7 +- drivers/net/ethernet/intel/ice/ice_nvm.h | 3 + drivers/net/ethernet/intel/ice/ice_protocol_type.h | 12 + drivers/net/ethernet/intel/ice/ice_ptp.c | 33 +- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 3 + drivers/net/ethernet/intel/ice/ice_repr.c | 141 +- drivers/net/ethernet/intel/ice/ice_repr.h | 24 +- drivers/net/ethernet/intel/ice/ice_sched.c | 37 +- drivers/net/ethernet/intel/ice/ice_sched.h | 11 + drivers/net/ethernet/intel/ice/ice_sriov.c | 42 +- drivers/net/ethernet/intel/ice/ice_sriov.h | 7 +- drivers/net/ethernet/intel/ice/ice_switch.c | 284 +- drivers/net/ethernet/intel/ice/ice_switch.h | 8 +- drivers/net/ethernet/intel/ice/ice_tc_lib.c | 128 +- drivers/net/ethernet/intel/ice/ice_tc_lib.h | 8 +- drivers/net/ethernet/intel/ice/ice_trace.h | 12 +- drivers/net/ethernet/intel/ice/ice_txrx.c | 3 +- drivers/net/ethernet/intel/ice/ice_txrx.h | 1 + drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 122 +- drivers/net/ethernet/intel/ice/ice_type.h | 5 +- 
drivers/net/ethernet/intel/ice/ice_vf_lib.c | 13 +- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 14 +- drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 16 + drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h | 1 + drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c | 1 - drivers/net/ethernet/intel/ice/ice_xsk.c | 5 +- drivers/net/ethernet/intel/idpf/idpf_lib.c | 2 +- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 5 +- drivers/net/ethernet/intel/idpf/idpf_txrx.h | 2 + drivers/net/ethernet/intel/idpf/virtchnl2.h | 24 +- drivers/net/ethernet/intel/igb/igb_ethtool.c | 15 - drivers/net/ethernet/intel/igb/igb_main.c | 64 +- drivers/net/ethernet/intel/igbvf/netdev.c | 8 +- drivers/net/ethernet/intel/igc/igc.h | 71 +- drivers/net/ethernet/intel/igc/igc_ethtool.c | 17 - drivers/net/ethernet/intel/igc/igc_main.c | 176 +- drivers/net/ethernet/intel/igc/igc_ptp.c | 51 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 21 +- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1 - drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 5 +- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 10 +- drivers/net/ethernet/intel/libeth/Kconfig | 9 + drivers/net/ethernet/intel/libeth/Makefile | 6 + drivers/net/ethernet/intel/libeth/rx.c | 150 + drivers/net/ethernet/intel/libie/Kconfig | 10 + drivers/net/ethernet/intel/libie/Makefile | 6 + drivers/net/ethernet/intel/libie/rx.c | 124 + drivers/net/ethernet/jme.c | 2 +- drivers/net/ethernet/lantiq_etop.c | 2 +- drivers/net/ethernet/lantiq_xrx200.c | 4 +- drivers/net/ethernet/marvell/mv643xx_eth.c | 2 +- drivers/net/ethernet/marvell/mvneta.c | 5 +- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 11 +- .../net/ethernet/marvell/octeon_ep/octep_main.c | 2 +- .../ethernet/marvell/octeon_ep/octep_pfvf_mbox.c | 1 + .../ethernet/marvell/octeon_ep_vf/octep_vf_main.c | 2 +- .../ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c | 1 + drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 27 + drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 1 + .../ethernet/marvell/octeontx2/af/lmac_common.h | 1 + drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 7 +- drivers/net/ethernet/marvell/octeontx2/af/rpm.c | 17 + drivers/net/ethernet/marvell/octeontx2/af/rpm.h | 3 + .../net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 29 + .../ethernet/marvell/octeontx2/af/rvu_devlink.c | 12 +- .../net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 2 + .../net/ethernet/marvell/octeontx2/af/rvu_trace.h | 12 +- .../ethernet/marvell/octeontx2/nic/otx2_common.h | 3 + .../ethernet/marvell/octeontx2/nic/otx2_devlink.c | 3 +- .../ethernet/marvell/octeontx2/nic/otx2_flows.c | 1 + .../net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 42 +- .../net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 21 +- .../net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 3 + .../net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 3 + .../net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 2 +- drivers/net/ethernet/marvell/octeontx2/nic/qos.c | 80 +- .../ethernet/marvell/prestera/prestera_flower.c | 4 + .../net/ethernet/marvell/prestera/prestera_hw.c | 83 +- .../net/ethernet/marvell/prestera/prestera_main.c | 6 +- .../net/ethernet/marvell/prestera/prestera_rxtx.c | 15 +- drivers/net/ethernet/marvell/pxa168_eth.c | 2 +- drivers/net/ethernet/marvell/skge.c | 4 +- drivers/net/ethernet/marvell/sky2.c | 5 +- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 18 +- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +- drivers/net/ethernet/mediatek/mtk_ppe.c | 2 +- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 4 + drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4 +- 
drivers/net/ethernet/mellanox/mlx4/en_rx.c | 1 + drivers/net/ethernet/mellanox/mlx4/main.c | 6 +- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 22 +- .../mellanox/mlx5/core/diag/cmd_tracepoint.h | 4 +- .../mellanox/mlx5/core/diag/en_rep_tracepoint.h | 2 +- .../mellanox/mlx5/core/diag/en_tc_tracepoint.h | 2 +- .../mellanox/mlx5/core/diag/fw_tracer_tracepoint.h | 5 +- drivers/net/ethernet/mellanox/mlx5/core/en.h | 45 +- .../net/ethernet/mellanox/mlx5/core/en/channels.c | 83 + .../net/ethernet/mellanox/mlx5/core/en/channels.h | 4 + drivers/net/ethernet/mellanox/mlx5/core/en/dim.h | 45 + .../net/ethernet/mellanox/mlx5/core/en/params.c | 72 +- .../net/ethernet/mellanox/mlx5/core/en/params.h | 5 - drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 50 +- drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 1 + .../net/ethernet/mellanox/mlx5/core/en/tc_tun.h | 2 +- .../ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 6 +- .../ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c | 12 +- .../ethernet/mellanox/mlx5/core/en/tc_tun_gre.c | 8 +- .../ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c | 9 +- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 2 +- .../net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 4 +- .../ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 2 +- .../ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h | 4 +- .../mellanox/mlx5/core/en_accel/ipsec_stats.c | 28 +- .../ethernet/mellanox/mlx5/core/en_accel/ktls.h | 14 +- .../mellanox/mlx5/core/en_accel/ktls_stats.c | 26 +- .../mellanox/mlx5/core/en_accel/macsec_stats.c | 22 +- drivers/net/ethernet/mellanox/mlx5/core/en_dim.c | 95 +- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 343 +- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 6 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 310 +- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 82 +- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +- .../net/ethernet/mellanox/mlx5/core/en_selftest.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 543 +- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 16 +- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 29 +- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 6 +- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 4 +- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 13 +- .../ethernet/mellanox/mlx5/core/esw/devlink_port.c | 4 + .../mellanox/mlx5/core/esw/diag/qos_tracepoint.h | 8 +- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 3 +- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 7 + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 112 +- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 3 +- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 3 +- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 2 +- .../ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 3 - .../net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 8 +- .../net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c | 254 +- .../net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h | 2 +- .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 12 +- .../net/ethernet/mellanox/mlx5/core/pagealloc.c | 3 + drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 56 +- .../mlx5/core/sf/dev/diag/dev_tracepoint.h | 2 +- .../mellanox/mlx5/core/sf/diag/sf_tracepoint.h | 14 +- .../mellanox/mlx5/core/sf/diag/vhca_tracepoint.h | 2 +- .../mellanox/mlx5/core/steering/dr_ste_v0.c | 2 +- .../mellanox/mlx5/core/steering/dr_ste_v1.c | 4 +- 
.../ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/pci.c | 530 +- drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 4 +- drivers/net/ethernet/mellanox/mlxsw/reg.h | 3 + drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 +- .../ethernet/mellanox/mlxsw/spectrum_acl_atcam.c | 18 +- .../mellanox/mlxsw/spectrum_acl_bloom_filter.c | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c | 13 - .../ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 3 +- .../ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 9 +- .../net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 60 + .../net/ethernet/mellanox/mlxsw/spectrum_flower.c | 4 + .../net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 56 +- .../net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum_span.c | 10 +- drivers/net/ethernet/micrel/ksz884x.c | 2 +- drivers/net/ethernet/microchip/encx24j600-regmap.c | 4 +- drivers/net/ethernet/microchip/encx24j600.c | 7 +- drivers/net/ethernet/microchip/encx24j600_hw.h | 2 +- drivers/net/ethernet/microchip/lan743x_ethtool.c | 21 - drivers/net/ethernet/microchip/lan743x_main.c | 13 +- drivers/net/ethernet/microchip/lan743x_ptp.c | 4 +- drivers/net/ethernet/microchip/lan743x_ptp.h | 1 + .../net/ethernet/microchip/lan966x/lan966x_ifh.h | 2 +- .../net/ethernet/microchip/lan966x/lan966x_main.c | 6 +- .../net/ethernet/microchip/lan966x/lan966x_main.h | 2 +- .../net/ethernet/microchip/lan966x/lan966x_port.c | 2 +- .../ethernet/microchip/lan966x/lan966x_tc_flower.c | 14 +- .../net/ethernet/microchip/lan966x/lan966x_vlan.c | 2 +- drivers/net/ethernet/microchip/sparx5/Makefile | 3 +- .../net/ethernet/microchip/sparx5/sparx5_fdma.c | 2 +- .../net/ethernet/microchip/sparx5/sparx5_main.c | 3 + .../net/ethernet/microchip/sparx5/sparx5_main.h | 25 + .../ethernet/microchip/sparx5/sparx5_main_regs.h | 68 + .../net/ethernet/microchip/sparx5/sparx5_mirror.c | 235 + .../net/ethernet/microchip/sparx5/sparx5_packet.c | 2 +- .../net/ethernet/microchip/sparx5/sparx5_port.c | 2 +- .../ethernet/microchip/sparx5/sparx5_switchdev.c | 2 +- .../ethernet/microchip/sparx5/sparx5_tc_flower.c | 88 +- .../ethernet/microchip/sparx5/sparx5_tc_matchall.c | 125 +- drivers/net/ethernet/microchip/vcap/vcap_ag_api.h | 2 +- drivers/net/ethernet/microchip/vcap/vcap_api.c | 16 +- .../net/ethernet/microchip/vcap/vcap_api_client.h | 4 +- .../net/ethernet/microchip/vcap/vcap_api_private.h | 2 +- drivers/net/ethernet/microsoft/Kconfig | 3 +- drivers/net/ethernet/microsoft/mana/hw_channel.c | 1 + drivers/net/ethernet/microsoft/mana/mana_en.c | 37 +- drivers/net/ethernet/mscc/ocelot_flower.c | 7 +- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 8 +- drivers/net/ethernet/natsemi/natsemi.c | 2 +- drivers/net/ethernet/neterion/s2io.c | 2 +- drivers/net/ethernet/netronome/nfp/devlink_param.c | 3 +- drivers/net/ethernet/netronome/nfp/flower/action.c | 27 +- .../net/ethernet/netronome/nfp/flower/offload.c | 6 +- drivers/net/ethernet/netronome/nfp/nfd3/xsk.c | 2 +- drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 1 + .../net/ethernet/netronome/nfp/nfp_net_common.c | 7 +- .../net/ethernet/netronome/nfp/nfp_net_debugdump.c | 41 +- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 2 +- drivers/net/ethernet/ni/nixge.c | 2 +- drivers/net/ethernet/nvidia/forcedeth.c | 2 +- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 2 +- drivers/net/ethernet/pasemi/pasemi_mac.c | 2 +- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 4 +- drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 2 +- 
drivers/net/ethernet/qlogic/qed/qed.h | 2 - drivers/net/ethernet/qlogic/qed/qed_devlink.c | 3 +- drivers/net/ethernet/qlogic/qed/qed_main.c | 5 +- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 2 +- drivers/net/ethernet/qlogic/qede/qede_filter.c | 138 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 2 +- drivers/net/ethernet/qualcomm/emac/emac.c | 2 +- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 2 +- drivers/net/ethernet/realtek/8139cp.c | 4 +- drivers/net/ethernet/realtek/r8169_main.c | 6 +- drivers/net/ethernet/renesas/ravb_main.c | 11 +- drivers/net/ethernet/renesas/sh_eth.c | 2 +- drivers/net/ethernet/rocker/rocker_main.c | 2 +- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 2 +- drivers/net/ethernet/sfc/efx_common.c | 2 +- drivers/net/ethernet/sfc/falcon/efx.c | 2 +- drivers/net/ethernet/sfc/siena/efx_common.c | 2 +- drivers/net/ethernet/sfc/tc.c | 7 +- drivers/net/ethernet/sis/Kconfig | 4 +- drivers/net/ethernet/sis/sis900.c | 6 +- drivers/net/ethernet/smsc/Kconfig | 2 +- drivers/net/ethernet/smsc/smc91c92_cs.c | 2 +- drivers/net/ethernet/stmicro/stmmac/Kconfig | 12 + drivers/net/ethernet/stmicro/stmmac/Makefile | 1 + drivers/net/ethernet/stmicro/stmmac/common.h | 2 +- .../net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 12 + drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 2 - drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c | 86 + .../net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 107 +- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 10 +- .../net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 2 +- drivers/net/ethernet/stmicro/stmmac/hwif.h | 10 +- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 18 +- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 96 +- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 52 +- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 22 +- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 40 +- drivers/net/ethernet/sun/cassini.c | 3 +- drivers/net/ethernet/sun/niu.c | 2 +- drivers/net/ethernet/sun/sungem.c | 2 +- drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 2 +- drivers/net/ethernet/tehuti/tehuti.c | 2 +- drivers/net/ethernet/ti/Kconfig | 17 +- drivers/net/ethernet/ti/Makefile | 9 + drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 13 +- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 704 +- drivers/net/ethernet/ti/am65-cpsw-nuss.h | 13 + drivers/net/ethernet/ti/am65-cpsw-qos.c | 19 +- drivers/net/ethernet/ti/am65-cpts.c | 107 +- drivers/net/ethernet/ti/am65-cpts.h | 11 +- drivers/net/ethernet/ti/cpsw_new.c | 6 +- drivers/net/ethernet/ti/cpsw_priv.c | 3 + drivers/net/ethernet/ti/icssg/icssg_classifier.c | 113 +- drivers/net/ethernet/ti/icssg/icssg_common.c | 1252 ++ drivers/net/ethernet/ti/icssg/icssg_config.c | 14 +- drivers/net/ethernet/ti/icssg/icssg_config.h | 56 + drivers/net/ethernet/ti/icssg/icssg_ethtool.c | 105 + drivers/net/ethernet/ti/icssg/icssg_prueth.c | 1199 +- drivers/net/ethernet/ti/icssg/icssg_prueth.h | 88 +- drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c | 1181 ++ drivers/net/ethernet/ti/k3-cppi-desc-pool.c | 46 +- drivers/net/ethernet/ti/k3-cppi-desc-pool.h | 6 + drivers/net/ethernet/via/Kconfig | 1 + drivers/net/ethernet/via/via-velocity.c | 4 +- drivers/net/ethernet/wangxun/libwx/wx_hw.c | 2 +- drivers/net/ethernet/wangxun/libwx/wx_lib.c | 8 +- drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 2 +- drivers/net/ethernet/xilinx/xilinx_axienet.h | 4 +- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2 +- drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 23 +- drivers/net/ethernet/xircom/Kconfig | 2 +- 
drivers/net/ethernet/xircom/xirc2ps_cs.c | 4 +- drivers/net/ethernet/xscale/ixp4xx_eth.c | 2 +- drivers/net/fddi/defxx.c | 2 +- drivers/net/fjes/fjes_main.c | 3 +- drivers/net/fjes/fjes_trace.h | 10 +- drivers/net/geneve.c | 46 +- drivers/net/gtp.c | 865 +- drivers/net/hamradio/Kconfig | 6 +- drivers/net/hyperv/netvsc_drv.c | 4 +- drivers/net/hyperv/netvsc_trace.h | 8 +- drivers/net/ipa/data/ipa_data-v3.1.c | 5 +- drivers/net/ipa/data/ipa_data-v3.5.1.c | 5 +- drivers/net/ipa/data/ipa_data-v4.11.c | 5 +- drivers/net/ipa/data/ipa_data-v4.2.c | 5 +- drivers/net/ipa/data/ipa_data-v4.5.c | 5 +- drivers/net/ipa/data/ipa_data-v4.7.c | 5 +- drivers/net/ipa/data/ipa_data-v4.9.c | 5 +- drivers/net/ipa/data/ipa_data-v5.0.c | 5 +- drivers/net/ipa/data/ipa_data-v5.5.c | 5 +- drivers/net/ipa/gsi.c | 30 +- drivers/net/ipa/gsi.h | 12 +- drivers/net/ipa/gsi_private.h | 7 +- drivers/net/ipa/gsi_reg.c | 6 +- drivers/net/ipa/gsi_trans.c | 12 +- drivers/net/ipa/gsi_trans.h | 9 +- drivers/net/ipa/ipa.h | 15 +- drivers/net/ipa/ipa_cmd.c | 13 +- drivers/net/ipa/ipa_cmd.h | 18 +- drivers/net/ipa/ipa_data.h | 4 +- drivers/net/ipa/ipa_endpoint.c | 19 +- drivers/net/ipa/ipa_endpoint.h | 10 +- drivers/net/ipa/ipa_gsi.c | 7 +- drivers/net/ipa/ipa_interrupt.c | 54 +- drivers/net/ipa/ipa_interrupt.h | 6 +- drivers/net/ipa/ipa_main.c | 43 +- drivers/net/ipa/ipa_mem.c | 15 +- drivers/net/ipa/ipa_mem.h | 4 +- drivers/net/ipa/ipa_modem.c | 14 +- drivers/net/ipa/ipa_modem.h | 5 +- drivers/net/ipa/ipa_power.c | 27 +- drivers/net/ipa/ipa_power.h | 19 +- drivers/net/ipa/ipa_qmi.c | 10 +- drivers/net/ipa/ipa_qmi.h | 4 +- drivers/net/ipa/ipa_qmi_msg.c | 3 +- drivers/net/ipa/ipa_qmi_msg.h | 3 +- drivers/net/ipa/ipa_reg.c | 4 +- drivers/net/ipa/ipa_reg.h | 6 +- drivers/net/ipa/ipa_resource.c | 3 +- drivers/net/ipa/ipa_smp2p.c | 10 +- drivers/net/ipa/ipa_sysfs.c | 7 +- drivers/net/ipa/ipa_sysfs.h | 4 +- drivers/net/ipa/ipa_table.c | 27 +- drivers/net/ipa/ipa_table.h | 7 +- drivers/net/ipa/ipa_uc.c | 10 +- drivers/net/ipa/ipa_uc.h | 3 +- drivers/net/ipa/ipa_version.h | 22 +- drivers/net/ipa/reg.h | 8 +- drivers/net/ipa/reg/gsi_reg-v3.1.c | 8 +- drivers/net/ipa/reg/gsi_reg-v3.5.1.c | 8 +- drivers/net/ipa/reg/gsi_reg-v4.0.c | 8 +- drivers/net/ipa/reg/gsi_reg-v4.11.c | 8 +- drivers/net/ipa/reg/gsi_reg-v4.5.c | 8 +- drivers/net/ipa/reg/gsi_reg-v4.9.c | 8 +- drivers/net/ipa/reg/gsi_reg-v5.0.c | 8 +- drivers/net/ipa/reg/ipa_reg-v3.1.c | 20 +- drivers/net/ipa/reg/ipa_reg-v3.5.1.c | 20 +- drivers/net/ipa/reg/ipa_reg-v4.11.c | 20 +- drivers/net/ipa/reg/ipa_reg-v4.2.c | 6 +- drivers/net/ipa/reg/ipa_reg-v4.5.c | 20 +- drivers/net/ipa/reg/ipa_reg-v4.7.c | 20 +- drivers/net/ipa/reg/ipa_reg-v4.9.c | 20 +- drivers/net/ipa/reg/ipa_reg-v5.0.c | 6 +- drivers/net/ipa/reg/ipa_reg-v5.5.c | 6 +- drivers/net/ipvlan/ipvlan_main.c | 1 + drivers/net/loopback.c | 5 +- drivers/net/macsec.c | 2 +- drivers/net/macvlan.c | 2 +- drivers/net/mdio/mdio-gpio.c | 3 +- drivers/net/net_failover.c | 2 +- drivers/net/netconsole.c | 2 +- drivers/net/netdevsim/ethtool.c | 11 + drivers/net/netdevsim/netdev.c | 335 +- drivers/net/netdevsim/netdevsim.h | 10 + drivers/net/ntb_netdev.c | 4 +- drivers/net/pcs/pcs-lynx.c | 5 +- drivers/net/pcs/pcs-rzn1-miic.c | 28 + drivers/net/pfcp.c | 301 + drivers/net/phy/Kconfig | 5 + drivers/net/phy/Makefile | 1 + drivers/net/phy/air_en8811h.c | 1090 ++ drivers/net/phy/aquantia/aquantia_main.c | 21 + drivers/net/phy/dp83822.c | 37 +- drivers/net/phy/marvell.c | 397 +- drivers/net/phy/mediatek-ge.c | 3 - drivers/net/phy/micrel.c | 563 +- 
drivers/net/phy/phylink.c | 28 +- drivers/net/phy/qcom/at803x.c | 3 +- drivers/net/phy/realtek.c | 324 +- drivers/net/phy/sfp-bus.c | 5 +- drivers/net/phy/sfp.c | 24 +- drivers/net/ppp/ppp_generic.c | 2 +- drivers/net/pse-pd/Kconfig | 23 +- drivers/net/pse-pd/Makefile | 2 + drivers/net/pse-pd/pd692x0.c | 1223 ++ drivers/net/pse-pd/pse_core.c | 523 +- drivers/net/pse-pd/pse_regulator.c | 49 +- drivers/net/pse-pd/tps23881.c | 820 ++ drivers/net/slip/slip.c | 2 +- drivers/net/tap.c | 2 +- drivers/net/team/Makefile | 1 + drivers/net/team/team.c | 3101 ----- drivers/net/team/team_core.c | 3056 +++++ drivers/net/team/team_nl.c | 59 + drivers/net/team/team_nl.h | 29 + drivers/net/tun.c | 2 +- drivers/net/usb/aqc111.c | 2 +- drivers/net/usb/asix_devices.c | 2 +- drivers/net/usb/ax88179_178a.c | 8 +- drivers/net/usb/cdc_ncm.c | 2 +- drivers/net/usb/lan78xx.c | 44 +- drivers/net/usb/qmi_wwan.c | 12 +- drivers/net/usb/r8152.c | 6 +- drivers/net/usb/smsc75xx.c | 12 +- drivers/net/usb/usbnet.c | 3 +- drivers/net/veth.c | 1 + drivers/net/virtio_net.c | 1459 ++- drivers/net/vmxnet3/vmxnet3_drv.c | 2 +- drivers/net/vrf.c | 2 +- drivers/net/vsockmon.c | 2 +- drivers/net/vxlan/vxlan_core.c | 18 +- drivers/net/wan/Kconfig | 2 +- drivers/net/wan/fsl_qmc_hdlc.c | 6 +- drivers/net/wireguard/main.c | 2 +- drivers/net/wireless/ath/ath10k/ahb.c | 18 +- drivers/net/wireless/ath/ath10k/core.c | 51 +- drivers/net/wireless/ath/ath10k/core.h | 4 +- drivers/net/wireless/ath/ath10k/hw.h | 15 +- drivers/net/wireless/ath/ath10k/pci.c | 48 +- drivers/net/wireless/ath/ath10k/pci.h | 6 +- drivers/net/wireless/ath/ath10k/sdio.c | 23 +- drivers/net/wireless/ath/ath10k/snoc.c | 7 +- drivers/net/wireless/ath/ath10k/thermal.c | 2 +- drivers/net/wireless/ath/ath10k/trace.h | 64 +- drivers/net/wireless/ath/ath10k/usb.c | 2 +- drivers/net/wireless/ath/ath11k/Makefile | 3 +- drivers/net/wireless/ath/ath11k/ahb.c | 15 +- drivers/net/wireless/ath/ath11k/ce.h | 6 +- drivers/net/wireless/ath/ath11k/core.c | 154 +- drivers/net/wireless/ath/ath11k/core.h | 8 +- drivers/net/wireless/ath/ath11k/debugfs.c | 4 +- drivers/net/wireless/ath/ath11k/dp_rx.c | 3 +- drivers/net/wireless/ath/ath11k/dp_rx.h | 3 + drivers/net/wireless/ath/ath11k/hal.h | 2 +- drivers/net/wireless/ath/ath11k/hif.h | 14 +- drivers/net/wireless/ath/ath11k/mac.c | 197 +- drivers/net/wireless/ath/ath11k/mhi.c | 29 +- drivers/net/wireless/ath/ath11k/mhi.h | 5 +- drivers/net/wireless/ath/ath11k/p2p.c | 149 + drivers/net/wireless/ath/ath11k/p2p.h | 22 + drivers/net/wireless/ath/ath11k/pci.c | 44 +- drivers/net/wireless/ath/ath11k/pci.h | 1 + drivers/net/wireless/ath/ath11k/pcic.c | 34 +- drivers/net/wireless/ath/ath11k/qmi.c | 2 +- drivers/net/wireless/ath/ath11k/reg.c | 14 +- drivers/net/wireless/ath/ath11k/reg.h | 4 +- drivers/net/wireless/ath/ath11k/thermal.c | 2 +- drivers/net/wireless/ath/ath11k/trace.h | 44 +- drivers/net/wireless/ath/ath11k/wmi.c | 104 +- drivers/net/wireless/ath/ath11k/wmi.h | 78 +- drivers/net/wireless/ath/ath12k/Kconfig | 9 + drivers/net/wireless/ath/ath12k/Makefile | 2 + drivers/net/wireless/ath/ath12k/acpi.c | 396 + drivers/net/wireless/ath/ath12k/acpi.h | 76 + drivers/net/wireless/ath/ath12k/ce.h | 6 +- drivers/net/wireless/ath/ath12k/core.c | 121 +- drivers/net/wireless/ath/ath12k/core.h | 95 +- drivers/net/wireless/ath/ath12k/debugfs.c | 90 + drivers/net/wireless/ath/ath12k/debugfs.h | 30 + drivers/net/wireless/ath/ath12k/dp.c | 139 +- drivers/net/wireless/ath/ath12k/dp.h | 13 +- drivers/net/wireless/ath/ath12k/dp_mon.c | 6 +- 
drivers/net/wireless/ath/ath12k/dp_rx.c | 303 +- drivers/net/wireless/ath/ath12k/dp_rx.h | 5 +- drivers/net/wireless/ath/ath12k/dp_tx.c | 49 +- drivers/net/wireless/ath/ath12k/hal.h | 2 +- drivers/net/wireless/ath/ath12k/hal_desc.h | 48 +- drivers/net/wireless/ath/ath12k/hif.h | 14 +- drivers/net/wireless/ath/ath12k/htc.c | 4 +- drivers/net/wireless/ath/ath12k/hw.c | 20 +- drivers/net/wireless/ath/ath12k/hw.h | 7 +- drivers/net/wireless/ath/ath12k/mac.c | 1140 +- drivers/net/wireless/ath/ath12k/mac.h | 4 + drivers/net/wireless/ath/ath12k/mhi.c | 92 +- drivers/net/wireless/ath/ath12k/mhi.h | 5 +- drivers/net/wireless/ath/ath12k/p2p.c | 3 +- drivers/net/wireless/ath/ath12k/p2p.h | 1 + drivers/net/wireless/ath/ath12k/pci.c | 43 +- drivers/net/wireless/ath/ath12k/pci.h | 2 +- drivers/net/wireless/ath/ath12k/qmi.c | 45 +- drivers/net/wireless/ath/ath12k/qmi.h | 2 + drivers/net/wireless/ath/ath12k/reg.c | 55 +- drivers/net/wireless/ath/ath12k/trace.h | 16 +- drivers/net/wireless/ath/ath12k/wmi.c | 218 +- drivers/net/wireless/ath/ath12k/wmi.h | 113 +- drivers/net/wireless/ath/ath6kl/htc_mbox.c | 3 +- drivers/net/wireless/ath/ath6kl/htc_pipe.c | 3 +- drivers/net/wireless/ath/ath6kl/sdio.c | 20 +- drivers/net/wireless/ath/ath6kl/trace.h | 4 +- drivers/net/wireless/ath/ath9k/ath9k.h | 1 + drivers/net/wireless/ath/ath9k/eeprom_4k.c | 2 +- drivers/net/wireless/ath/ath9k/eeprom_9287.c | 4 +- drivers/net/wireless/ath/ath9k/eeprom_def.c | 6 +- drivers/net/wireless/ath/ath9k/pci.c | 2 - drivers/net/wireless/ath/ath9k/xmit.c | 10 +- drivers/net/wireless/ath/trace.h | 4 +- drivers/net/wireless/ath/wcn36xx/main.c | 4 +- drivers/net/wireless/ath/wcn36xx/txrx.c | 4 +- drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 7 +- drivers/net/wireless/ath/wil6210/cfg80211.c | 25 +- drivers/net/wireless/ath/wil6210/fw.h | 1 - drivers/net/wireless/ath/wil6210/fw_inc.c | 4 +- drivers/net/wireless/ath/wil6210/wmi.c | 19 +- drivers/net/wireless/ath/wil6210/wmi.h | 4 +- drivers/net/wireless/broadcom/b43/sysfs.c | 13 +- drivers/net/wireless/broadcom/b43legacy/sysfs.c | 16 +- .../wireless/broadcom/brcm80211/brcmfmac/Makefile | 4 +- .../broadcom/brcm80211/brcmfmac/bca/Makefile | 6 +- .../wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 1 - .../broadcom/brcm80211/brcmfmac/cfg80211.c | 6 +- .../broadcom/brcm80211/brcmfmac/cyw/Makefile | 6 +- .../broadcom/brcm80211/brcmfmac/tracepoint.h | 4 +- .../net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 7 - .../broadcom/brcm80211/brcmfmac/wcc/Makefile | 6 +- .../wireless/broadcom/brcm80211/brcmsmac/Makefile | 6 +- .../wireless/broadcom/brcm80211/brcmsmac/ampdu.c | 6 - .../brcm80211/brcmsmac/brcms_trace_brcmsmac.h | 2 +- .../brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h | 2 +- .../brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h | 6 +- .../broadcom/brcm80211/brcmsmac/phy/phy_lcn.c | 18 +- .../wireless/broadcom/brcm80211/brcmutil/Makefile | 2 +- drivers/net/wireless/intel/iwlwifi/cfg/bz.c | 4 +- drivers/net/wireless/intel/iwlwifi/cfg/sc.c | 2 +- drivers/net/wireless/intel/iwlwifi/dvm/Makefile | 2 +- drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 36 +- drivers/net/wireless/intel/iwlwifi/fw/acpi.h | 16 + drivers/net/wireless/intel/iwlwifi/fw/api/d3.h | 57 +- .../net/wireless/intel/iwlwifi/fw/api/datapath.h | 7 + .../net/wireless/intel/iwlwifi/fw/api/mac-cfg.h | 23 +- .../net/wireless/intel/iwlwifi/fw/api/nvm-reg.h | 61 +- .../net/wireless/intel/iwlwifi/fw/api/offload.h | 4 +- drivers/net/wireless/intel/iwlwifi/fw/api/phy.h | 7 +- drivers/net/wireless/intel/iwlwifi/fw/api/power.h | 44 +- 
drivers/net/wireless/intel/iwlwifi/fw/api/scan.h | 33 +- drivers/net/wireless/intel/iwlwifi/fw/api/tx.h | 13 +- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 10 +- drivers/net/wireless/intel/iwlwifi/fw/file.h | 3 + drivers/net/wireless/intel/iwlwifi/fw/regulatory.c | 127 +- drivers/net/wireless/intel/iwlwifi/fw/regulatory.h | 26 +- drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 7 +- drivers/net/wireless/intel/iwlwifi/fw/uefi.c | 23 + drivers/net/wireless/intel/iwlwifi/fw/uefi.h | 24 +- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 7 +- .../wireless/intel/iwlwifi/iwl-context-info-gen3.h | 5 +- .../net/wireless/intel/iwlwifi/iwl-devtrace-msg.h | 2 +- drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h | 2 +- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 6 - drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 28 +- drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 9 +- drivers/net/wireless/intel/iwlwifi/mei/Makefile | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/Makefile | 3 +- drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 88 +- drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 15 +- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 243 +- .../net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 98 +- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 9 + .../net/wireless/intel/iwlwifi/mvm/ftm-initiator.c | 17 +- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 99 +- drivers/net/wireless/intel/iwlwifi/mvm/link.c | 809 ++ drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 30 +- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 349 +- .../net/wireless/intel/iwlwifi/mvm/mld-mac80211.c | 434 +- drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c | 25 +- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 236 +- .../net/wireless/intel/iwlwifi/mvm/offloading.c | 8 +- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 62 +- drivers/net/wireless/intel/iwlwifi/mvm/power.c | 16 +- drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c | 9 +- drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 147 +- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 36 +- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 585 +- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 86 + drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 24 + .../net/wireless/intel/iwlwifi/mvm/tests/Makefile | 3 + .../net/wireless/intel/iwlwifi/mvm/tests/links.c | 435 + .../net/wireless/intel/iwlwifi/mvm/tests/module.c | 10 + .../net/wireless/intel/iwlwifi/mvm/tests/scan.c | 110 + .../net/wireless/intel/iwlwifi/mvm/time-event.c | 22 +- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 7 +- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 7 +- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 29 +- .../wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c | 31 +- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 54 +- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 11 +- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 27 +- drivers/net/wireless/intel/iwlwifi/tests/Makefile | 2 +- drivers/net/wireless/intel/iwlwifi/tests/devinfo.c | 26 +- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 2 + drivers/net/wireless/marvell/mwifiex/sdio.c | 4 +- drivers/net/wireless/marvell/mwl8k.c | 92 +- drivers/net/wireless/mediatek/mt76/dma.c | 5 +- drivers/net/wireless/mediatek/mt76/mac80211.c | 1 + drivers/net/wireless/mediatek/mt76/mt76.h | 4 +- drivers/net/wireless/mediatek/mt76/mt76_connac.h | 10 +- .../net/wireless/mediatek/mt76/mt76_connac3_mac.c | 85 + .../net/wireless/mediatek/mt76/mt76_connac3_mac.h | 22 + 
.../net/wireless/mediatek/mt76/mt76_connac_mcu.c | 9 +- .../net/wireless/mediatek/mt76/mt76_connac_mcu.h | 15 + .../net/wireless/mediatek/mt76/mt7915/debugfs.c | 3 +- drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c | 29 +- drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h | 47 +- drivers/net/wireless/mediatek/mt76/mt7915/init.c | 10 +- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 15 +- drivers/net/wireless/mediatek/mt76/mt7915/main.c | 6 +- drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 158 +- drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 4 + drivers/net/wireless/mediatek/mt76/mt7915/soc.c | 1 - drivers/net/wireless/mediatek/mt76/mt7921/main.c | 32 +- drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 79 +- drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h | 6 + drivers/net/wireless/mediatek/mt76/mt7921/pci.c | 19 +- drivers/net/wireless/mediatek/mt76/mt7925/mac.c | 15 +- drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 4 +- drivers/net/wireless/mediatek/mt76/mt792x.h | 7 + drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 8 +- drivers/net/wireless/mediatek/mt76/mt7996/main.c | 34 +- drivers/net/wireless/mediatek/mt76/mt7996/mcu.c | 54 +- drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h | 3 +- drivers/net/wireless/mediatek/mt76/sdio.c | 14 +- drivers/net/wireless/mediatek/mt76/testmode.c | 2 +- drivers/net/wireless/mediatek/mt76/tx.c | 2 +- drivers/net/wireless/mediatek/mt76/usb.c | 3 +- drivers/net/wireless/microchip/wilc1000/netdev.h | 7 + drivers/net/wireless/microchip/wilc1000/sdio.c | 5 +- drivers/net/wireless/quantenna/qtnfmac/bus.h | 2 +- drivers/net/wireless/quantenna/qtnfmac/core.c | 16 +- drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c | 12 +- .../wireless/quantenna/qtnfmac/pcie/pearl_pcie.c | 6 +- .../wireless/quantenna/qtnfmac/pcie/topaz_pcie.c | 6 +- .../net/wireless/realtek/rtl818x/rtl8180/Makefile | 2 +- .../net/wireless/realtek/rtl818x/rtl8187/Makefile | 2 +- drivers/net/wireless/realtek/rtl8xxxu/8188e.c | 1885 +++ drivers/net/wireless/realtek/rtl8xxxu/8188f.c | 1765 +++ drivers/net/wireless/realtek/rtl8xxxu/8192c.c | 661 ++ drivers/net/wireless/realtek/rtl8xxxu/8192e.c | 1767 +++ drivers/net/wireless/realtek/rtl8xxxu/8192f.c | 2091 ++++ drivers/net/wireless/realtek/rtl8xxxu/8710b.c | 1875 +++ drivers/net/wireless/realtek/rtl8xxxu/8723a.c | 535 + drivers/net/wireless/realtek/rtl8xxxu/8723b.c | 1767 +++ drivers/net/wireless/realtek/rtl8xxxu/Makefile | 6 +- drivers/net/wireless/realtek/rtl8xxxu/core.c | 8291 +++++++++++++ drivers/net/wireless/realtek/rtl8xxxu/regs.h | 1381 +++ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 4 +- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c | 1901 --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c | 1766 --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c | 630 - .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 1783 --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c | 2107 ---- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c | 1891 --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c | 526 - .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 1760 --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 8331 ------------- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 1381 --- drivers/net/wireless/realtek/rtlwifi/Kconfig | 4 + drivers/net/wireless/realtek/rtlwifi/Makefile | 1 + drivers/net/wireless/realtek/rtlwifi/cam.c | 5 +- drivers/net/wireless/realtek/rtlwifi/cam.h | 6 +- drivers/net/wireless/realtek/rtlwifi/efuse.c | 2 +- drivers/net/wireless/realtek/rtlwifi/efuse.h | 2 +- 
.../net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 195 +- .../net/wireless/realtek/rtlwifi/rtl8192d/Makefile | 11 + .../net/wireless/realtek/rtlwifi/rtl8192d/def.h | 175 + .../wireless/realtek/rtlwifi/rtl8192d/dm_common.c | 1061 ++ .../wireless/realtek/rtlwifi/rtl8192d/dm_common.h | 79 + .../wireless/realtek/rtlwifi/rtl8192d/fw_common.c | 370 + .../wireless/realtek/rtlwifi/rtl8192d/fw_common.h | 49 + .../wireless/realtek/rtlwifi/rtl8192d/hw_common.c | 1225 ++ .../wireless/realtek/rtlwifi/rtl8192d/hw_common.h | 24 + .../net/wireless/realtek/rtlwifi/rtl8192d/main.c | 9 + .../wireless/realtek/rtlwifi/rtl8192d/phy_common.c | 856 ++ .../wireless/realtek/rtlwifi/rtl8192d/phy_common.h | 111 + .../net/wireless/realtek/rtlwifi/rtl8192d/reg.h | 1393 +++ .../wireless/realtek/rtlwifi/rtl8192d/rf_common.c | 359 + .../wireless/realtek/rtlwifi/rtl8192d/rf_common.h | 13 + .../wireless/realtek/rtlwifi/rtl8192d/trx_common.c | 516 + .../wireless/realtek/rtlwifi/rtl8192d/trx_common.h | 405 + .../net/wireless/realtek/rtlwifi/rtl8192de/def.h | 175 - .../net/wireless/realtek/rtlwifi/rtl8192de/dm.c | 1072 +- .../net/wireless/realtek/rtlwifi/rtl8192de/dm.h | 91 +- .../net/wireless/realtek/rtlwifi/rtl8192de/fw.c | 375 +- .../net/wireless/realtek/rtlwifi/rtl8192de/fw.h | 37 - .../net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 1168 +- .../net/wireless/realtek/rtlwifi/rtl8192de/hw.h | 11 - .../net/wireless/realtek/rtlwifi/rtl8192de/led.c | 2 +- .../net/wireless/realtek/rtlwifi/rtl8192de/phy.c | 918 +- .../net/wireless/realtek/rtlwifi/rtl8192de/phy.h | 59 +- .../net/wireless/realtek/rtlwifi/rtl8192de/reg.h | 1273 -- .../net/wireless/realtek/rtlwifi/rtl8192de/rf.c | 375 +- .../net/wireless/realtek/rtlwifi/rtl8192de/rf.h | 5 - .../net/wireless/realtek/rtlwifi/rtl8192de/sw.c | 12 +- .../net/wireless/realtek/rtlwifi/rtl8192de/trx.c | 514 +- .../net/wireless/realtek/rtlwifi/rtl8192de/trx.h | 396 - .../net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 45 +- drivers/net/wireless/realtek/rtlwifi/usb.c | 3 + drivers/net/wireless/realtek/rtlwifi/wifi.h | 33 +- drivers/net/wireless/realtek/rtw88/Kconfig | 22 + drivers/net/wireless/realtek/rtw88/Makefile | 9 + drivers/net/wireless/realtek/rtw88/coex.c | 4 +- drivers/net/wireless/realtek/rtw88/debug.h | 1 + drivers/net/wireless/realtek/rtw88/fw.c | 14 +- drivers/net/wireless/realtek/rtw88/fw.h | 2 + drivers/net/wireless/realtek/rtw88/mac.c | 20 +- drivers/net/wireless/realtek/rtw88/mac80211.c | 2 + drivers/net/wireless/realtek/rtw88/main.c | 18 +- drivers/net/wireless/realtek/rtw88/main.h | 5 + drivers/net/wireless/realtek/rtw88/pci.c | 5 +- drivers/net/wireless/realtek/rtw88/reg.h | 1 + drivers/net/wireless/realtek/rtw88/rtw8703b.c | 2110 ++++ drivers/net/wireless/realtek/rtw88/rtw8703b.h | 102 + .../net/wireless/realtek/rtw88/rtw8703b_tables.c | 902 ++ .../net/wireless/realtek/rtw88/rtw8703b_tables.h | 14 + drivers/net/wireless/realtek/rtw88/rtw8723cs.c | 34 + drivers/net/wireless/realtek/rtw88/rtw8723d.c | 674 +- drivers/net/wireless/realtek/rtw88/rtw8723d.h | 269 +- drivers/net/wireless/realtek/rtw88/rtw8723x.c | 721 ++ drivers/net/wireless/realtek/rtw88/rtw8723x.h | 518 + drivers/net/wireless/realtek/rtw88/rtw8821c.c | 1 + drivers/net/wireless/realtek/rtw88/rtw8822b.c | 1 + drivers/net/wireless/realtek/rtw88/rtw8822c.c | 1 + drivers/net/wireless/realtek/rtw88/rx.h | 2 + drivers/net/wireless/realtek/rtw88/usb.c | 6 +- drivers/net/wireless/realtek/rtw89/Kconfig | 15 + drivers/net/wireless/realtek/rtw89/Makefile | 12 +- drivers/net/wireless/realtek/rtw89/acpi.c | 47 + 
drivers/net/wireless/realtek/rtw89/acpi.h | 21 +- drivers/net/wireless/realtek/rtw89/cam.c | 120 +- drivers/net/wireless/realtek/rtw89/cam.h | 71 +- drivers/net/wireless/realtek/rtw89/coex.c | 2112 +++- drivers/net/wireless/realtek/rtw89/coex.h | 108 + drivers/net/wireless/realtek/rtw89/core.c | 17 +- drivers/net/wireless/realtek/rtw89/core.h | 351 +- drivers/net/wireless/realtek/rtw89/debug.c | 2 +- drivers/net/wireless/realtek/rtw89/fw.c | 412 +- drivers/net/wireless/realtek/rtw89/fw.h | 497 +- drivers/net/wireless/realtek/rtw89/mac.c | 55 +- drivers/net/wireless/realtek/rtw89/mac.h | 7 + drivers/net/wireless/realtek/rtw89/mac80211.c | 26 + drivers/net/wireless/realtek/rtw89/mac_be.c | 5 + drivers/net/wireless/realtek/rtw89/pci.c | 75 +- drivers/net/wireless/realtek/rtw89/pci.h | 8 + drivers/net/wireless/realtek/rtw89/phy.c | 19 +- drivers/net/wireless/realtek/rtw89/phy_be.c | 18 + drivers/net/wireless/realtek/rtw89/reg.h | 7 +- drivers/net/wireless/realtek/rtw89/regd.c | 156 +- drivers/net/wireless/realtek/rtw89/rtw8851b.c | 2 + drivers/net/wireless/realtek/rtw89/rtw8852a.c | 1 + drivers/net/wireless/realtek/rtw89/rtw8852b.c | 15 + drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c | 2 +- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 13 +- .../net/wireless/realtek/rtw89/rtw8852c_table.c | 2706 +---- drivers/net/wireless/realtek/rtw89/rtw8922a.c | 159 +- drivers/net/wireless/realtek/rtw89/sar.h | 4 +- drivers/net/wireless/realtek/rtw89/wow.c | 704 +- drivers/net/wireless/realtek/rtw89/wow.h | 57 + drivers/net/wireless/rsi/rsi_91x_sdio.c | 31 +- drivers/net/wireless/silabs/wfx/bus_sdio.c | 1 - drivers/net/wireless/ti/wl1251/cmd.h | 2 - drivers/net/wireless/ti/wl1251/sdio.c | 20 +- drivers/net/wireless/ti/wl1251/wl12xx_80211.h | 1 - drivers/net/wireless/ti/wlcore/cmd.c | 7 - drivers/net/wireless/ti/wlcore/cmd.h | 2 - drivers/net/wireless/ti/wlcore/main.c | 17 +- drivers/net/wireless/ti/wlcore/sysfs.c | 11 +- drivers/net/wireless/ti/wlcore/tx.c | 7 +- drivers/net/wireless/ti/wlcore/wl12xx_80211.h | 1 - drivers/net/wireless/ti/wlcore/wlcore_i.h | 6 + drivers/net/wireless/virtual/mac80211_hwsim.c | 53 +- drivers/net/wireless/virtual/virt_wifi.c | 20 +- drivers/net/wwan/iosm/iosm_ipc_devlink.c | 3 +- drivers/net/wwan/mhi_wwan_mbim.c | 1 - drivers/net/wwan/t7xx/t7xx_netdev.c | 20 +- drivers/net/wwan/t7xx/t7xx_netdev.h | 2 +- drivers/net/xen-netback/common.h | 5 +- drivers/net/xen-netback/interface.c | 4 +- drivers/net/xen-netback/netback.c | 12 +- drivers/net/xen-netfront.c | 2 +- drivers/nfc/nfcmrvl/spi.c | 1 - drivers/nfc/st95hf/core.c | 28 +- drivers/ntb/hw/idt/ntb_hw_idt.c | 2 +- drivers/nvdimm/btt.c | 12 +- drivers/nvdimm/core.c | 30 - drivers/nvdimm/nd.h | 1 - drivers/nvdimm/virtio_pmem.c | 1 - drivers/nvme/host/apple.c | 1 + drivers/nvme/host/auth.c | 6 +- drivers/nvme/host/core.c | 110 +- drivers/nvme/host/fabrics.c | 51 +- drivers/nvme/host/fabrics.h | 2 +- drivers/nvme/host/fc.c | 4 +- drivers/nvme/host/ioctl.c | 43 +- drivers/nvme/host/multipath.c | 21 +- drivers/nvme/host/nvme.h | 8 +- drivers/nvme/host/pci.c | 5 +- drivers/nvme/host/rdma.c | 23 +- drivers/nvme/host/tcp.c | 30 +- drivers/nvme/target/auth.c | 36 +- drivers/nvme/target/configfs.c | 22 +- drivers/nvme/target/fabrics-cmd-auth.c | 49 +- drivers/nvme/target/fabrics-cmd.c | 11 +- drivers/nvme/target/nvmet.h | 8 +- drivers/nvme/target/rdma.c | 16 +- drivers/nvme/target/zns.c | 10 +- drivers/nvmem/core.c | 2 +- drivers/nvmem/layouts.c | 6 +- drivers/nvmem/layouts/onie-tlv.c | 1 - drivers/nvmem/layouts/sl28vpd.c | 1 - 
drivers/nvmem/lpc18xx_eeprom.c | 6 +- drivers/nvmem/meson-mx-efuse.c | 6 +- drivers/nvmem/rockchip-otp.c | 1 + drivers/nvmem/sc27xx-efuse.c | 1 + drivers/nvmem/sprd-efuse.c | 1 + drivers/of/address.c | 113 +- drivers/of/base.c | 34 +- drivers/of/device.c | 42 +- drivers/of/dynamic.c | 37 +- drivers/of/of_private.h | 1 + drivers/of/of_reserved_mem.c | 22 +- drivers/of/of_test.c | 1 + drivers/of/overlay.c | 11 +- drivers/of/property.c | 81 +- drivers/of/resolver.c | 35 +- drivers/of/unittest.c | 14 +- drivers/opp/core.c | 6 +- drivers/opp/of.c | 17 +- drivers/opp/ti-opp-supply.c | 6 +- drivers/parisc/ccio-dma.c | 2 +- drivers/parisc/sba_iommu.c | 2 +- drivers/parport/parport_mfc3.c | 3 +- drivers/parport/procfs.c | 24 +- drivers/pci/access.c | 40 +- drivers/pci/controller/cadence/pcie-cadence-ep.c | 7 +- drivers/pci/controller/dwc/pci-dra7xx.c | 9 + drivers/pci/controller/dwc/pci-imx6.c | 10 + drivers/pci/controller/dwc/pci-keystone.c | 167 +- drivers/pci/controller/dwc/pci-layerscape-ep.c | 9 + drivers/pci/controller/dwc/pcie-artpec6.c | 15 +- drivers/pci/controller/dwc/pcie-designware-ep.c | 147 +- drivers/pci/controller/dwc/pcie-designware-plat.c | 11 + drivers/pci/controller/dwc/pcie-designware.h | 14 +- drivers/pci/controller/dwc/pcie-dw-rockchip.c | 2 +- drivers/pci/controller/dwc/pcie-keembay.c | 18 +- drivers/pci/controller/dwc/pcie-qcom-ep.c | 10 +- drivers/pci/controller/dwc/pcie-rcar-gen4.c | 28 +- drivers/pci/controller/dwc/pcie-tegra194.c | 6 +- drivers/pci/controller/dwc/pcie-uniphier-ep.c | 15 +- drivers/pci/controller/pci-hyperv.c | 4 +- drivers/pci/controller/pci-loongson.c | 13 + drivers/pci/controller/pcie-mt7621.c | 2 +- drivers/pci/controller/pcie-rcar-ep.c | 2 + drivers/pci/controller/pcie-rcar-host.c | 6 +- drivers/pci/controller/pcie-rockchip-ep.c | 4 +- drivers/pci/controller/pcie-rockchip.c | 2 +- drivers/pci/doe.c | 12 +- drivers/pci/endpoint/functions/pci-epf-test.c | 94 +- drivers/pci/endpoint/functions/pci-epf-vntb.c | 19 +- drivers/pci/endpoint/pci-ep-cfs.c | 9 + drivers/pci/endpoint/pci-epc-core.c | 22 + drivers/pci/endpoint/pci-epf-core.c | 9 +- drivers/pci/hotplug/TODO | 12 +- drivers/pci/msi/api.c | 58 +- drivers/pci/msi/irqdomain.c | 59 - drivers/pci/msi/msi.c | 15 +- drivers/pci/pci.c | 129 +- drivers/pci/pci.h | 2 - drivers/pci/pcie/Kconfig | 2 +- drivers/pci/pcie/aer_inject.c | 2 +- drivers/pci/pcie/aspm.c | 182 +- drivers/pci/pcie/err.c | 12 +- drivers/pci/pcie/portdrv.c | 8 +- drivers/pci/probe.c | 5 +- drivers/pci/quirks.c | 20 + drivers/pci/setup-bus.c | 6 +- drivers/peci/core.c | 4 +- drivers/peci/device.c | 2 +- drivers/peci/internal.h | 6 +- drivers/perf/alibaba_uncore_drw_pmu.c | 23 +- drivers/perf/amlogic/meson_ddr_pmu_core.c | 1 + drivers/perf/arm-cci.c | 13 +- drivers/perf/arm-ccn.c | 12 +- drivers/perf/arm-cmn.c | 11 +- drivers/perf/arm_cspmu/arm_cspmu.c | 19 +- drivers/perf/arm_cspmu/arm_cspmu.h | 7 +- drivers/perf/arm_dmc620_pmu.c | 1 + drivers/perf/arm_dsu_pmu.c | 31 +- drivers/perf/arm_pmu_platform.c | 1 + drivers/perf/arm_pmuv3.c | 10 +- drivers/perf/arm_smmuv3_pmu.c | 1 + drivers/perf/arm_spe_pmu.c | 1 + drivers/perf/cxl_pmu.c | 15 +- drivers/perf/dwc_pcie_pmu.c | 10 +- drivers/perf/fsl_imx8_ddr_perf.c | 1 + drivers/perf/hisilicon/hisi_pcie_pmu.c | 23 +- drivers/perf/hisilicon/hisi_uncore_pmu.c | 21 +- drivers/perf/hisilicon/hisi_uncore_pmu.h | 4 +- drivers/perf/hisilicon/hns3_pmu.c | 13 +- drivers/perf/qcom_l2_pmu.c | 9 +- drivers/perf/qcom_l3_pmu.c | 12 +- drivers/perf/riscv_pmu.c | 5 +- drivers/perf/riscv_pmu_legacy.c | 1 + 
drivers/perf/riscv_pmu_sbi.c | 358 +- drivers/perf/thunderx2_pmu.c | 30 +- drivers/perf/xgene_pmu.c | 12 +- drivers/phy/cadence/phy-cadence-torrent.c | 3 + drivers/phy/freescale/Kconfig | 6 + drivers/phy/freescale/Makefile | 1 + drivers/phy/freescale/phy-fsl-samsung-hdmi.c | 718 ++ drivers/phy/mediatek/Kconfig | 11 + drivers/phy/mediatek/Makefile | 1 + drivers/phy/mediatek/phy-mtk-xfi-tphy.c | 451 + drivers/phy/phy-core.c | 26 +- drivers/phy/qualcomm/phy-qcom-edp.c | 373 +- drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c | 17 + drivers/phy/qualcomm/phy-qcom-qmp-combo.c | 2 - drivers/phy/qualcomm/phy-qcom-qmp-pcie.c | 106 +- drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h | 4 + .../qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h | 6 + drivers/phy/qualcomm/phy-qcom-qmp-ufs.c | 144 +- drivers/phy/qualcomm/phy-qcom-qmp-usb.c | 47 + drivers/phy/rockchip/Kconfig | 14 + drivers/phy/rockchip/Makefile | 1 + drivers/phy/rockchip/phy-rockchip-naneng-combphy.c | 4 +- drivers/phy/rockchip/phy-rockchip-snps-pcie3.c | 49 +- drivers/phy/rockchip/phy-rockchip-usbdp.c | 1608 +++ drivers/phy/samsung/Makefile | 1 + drivers/phy/samsung/phy-exynos7-ufs.c | 1 + drivers/phy/samsung/phy-exynosautov9-ufs.c | 1 + drivers/phy/samsung/phy-fsd-ufs.c | 1 + drivers/phy/samsung/phy-gs101-ufs.c | 182 + drivers/phy/samsung/phy-samsung-ufs.c | 28 +- drivers/phy/samsung/phy-samsung-ufs.h | 6 + drivers/phy/xilinx/phy-zynqmp.c | 20 +- drivers/pinctrl/Kconfig | 15 +- drivers/pinctrl/Makefile | 1 + drivers/pinctrl/bcm/pinctrl-bcm2835.c | 83 +- drivers/pinctrl/core.c | 12 +- drivers/pinctrl/freescale/pinctrl-imx8ulp.c | 1 + drivers/pinctrl/freescale/pinctrl-mxs.c | 4 +- drivers/pinctrl/mediatek/pinctrl-mt6765.c | 1 + drivers/pinctrl/mediatek/pinctrl-mt6779.c | 1 + drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 3 - drivers/pinctrl/pinconf-generic.c | 2 +- drivers/pinctrl/pinctrl-aw9523.c | 131 +- drivers/pinctrl/pinctrl-cy8c95x0.c | 2 +- drivers/pinctrl/pinctrl-loongson2.c | 1 + drivers/pinctrl/pinctrl-max77620.c | 2 - drivers/pinctrl/pinctrl-rk805.c | 69 + drivers/pinctrl/pinctrl-rockchip.c | 17 +- drivers/pinctrl/pinctrl-scmi.c | 571 + drivers/pinctrl/pinctrl-single.c | 57 +- drivers/pinctrl/pinctrl-tps6594.c | 278 +- drivers/pinctrl/pinmux.c | 26 +- drivers/pinctrl/pxa/pinctrl-pxa2xx.c | 55 +- drivers/pinctrl/pxa/pinctrl-pxa2xx.h | 15 +- drivers/pinctrl/qcom/pinctrl-sm7150.c | 1 + drivers/pinctrl/qcom/pinctrl-spmi-gpio.c | 4 + drivers/pinctrl/realtek/pinctrl-rtd1315e.c | 1 + drivers/pinctrl/realtek/pinctrl-rtd1319d.c | 1 + drivers/pinctrl/renesas/pfc-r8a779g0.c | 712 +- drivers/pinctrl/renesas/pfc-r8a779h0.c | 112 + drivers/pinctrl/renesas/pinctrl-rzg2l.c | 2 +- drivers/pinctrl/samsung/pinctrl-exynos.c | 112 + drivers/pinctrl/samsung/pinctrl-samsung.c | 95 +- drivers/pinctrl/samsung/pinctrl-samsung.h | 2 + drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c | 1 - drivers/pinctrl/ti/pinctrl-ti-iodelay.c | 11 +- drivers/platform/Kconfig | 2 + drivers/platform/Makefile | 1 + drivers/platform/arm64/Kconfig | 35 + drivers/platform/arm64/Makefile | 8 + drivers/platform/arm64/acer-aspire1-ec.c | 562 + drivers/platform/chrome/Kconfig | 1 + drivers/platform/chrome/cros_ec_chardev.c | 9 +- drivers/platform/chrome/cros_ec_debugfs.c | 10 +- drivers/platform/chrome/cros_ec_lightbar.c | 9 +- drivers/platform/chrome/cros_ec_lpc.c | 81 +- drivers/platform/chrome/cros_ec_proto_test.c | 72 +- drivers/platform/chrome/cros_ec_sensorhub.c | 9 +- drivers/platform/chrome/cros_ec_sysfs.c | 9 +- drivers/platform/chrome/cros_ec_vbc.c | 9 +- 
drivers/platform/chrome/cros_hps_i2c.c | 4 +- drivers/platform/chrome/cros_kbd_led_backlight.c | 11 +- drivers/platform/chrome/wilco_ec/Kconfig | 1 + drivers/platform/chrome/wilco_ec/core.c | 9 +- drivers/platform/chrome/wilco_ec/debugfs.c | 9 +- drivers/platform/chrome/wilco_ec/event.c | 2 - drivers/platform/chrome/wilco_ec/sysfs.c | 2 +- drivers/platform/chrome/wilco_ec/telemetry.c | 9 +- drivers/platform/mips/cpu_hwmon.c | 3 + drivers/platform/surface/aggregator/core.c | 42 +- .../platform/surface/surface_aggregator_registry.c | 43 +- .../platform/surface/surface_platform_profile.c | 88 +- drivers/platform/x86/Kconfig | 60 + drivers/platform/x86/Makefile | 9 + drivers/platform/x86/amd/hsmp.c | 2 +- drivers/platform/x86/amd/pmc/Kconfig | 15 + drivers/platform/x86/amd/pmc/Makefile | 1 + drivers/platform/x86/amd/pmc/mp2_stb.c | 280 + drivers/platform/x86/amd/pmc/pmc.c | 5 + drivers/platform/x86/amd/pmc/pmc.h | 15 + drivers/platform/x86/amd/pmf/core.c | 1 + drivers/platform/x86/amilo-rfkill.c | 1 + drivers/platform/x86/asus-laptop.c | 45 +- drivers/platform/x86/asus-wmi.c | 483 +- drivers/platform/x86/classmate-laptop.c | 9 +- drivers/platform/x86/dell/Kconfig | 15 + drivers/platform/x86/dell/Makefile | 1 + drivers/platform/x86/dell/dell-rbtn.c | 1 - drivers/platform/x86/dell/dell-smbios-base.c | 11 +- drivers/platform/x86/dell/dell-uart-backlight.c | 398 + drivers/platform/x86/eeepc-laptop.c | 1 - drivers/platform/x86/firmware_attributes_class.c | 1 + drivers/platform/x86/fujitsu-laptop.c | 18 +- drivers/platform/x86/hp/hp-wmi.c | 12 +- drivers/platform/x86/huawei-wmi.c | 8 +- drivers/platform/x86/ibm_rtl.c | 1 + drivers/platform/x86/ideapad-laptop.c | 140 +- drivers/platform/x86/inspur_platform_profile.c | 1 + drivers/platform/x86/intel/hid.c | 1 + drivers/platform/x86/intel/ifs/load.c | 2 + drivers/platform/x86/intel/ifs/runtest.c | 27 +- drivers/platform/x86/intel/pmc/arl.c | 2 +- drivers/platform/x86/intel/pmc/core.c | 38 + drivers/platform/x86/intel/pmc/core.h | 9 + drivers/platform/x86/intel/pmc/lnl.c | 477 +- drivers/platform/x86/intel/pmc/pltdrv.c | 1 + drivers/platform/x86/intel/rst.c | 2 +- drivers/platform/x86/intel/sdsi.c | 118 +- drivers/platform/x86/intel/smartconnect.c | 2 +- .../x86/intel/speed_select_if/isst_if_common.c | 1 + .../x86/intel/speed_select_if/isst_tpmi_core.c | 383 +- drivers/platform/x86/intel/tpmi.c | 32 +- drivers/platform/x86/intel/vbtn.c | 4 +- drivers/platform/x86/intel_ips.c | 2 +- drivers/platform/x86/lenovo-wmi-camera.c | 127 + .../x86/lenovo-yoga-tab2-pro-1380-fastcharger.c | 338 + drivers/platform/x86/lg-laptop.c | 1 - drivers/platform/x86/meegopad_anx7428.c | 150 + drivers/platform/x86/msi-laptop.c | 20 +- drivers/platform/x86/msi-wmi-platform.c | 428 + drivers/platform/x86/p2sb.c | 15 +- drivers/platform/x86/quickstart.c | 246 + drivers/platform/x86/samsung-laptop.c | 10 +- .../x86/siemens/simatic-ipc-batt-apollolake.c | 1 + .../x86/siemens/simatic-ipc-batt-elkhartlake.c | 1 + .../platform/x86/siemens/simatic-ipc-batt-f7188x.c | 1 + drivers/platform/x86/siemens/simatic-ipc-batt.c | 1 + drivers/platform/x86/siemens/simatic-ipc.c | 1 + drivers/platform/x86/sony-laptop.c | 2 - drivers/platform/x86/think-lmi.c | 13 +- drivers/platform/x86/thinkpad_acpi.c | 882 +- drivers/platform/x86/toshiba_acpi.c | 10 +- drivers/platform/x86/toshiba_bluetooth.c | 1 - drivers/platform/x86/toshiba_haps.c | 1 - drivers/platform/x86/touchscreen_dmi.c | 136 +- drivers/platform/x86/uv_sysfs.c | 22 +- drivers/platform/x86/wireless-hotkey.c | 2 +- drivers/platform/x86/wmi.c 
| 62 - drivers/platform/x86/x86-android-tablets/Kconfig | 2 + drivers/platform/x86/x86-android-tablets/core.c | 8 +- drivers/platform/x86/x86-android-tablets/lenovo.c | 10 +- drivers/platform/x86/x86-android-tablets/other.c | 133 +- .../x86/x86-android-tablets/x86-android-tablets.h | 2 +- drivers/platform/x86/xiaomi-wmi.c | 12 +- drivers/platform/x86/xo1-rfkill.c | 1 + drivers/pmdomain/core.c | 11 +- drivers/pmdomain/imx/gpcv2.c | 11 + drivers/pmdomain/mediatek/mt8188-pm-domains.h | 14 +- drivers/pmdomain/mediatek/mtk-scpsys.c | 1 - drivers/pmdomain/renesas/Makefile | 4 +- drivers/pmdomain/renesas/r8a7796-sysc.c | 67 - drivers/pmdomain/renesas/r8a77960-sysc.c | 49 + drivers/pmdomain/renesas/r8a77961-sysc.c | 47 + drivers/pmdomain/renesas/rcar-sysc.c | 70 +- drivers/pmdomain/renesas/rcar-sysc.h | 9 +- drivers/pnp/driver.c | 6 + drivers/pnp/isapnp/Kconfig | 2 +- drivers/power/supply/ab8500_charger.c | 16 +- drivers/power/supply/bq27xxx_battery.c | 118 +- drivers/power/supply/cros_peripheral_charger.c | 11 +- drivers/power/supply/ingenic-battery.c | 10 +- drivers/power/supply/sbs-manager.c | 2 +- drivers/power/supply/test_power.c | 36 + drivers/powercap/dtpm_cpu.c | 8 +- drivers/powercap/intel_rapl_common.c | 607 +- drivers/powercap/intel_rapl_tpmi.c | 3 + drivers/pps/clients/pps_parport.c | 6 +- drivers/ptp/ptp_clockmatrix.c | 6 +- drivers/ptp/ptp_dte.c | 6 +- drivers/ptp/ptp_idt82p33.c | 6 +- drivers/ptp/ptp_ines.c | 5 +- drivers/ptp/ptp_qoriq.c | 5 +- drivers/ptp/ptp_vmw.c | 1 - drivers/pwm/Kconfig | 4 - drivers/pwm/Makefile | 1 - drivers/pwm/core.c | 604 +- drivers/pwm/pwm-atmel-tcb.c | 12 +- drivers/pwm/pwm-bcm2835.c | 30 +- drivers/pwm/pwm-meson.c | 198 +- drivers/pwm/pwm-pca9685.c | 4 +- drivers/pwm/pwm-sti.c | 126 +- drivers/pwm/pwm-stm32.c | 14 +- drivers/pwm/sysfs.c | 545 - drivers/rapidio/Kconfig | 17 +- drivers/regulator/Kconfig | 12 +- drivers/regulator/Makefile | 1 + drivers/regulator/axp20x-regulator.c | 99 +- drivers/regulator/core.c | 4 +- drivers/regulator/da9121-regulator.c | 4 +- drivers/regulator/da9211-regulator.c | 2 +- drivers/regulator/devres.c | 59 + drivers/regulator/isl9305.c | 2 +- drivers/regulator/max8973-regulator.c | 2 +- drivers/regulator/mt6311-regulator.c | 2 +- drivers/regulator/pca9450-regulator.c | 196 +- drivers/regulator/pf8x00-regulator.c | 2 +- drivers/regulator/pfuze100-regulator.c | 2 +- drivers/regulator/rk808-regulator.c | 218 +- drivers/regulator/rohm-regulator.c | 4 + drivers/regulator/rpi-panel-attiny-regulator.c | 2 +- drivers/regulator/rtmv20-regulator.c | 2 +- drivers/regulator/rtq2208-regulator.c | 122 +- drivers/regulator/rtq6752-regulator.c | 2 +- drivers/regulator/sun20i-regulator.c | 157 + drivers/regulator/tps51632-regulator.c | 2 +- drivers/regulator/tps62360-regulator.c | 2 +- drivers/regulator/tps6594-regulator.c | 332 +- drivers/remoteproc/imx_rproc.c | 10 +- drivers/remoteproc/mtk_common.h | 11 +- drivers/remoteproc/mtk_scp.c | 248 +- drivers/remoteproc/mtk_scp_ipi.c | 7 +- drivers/remoteproc/remoteproc_internal.h | 2 +- drivers/remoteproc/remoteproc_sysfs.c | 2 +- drivers/remoteproc/stm32_rproc.c | 2 +- drivers/remoteproc/ti_k3_r5_remoteproc.c | 13 +- drivers/remoteproc/xlnx_r5_remoteproc.c | 329 +- drivers/reset/Kconfig | 4 +- drivers/reset/hisilicon/hi6220_reset.c | 1 + drivers/reset/reset-mpfs.c | 95 +- drivers/rpmsg/qcom_glink_ssr.c | 1 + drivers/rpmsg/rpmsg_char.c | 2 +- drivers/rpmsg/rpmsg_core.c | 16 +- drivers/rpmsg/rpmsg_ctrl.c | 2 +- drivers/rpmsg/rpmsg_internal.h | 2 +- drivers/rpmsg/virtio_rpmsg_bus.c | 1 - 
drivers/rtc/Kconfig | 10 + drivers/rtc/Makefile | 1 + drivers/rtc/interface.c | 9 +- drivers/rtc/lib_test.c | 33 +- drivers/rtc/rtc-abx80x.c | 12 +- drivers/rtc/rtc-cmos.c | 10 +- drivers/rtc/rtc-cros-ec.c | 9 +- drivers/rtc/rtc-isl1208.c | 11 +- drivers/rtc/rtc-mcp795.c | 1 - drivers/rtc/rtc-nct3018y.c | 15 +- drivers/rtc/rtc-pcf8563.c | 9 +- drivers/rtc/rtc-rx6110.c | 4 +- drivers/rtc/rtc-rx8111.c | 368 + drivers/rtc/rtc-tps6594.c | 4 - drivers/s390/block/dasd_devmap.c | 10 +- drivers/s390/block/dasd_eckd.c | 6 +- drivers/s390/block/dasd_ioctl.c | 2 +- drivers/s390/char/Makefile | 2 +- drivers/s390/char/vmlogrdr.c | 20 +- drivers/s390/cio/airq.c | 1 - drivers/s390/cio/chp.c | 141 +- drivers/s390/cio/chp.h | 2 + drivers/s390/cio/chsc.c | 122 +- drivers/s390/cio/chsc.h | 5 + drivers/s390/cio/cio.c | 1 - drivers/s390/cio/css.c | 14 +- drivers/s390/cio/css.h | 13 +- drivers/s390/cio/idset.c | 12 +- drivers/s390/crypto/Makefile | 2 +- drivers/s390/crypto/ap_bus.c | 238 +- drivers/s390/crypto/ap_bus.h | 22 + drivers/s390/crypto/ap_queue.c | 4 +- drivers/s390/crypto/vfio_ap_ops.c | 224 +- drivers/s390/crypto/vfio_ap_private.h | 6 +- drivers/s390/crypto/zcrypt_api.c | 9 +- drivers/s390/crypto/zcrypt_ccamisc.c | 12 +- drivers/s390/crypto/zcrypt_ep11misc.c | 6 +- drivers/s390/net/ctcm_main.c | 2 +- drivers/s390/net/ism_drv.c | 2 +- drivers/s390/net/netiucv.c | 20 +- drivers/s390/net/smsgiucv_app.c | 21 +- drivers/scsi/FlashPoint.c | 1 - drivers/scsi/Kconfig | 4 +- drivers/scsi/a3000.c | 8 +- drivers/scsi/a4000t.c | 8 +- drivers/scsi/aha152x.c | 8 +- drivers/scsi/aic7xxx/Kconfig.aic79xx | 75 +- drivers/scsi/aic7xxx/Kconfig.aic7xxx | 97 +- drivers/scsi/aic7xxx/Makefile | 12 +- drivers/scsi/aic94xx/aic94xx_init.c | 29 +- drivers/scsi/arcmsr/arcmsr_hba.c | 2 +- drivers/scsi/atari_scsi.c | 8 +- drivers/scsi/bfa/bfad_attr.c | 28 +- drivers/scsi/bnx2fc/bnx2fc_tgt.c | 4 +- drivers/scsi/csiostor/csio_init.c | 3 - drivers/scsi/cxlflash/lunmgt.c | 6 +- drivers/scsi/cxlflash/main.c | 18 +- drivers/scsi/cxlflash/superpipe.c | 40 +- drivers/scsi/cxlflash/superpipe.h | 11 +- drivers/scsi/cxlflash/vlun.c | 9 +- drivers/scsi/hisi_sas/hisi_sas.h | 3 +- drivers/scsi/hisi_sas/hisi_sas_main.c | 7 +- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 20 +- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 26 +- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 31 +- drivers/scsi/hosts.c | 6 + drivers/scsi/hpsa.c | 2 +- drivers/scsi/hptiop.c | 8 +- drivers/scsi/ibmvscsi/ibmvfc.c | 5 +- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 11 +- drivers/scsi/imm.c | 12 +- drivers/scsi/ipr.c | 12 +- drivers/scsi/isci/init.c | 29 +- drivers/scsi/iscsi_tcp.c | 2 +- drivers/scsi/libsas/sas_ata.c | 84 + drivers/scsi/libsas/sas_expander.c | 35 +- drivers/scsi/libsas/sas_internal.h | 15 - drivers/scsi/libsas/sas_scsi_host.c | 7 +- drivers/scsi/lpfc/lpfc.h | 62 +- drivers/scsi/lpfc/lpfc_attr.c | 36 +- drivers/scsi/lpfc/lpfc_bsg.c | 3 +- drivers/scsi/lpfc/lpfc_ct.c | 24 +- drivers/scsi/lpfc/lpfc_els.c | 43 +- drivers/scsi/lpfc/lpfc_hbadisc.c | 135 +- drivers/scsi/lpfc/lpfc_hw4.h | 8 + drivers/scsi/lpfc/lpfc_init.c | 119 +- drivers/scsi/lpfc/lpfc_nportdisc.c | 63 +- drivers/scsi/lpfc/lpfc_nvme.c | 27 +- drivers/scsi/lpfc/lpfc_nvmet.c | 9 +- drivers/scsi/lpfc/lpfc_scsi.c | 71 +- drivers/scsi/lpfc/lpfc_scsi.h | 32 +- drivers/scsi/lpfc/lpfc_sli.c | 233 +- drivers/scsi/lpfc/lpfc_version.h | 2 +- drivers/scsi/mac_scsi.c | 8 +- drivers/scsi/megaraid/Kconfig.megaraid | 113 +- drivers/scsi/megaraid/megaraid_sas.h | 2 +- drivers/scsi/megaraid/megaraid_sas_base.c | 33 +- 
drivers/scsi/megaraid/megaraid_sas_fusion.c | 3 +- drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h | 3 + drivers/scsi/mpi3mr/mpi/mpi30_image.h | 20 +- drivers/scsi/mpi3mr/mpi/mpi30_ioc.h | 20 +- drivers/scsi/mpi3mr/mpi/mpi30_transport.h | 2 +- drivers/scsi/mpi3mr/mpi3mr.h | 15 +- drivers/scsi/mpi3mr/mpi3mr_app.c | 33 +- drivers/scsi/mpi3mr/mpi3mr_fw.c | 42 +- drivers/scsi/mpi3mr/mpi3mr_os.c | 86 +- drivers/scsi/mpi3mr/mpi3mr_transport.c | 16 +- drivers/scsi/mpt3sas/mpt3sas_base.c | 4 +- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 22 +- drivers/scsi/mpt3sas/mpt3sas_transport.c | 14 +- drivers/scsi/mvsas/mv_init.c | 36 +- drivers/scsi/pm8001/pm8001_ctl.c | 5 + drivers/scsi/pm8001/pm8001_init.c | 21 +- drivers/scsi/pm8001/pm8001_sas.h | 1 + drivers/scsi/pmcraid.c | 13 +- drivers/scsi/ppa.c | 8 +- drivers/scsi/qedf/qedf_main.c | 2 +- drivers/scsi/qla2xxx/Kconfig | 42 +- drivers/scsi/qla2xxx/qla_attr.c | 11 +- drivers/scsi/qla2xxx/qla_bsg.c | 98 +- drivers/scsi/qla2xxx/qla_def.h | 17 +- drivers/scsi/qla2xxx/qla_gbl.h | 6 +- drivers/scsi/qla2xxx/qla_gs.c | 467 +- drivers/scsi/qla2xxx/qla_init.c | 92 +- drivers/scsi/qla2xxx/qla_inline.h | 8 + drivers/scsi/qla2xxx/qla_mid.c | 2 +- drivers/scsi/qla2xxx/qla_mr.c | 6 +- drivers/scsi/qla2xxx/qla_nvme.c | 5 +- drivers/scsi/qla2xxx/qla_os.c | 28 +- drivers/scsi/qla2xxx/qla_sup.c | 108 +- drivers/scsi/qla4xxx/ql4_mbx.c | 17 +- drivers/scsi/qla4xxx/ql4_os.c | 14 +- drivers/scsi/scsi_debug.c | 6 +- drivers/scsi/scsi_debugfs.c | 56 +- drivers/scsi/scsi_devinfo.c | 18 +- drivers/scsi/scsi_lib.c | 41 +- drivers/scsi/scsi_scan.c | 74 +- drivers/scsi/scsi_sysfs.c | 5 +- drivers/scsi/scsi_transport_fc.c | 15 +- drivers/scsi/scsi_transport_iscsi.c | 7 +- drivers/scsi/scsi_transport_sas.c | 4 +- drivers/scsi/scsicam.c | 2 +- drivers/scsi/sd.c | 14 +- drivers/scsi/sd.h | 19 - drivers/scsi/sd_zbc.c | 335 +- drivers/scsi/ses.c | 1 - drivers/scsi/smartpqi/smartpqi_init.c | 16 +- drivers/scsi/snic/snic_attrs.c | 11 +- drivers/scsi/sr.c | 1 - drivers/scsi/sr_ioctl.c | 2 +- drivers/scsi/st.c | 1 - drivers/scsi/virtio_scsi.c | 1 - drivers/scsi/vmw_pvscsi.c | 2 +- drivers/scsi/wd33c93.c | 4 +- drivers/slimbus/qcom-ctrl.c | 6 +- drivers/slimbus/qcom-ngd-ctrl.c | 14 +- drivers/soc/Makefile | 2 +- drivers/soc/canaan/Kconfig | 4 +- drivers/soc/hisilicon/Kconfig | 2 +- drivers/soc/hisilicon/kunpeng_hccs.c | 6 + drivers/soc/litex/Kconfig | 2 +- drivers/soc/litex/litex_soc_ctrl.c | 4 +- drivers/soc/mediatek/mtk-cmdq-helper.c | 158 +- drivers/soc/mediatek/mtk-mutex.c | 42 + drivers/soc/mediatek/mtk-socinfo.c | 14 +- drivers/soc/pxa/ssp.c | 2 +- drivers/soc/qcom/cmd-db.c | 9 +- drivers/soc/qcom/icc-bwmon.c | 12 +- drivers/soc/qcom/pdr_interface.c | 8 +- drivers/soc/qcom/pmic_glink.c | 20 +- drivers/soc/qcom/pmic_pdcharger_ulog.c | 4 + drivers/soc/qcom/pmic_pdcharger_ulog.h | 2 +- drivers/soc/qcom/qcom_stats.c | 4 + drivers/soc/qcom/rpm_master_stats.c | 4 + drivers/soc/qcom/rpmh-rsc.c | 9 +- drivers/soc/qcom/rpmh.c | 1 - drivers/soc/qcom/socinfo.c | 3 + drivers/soc/qcom/trace-aoss.h | 4 +- drivers/soc/qcom/trace-rpmh.h | 4 +- drivers/soc/renesas/Kconfig | 6 + drivers/soc/renesas/renesas-soc.c | 20 +- drivers/soc/samsung/exynos-asv.c | 10 +- drivers/soc/tegra/fuse/fuse-tegra.c | 4 +- drivers/soc/tegra/pmc.c | 2 + drivers/soc/xilinx/xlnx_event_manager.c | 15 +- drivers/soc/xilinx/zynqmp_power.c | 4 +- drivers/soundwire/amd_init.c | 36 +- drivers/soundwire/amd_init.h | 8 + drivers/soundwire/amd_manager.c | 16 +- drivers/soundwire/bus.c | 14 +- drivers/soundwire/bus_type.c | 5 +- 
drivers/soundwire/cadence_master.c | 36 +- drivers/soundwire/intel.c | 68 +- drivers/soundwire/intel.h | 7 + drivers/soundwire/intel_ace2x.c | 117 +- drivers/soundwire/intel_auxdevice.c | 45 +- drivers/soundwire/intel_auxdevice.h | 1 + drivers/soundwire/intel_init.c | 14 + drivers/soundwire/mipi_disco.c | 30 +- drivers/soundwire/qcom.c | 28 +- drivers/soundwire/sysfs_local.h | 4 +- drivers/soundwire/sysfs_slave.c | 64 +- drivers/soundwire/sysfs_slave_dpn.c | 3 + drivers/spi/Kconfig | 22 +- drivers/spi/Makefile | 1 + drivers/spi/atmel-quadspi.c | 11 +- drivers/spi/spi-airoha-snfi.c | 1129 ++ drivers/spi/spi-altera-platform.c | 1 - drivers/spi/spi-amd.c | 112 + drivers/spi/spi-armada-3700.c | 8 +- drivers/spi/spi-atmel.c | 8 +- drivers/spi/spi-au1550.c | 29 +- drivers/spi/spi-bitbang.c | 23 +- drivers/spi/spi-cadence-quadspi.c | 109 +- drivers/spi/spi-cadence-xspi.c | 8 +- drivers/spi/spi-coldfire-qspi.c | 1 - drivers/spi/spi-cs42l43.c | 127 +- drivers/spi/spi-dw-core.c | 20 +- drivers/spi/spi-dw-mmio.c | 13 +- drivers/spi/spi-dw.h | 2 +- drivers/spi/spi-fsl-cpm.c | 14 +- drivers/spi/spi-fsl-cpm.h | 5 +- drivers/spi/spi-fsl-dspi.c | 1 - drivers/spi/spi-fsl-lpspi.c | 14 +- drivers/spi/spi-fsl-spi.c | 7 +- drivers/spi/spi-imx.c | 20 +- drivers/spi/spi-loopback-test.c | 1 - drivers/spi/spi-microchip-core.c | 139 +- drivers/spi/spi-mt65xx.c | 32 +- drivers/spi/spi-mt7621.c | 95 +- drivers/spi/spi-mux.c | 2 + drivers/spi/spi-oc-tiny.c | 2 - drivers/spi/spi-omap2-mcspi.c | 82 +- drivers/spi/spi-pic32-sqi.c | 6 +- drivers/spi/spi-pic32.c | 6 +- drivers/spi/spi-pxa2xx-dma.c | 38 +- drivers/spi/spi-pxa2xx-pci.c | 10 +- drivers/spi/spi-pxa2xx.c | 223 +- drivers/spi/spi-pxa2xx.h | 42 +- drivers/spi/spi-rspi.c | 12 +- drivers/spi/spi-s3c64xx.c | 6 +- drivers/spi/spi-sun4i.c | 9 +- drivers/spi/spi-sun6i.c | 17 +- drivers/spi/spi-xlp.c | 8 +- drivers/spi/spi.c | 153 +- drivers/spi/spidev.c | 1 + drivers/spmi/spmi-pmic-arb.c | 964 +- drivers/spmi/spmi.c | 2 +- drivers/ssb/main.c | 2 +- drivers/staging/Kconfig | 4 - drivers/staging/Makefile | 2 - drivers/staging/axis-fifo/axis-fifo.c | 10 +- drivers/staging/fbtft/fb_seps525.c | 7 +- drivers/staging/fbtft/fb_ssd1351.c | 4 +- drivers/staging/fbtft/fbtft-core.c | 5 +- drivers/staging/greybus/audio_manager_module.c | 2 +- drivers/staging/greybus/camera.c | 58 +- drivers/staging/greybus/fw-management.c | 12 +- drivers/staging/greybus/loopback.c | 1 + drivers/staging/iio/impedance-analyzer/ad5933.c | 26 +- drivers/staging/ks7010/ks7010_sdio.c | 2 +- drivers/staging/media/atomisp/Makefile | 1 - drivers/staging/media/atomisp/i2c/Kconfig | 15 - drivers/staging/media/atomisp/i2c/Makefile | 5 - drivers/staging/media/atomisp/i2c/atomisp-gc2235.c | 2 +- drivers/staging/media/atomisp/i2c/atomisp-lm3554.c | 955 -- .../staging/media/atomisp/i2c/atomisp-mt9m114.c | 4 +- drivers/staging/media/atomisp/i2c/atomisp-ov2722.c | 14 +- drivers/staging/media/atomisp/i2c/ov2722.h | 1 - .../staging/media/atomisp/include/linux/atomisp.h | 81 - .../atomisp/include/linux/atomisp_gmin_platform.h | 6 +- .../media/atomisp/include/linux/atomisp_platform.h | 41 +- .../staging/media/atomisp/include/media/lm3554.h | 132 - drivers/staging/media/atomisp/pci/atomisp_cmd.c | 281 +- drivers/staging/media/atomisp/pci/atomisp_cmd.h | 13 +- .../media/atomisp/pci/atomisp_compat_css20.c | 31 +- drivers/staging/media/atomisp/pci/atomisp_csi2.c | 26 +- drivers/staging/media/atomisp/pci/atomisp_fops.c | 49 +- .../media/atomisp/pci/atomisp_gmin_platform.c | 123 +- 
.../staging/media/atomisp/pci/atomisp_internal.h | 19 +- drivers/staging/media/atomisp/pci/atomisp_ioctl.c | 493 +- drivers/staging/media/atomisp/pci/atomisp_subdev.c | 102 +- drivers/staging/media/atomisp/pci/atomisp_subdev.h | 17 +- drivers/staging/media/atomisp/pci/atomisp_tpg.c | 164 - drivers/staging/media/atomisp/pci/atomisp_tpg.h | 39 - drivers/staging/media/atomisp/pci/atomisp_v4l2.c | 183 +- drivers/staging/media/atomisp/pci/bits.h | 4 +- drivers/staging/media/atomisp/pci/defs.h | 37 - .../pci/hive_isp_css_common/host/dma_local.h | 1 - .../pci/hive_isp_css_common/host/input_system.c | 38 - drivers/staging/media/atomisp/pci/hive_types.h | 19 - drivers/staging/media/atomisp/pci/hmm/hmm.c | 2 +- drivers/staging/media/atomisp/pci/ia_css.h | 1 - .../media/atomisp/pci/ia_css_frame_public.h | 8 - .../media/atomisp/pci/ia_css_stream_public.h | 17 - drivers/staging/media/atomisp/pci/ia_css_tpg.h | 79 - .../atomisp/pci/isp2400_input_system_global.h | 1 - .../atomisp/pci/isp2400_input_system_public.h | 15 - .../atomisp/pci/isp2401_input_system_global.h | 1 - .../atomisp/pci/runtime/debug/src/ia_css_debug.c | 20 +- .../media/atomisp/pci/runtime/ifmtr/src/ifmtr.c | 11 - .../atomisp/pci/runtime/isys/src/virtual_isys.c | 28 +- drivers/staging/media/atomisp/pci/sh_css.c | 137 +- .../staging/media/atomisp/pci/sh_css_internal.h | 1 - drivers/staging/media/atomisp/pci/sh_css_mipi.c | 2 +- drivers/staging/media/atomisp/pci/sh_css_sp.c | 127 +- drivers/staging/media/atomisp/pci/sh_css_sp.h | 7 - drivers/staging/media/atomisp/pci/system_global.h | 12 - drivers/staging/media/imx/Kconfig | 1 - drivers/staging/media/ipu3/ipu3-css-fw.c | 4 +- drivers/staging/media/ipu3/ipu3-css-fw.h | 2 + drivers/staging/media/ipu3/ipu3-css.c | 1 - drivers/staging/media/ipu3/ipu3.c | 10 +- drivers/staging/media/max96712/max96712.c | 2 +- drivers/staging/media/starfive/camss/stf-camss.c | 6 +- drivers/staging/media/starfive/camss/stf-isp.c | 10 +- drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c | 3 +- drivers/staging/media/tegra-video/tegra20.c | 10 +- drivers/staging/media/tegra-video/vi.c | 12 +- drivers/staging/nvec/TODO | 1 - drivers/staging/nvec/nvec.c | 11 +- drivers/staging/nvec/nvec_kbd.c | 9 +- drivers/staging/nvec/nvec_ps2.c | 31 +- .../Documentation/devicetree/pi433-overlay.dtso | 48 - .../pi433/Documentation/devicetree/pi433.txt | 62 - drivers/staging/pi433/Documentation/pi433.txt | 274 - drivers/staging/pi433/Kconfig | 17 - drivers/staging/pi433/Makefile | 4 - drivers/staging/pi433/TODO | 8 - drivers/staging/pi433/pi433_if.c | 1438 --- drivers/staging/pi433/pi433_if.h | 148 - drivers/staging/pi433/rf69.c | 832 -- drivers/staging/pi433/rf69.h | 66 - drivers/staging/pi433/rf69_enum.h | 126 - drivers/staging/pi433/rf69_registers.h | 478 - drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c | 11 +- drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c | 16 +- drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h | 4 +- drivers/staging/rtl8192e/rtl8192e/rtl_core.c | 2 +- drivers/staging/rtl8192e/rtl8192e/rtl_dm.c | 10 +- drivers/staging/rtl8192e/rtl819x_HT.h | 36 +- drivers/staging/rtl8192e/rtl819x_HTProc.c | 108 +- drivers/staging/rtl8192e/rtllib.h | 20 +- drivers/staging/rtl8192e/rtllib_rx.c | 2 +- drivers/staging/rtl8192e/rtllib_softmac_wx.c | 2 +- drivers/staging/rtl8192e/rtllib_tx.c | 2 +- drivers/staging/rtl8192e/rtllib_wx.c | 8 +- drivers/staging/rtl8712/mlme_linux.c | 18 +- drivers/staging/rtl8712/os_intfs.c | 3 +- drivers/staging/rtl8712/rtl8712_led.c | 2 +- drivers/staging/rtl8712/rtl8712_recv.c | 6 +- 
drivers/staging/rtl8712/rtl8712_recv.h | 2 +- drivers/staging/rtl8723bs/Makefile | 2 +- drivers/staging/rtl8723bs/core/rtw_mlme.c | 92 +- drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c | 5 +- drivers/staging/rtl8723bs/os_dep/os_intfs.c | 2 +- drivers/staging/rts5208/rtsx.c | 24 +- drivers/staging/rts5208/rtsx_scsi.c | 10 +- drivers/staging/vc04_services/Kconfig | 36 +- drivers/staging/vc04_services/Makefile | 1 - .../staging/vc04_services/bcm2835-audio/Kconfig | 2 +- .../vc04_services/bcm2835-audio/bcm2835-vchiq.c | 5 +- .../vc04_services/bcm2835-camera/bcm2835-camera.c | 4 +- .../include/linux/raspberrypi/vchiq.h | 4 +- drivers/staging/vc04_services/interface/TODO | 15 - .../vc04_services/interface/vchiq_arm/vchiq_arm.c | 271 +- .../vc04_services/interface/vchiq_arm/vchiq_arm.h | 41 +- .../vc04_services/interface/vchiq_arm/vchiq_bus.c | 13 + .../vc04_services/interface/vchiq_arm/vchiq_bus.h | 3 + .../interface/vchiq_arm/vchiq_connected.c | 74 - .../interface/vchiq_arm/vchiq_connected.h | 12 - .../vc04_services/interface/vchiq_arm/vchiq_core.c | 67 +- .../vc04_services/interface/vchiq_arm/vchiq_core.h | 22 +- .../interface/vchiq_arm/vchiq_debugfs.c | 11 +- .../interface/vchiq_arm/vchiq_debugfs.h | 2 +- .../vc04_services/interface/vchiq_arm/vchiq_dev.c | 40 +- .../staging/vc04_services/vchiq-mmal/mmal-vchiq.c | 14 +- .../staging/vc04_services/vchiq-mmal/mmal-vchiq.h | 32 +- drivers/staging/vt6655/rf.h | 4 - drivers/staging/vt6655/srom.c | 1 - drivers/staging/wlan-ng/Kconfig | 13 - drivers/staging/wlan-ng/Makefile | 8 - drivers/staging/wlan-ng/README | 8 - drivers/staging/wlan-ng/TODO | 16 - drivers/staging/wlan-ng/cfg80211.c | 718 -- drivers/staging/wlan-ng/hfa384x.h | 1236 -- drivers/staging/wlan-ng/hfa384x_usb.c | 3880 ------ drivers/staging/wlan-ng/p80211conv.c | 643 - drivers/staging/wlan-ng/p80211conv.h | 141 - drivers/staging/wlan-ng/p80211hdr.h | 189 - drivers/staging/wlan-ng/p80211ioctl.h | 69 - drivers/staging/wlan-ng/p80211metadef.h | 227 - drivers/staging/wlan-ng/p80211metastruct.h | 236 - drivers/staging/wlan-ng/p80211mgmt.h | 199 - drivers/staging/wlan-ng/p80211msg.h | 39 - drivers/staging/wlan-ng/p80211netdev.c | 988 -- drivers/staging/wlan-ng/p80211netdev.h | 212 - drivers/staging/wlan-ng/p80211req.c | 223 - drivers/staging/wlan-ng/p80211req.h | 33 - drivers/staging/wlan-ng/p80211types.h | 292 - drivers/staging/wlan-ng/p80211wep.c | 207 - drivers/staging/wlan-ng/prism2fw.c | 1213 -- drivers/staging/wlan-ng/prism2mgmt.c | 1315 -- drivers/staging/wlan-ng/prism2mgmt.h | 89 - drivers/staging/wlan-ng/prism2mib.c | 742 -- drivers/staging/wlan-ng/prism2sta.c | 1945 --- drivers/staging/wlan-ng/prism2usb.c | 299 - drivers/target/target_core_device.c | 1 - drivers/target/target_core_file.c | 4 +- drivers/tee/Kconfig | 1 + drivers/tee/Makefile | 1 + drivers/tee/amdtee/amdtee_private.h | 2 +- drivers/tee/amdtee/call.c | 2 +- drivers/tee/amdtee/core.c | 3 +- drivers/tee/amdtee/shm_pool.c | 2 +- drivers/tee/optee/call.c | 2 +- drivers/tee/optee/core.c | 66 +- drivers/tee/optee/device.c | 2 +- drivers/tee/optee/ffa_abi.c | 8 +- drivers/tee/optee/notif.c | 2 +- drivers/tee/optee/optee_private.h | 14 +- drivers/tee/optee/rpc.c | 2 +- drivers/tee/optee/smc_abi.c | 17 +- drivers/tee/tee_core.c | 2 +- drivers/tee/tee_private.h | 35 - drivers/tee/tee_shm.c | 67 +- drivers/tee/tee_shm_pool.c | 2 +- drivers/tee/tstee/Kconfig | 11 + drivers/tee/tstee/Makefile | 3 + drivers/tee/tstee/core.c | 480 + drivers/tee/tstee/tstee_private.h | 92 + drivers/thermal/amlogic_thermal.c | 10 + 
drivers/thermal/armada_thermal.c | 9 +- drivers/thermal/broadcom/bcm2835_thermal.c | 19 +- drivers/thermal/cpufreq_cooling.c | 3 - drivers/thermal/gov_bang_bang.c | 97 +- drivers/thermal/gov_fair_share.c | 82 +- drivers/thermal/gov_power_allocator.c | 46 +- drivers/thermal/gov_step_wise.c | 102 +- drivers/thermal/gov_user_space.c | 10 +- .../intel/int340x_thermal/acpi_thermal_rel.c | 4 +- .../intel/int340x_thermal/int3400_thermal.c | 10 +- .../intel/int340x_thermal/int3403_thermal.c | 1 + .../intel/int340x_thermal/processor_thermal_mbox.c | 1 + .../processor_thermal_power_floor.c | 1 + .../intel/int340x_thermal/processor_thermal_rapl.c | 1 + .../intel/int340x_thermal/processor_thermal_rfim.c | 1 + .../int340x_thermal/processor_thermal_wt_hint.c | 1 + .../int340x_thermal/processor_thermal_wt_req.c | 1 + drivers/thermal/intel/intel_hfi.c | 113 +- drivers/thermal/intel/intel_soc_dts_iosf.c | 1 + drivers/thermal/k3_bandgap.c | 1 - drivers/thermal/loongson2_thermal.c | 117 +- drivers/thermal/mediatek/lvts_thermal.c | 431 +- drivers/thermal/qcom/qcom-spmi-temp-alarm.c | 1 - drivers/thermal/qcom/tsens-v2.c | 1 + drivers/thermal/qcom/tsens.c | 31 + drivers/thermal/qcom/tsens.h | 5 + drivers/thermal/rcar_gen3_thermal.c | 165 +- drivers/thermal/thermal_core.c | 338 +- drivers/thermal/thermal_core.h | 140 +- drivers/thermal/thermal_debugfs.c | 96 +- drivers/thermal/thermal_debugfs.h | 4 +- drivers/thermal/thermal_helpers.c | 10 +- drivers/thermal/thermal_netlink.c | 68 +- drivers/thermal/thermal_netlink.h | 26 + drivers/thermal/thermal_sysfs.c | 20 +- drivers/thermal/thermal_trace.h | 12 +- drivers/thermal/thermal_trace_ipa.h | 2 + drivers/thermal/thermal_trip.c | 38 +- drivers/thunderbolt/debugfs.c | 2 +- drivers/thunderbolt/icm.c | 1 + drivers/thunderbolt/retimer.c | 12 +- drivers/thunderbolt/tb.c | 9 +- drivers/thunderbolt/tb_msgs.h | 6 - drivers/thunderbolt/trace.h | 13 +- drivers/thunderbolt/tunnel.c | 39 +- drivers/thunderbolt/usb4.c | 22 +- drivers/thunderbolt/xdomain.c | 2 +- drivers/tty/amiserial.c | 8 +- drivers/tty/hvc/hvc_iucv.c | 15 +- drivers/tty/hvc/hvc_xen.c | 2 +- drivers/tty/n_gsm.c | 2 +- drivers/tty/serial/8250/8250.h | 3 - drivers/tty/serial/8250/8250_alpha.c | 21 - drivers/tty/serial/8250/8250_bcm7271.c | 14 +- drivers/tty/serial/8250/8250_core.c | 7 +- drivers/tty/serial/8250/8250_dma.c | 31 +- drivers/tty/serial/8250/8250_dw.c | 45 +- drivers/tty/serial/8250/8250_exar.c | 1009 +- drivers/tty/serial/8250/8250_mtk.c | 2 +- drivers/tty/serial/8250/8250_of.c | 37 + drivers/tty/serial/8250/8250_omap.c | 49 +- drivers/tty/serial/8250/8250_pci.c | 2 +- drivers/tty/serial/8250/8250_pci1xxxx.c | 50 +- drivers/tty/serial/8250/8250_pnp.c | 65 +- drivers/tty/serial/8250/8250_port.c | 29 +- drivers/tty/serial/8250/Makefile | 2 - drivers/tty/serial/Kconfig | 55 +- drivers/tty/serial/Makefile | 4 +- drivers/tty/serial/amba-pl011.c | 62 +- drivers/tty/serial/ar933x_uart.c | 18 +- drivers/tty/serial/arc_uart.c | 8 +- drivers/tty/serial/atmel_serial.c | 150 +- drivers/tty/serial/clps711x.c | 12 +- drivers/tty/serial/cpm_uart.c | 20 +- drivers/tty/serial/digicolor-usart.c | 12 +- drivers/tty/serial/dz.c | 13 +- drivers/tty/serial/fsl_linflexuart.c | 17 +- drivers/tty/serial/fsl_lpuart.c | 45 +- drivers/tty/serial/icom.c | 25 +- drivers/tty/serial/imx.c | 54 +- drivers/tty/serial/ip22zilog.c | 26 +- drivers/tty/serial/jsm/jsm_cls.c | 29 +- drivers/tty/serial/jsm/jsm_neo.c | 38 +- drivers/tty/serial/max3100.c | 320 +- drivers/tty/serial/max310x.c | 39 +- drivers/tty/serial/men_z135_uart.c | 26 +- 
drivers/tty/serial/meson_uart.c | 12 +- drivers/tty/serial/milbeaut_usio.c | 15 +- drivers/tty/serial/msm_serial.c | 122 +- drivers/tty/serial/mvebu-uart.c | 8 +- drivers/tty/serial/mxs-auart.c | 23 +- drivers/tty/serial/omap-serial.c | 1 - drivers/tty/serial/pch_uart.c | 21 +- drivers/tty/serial/pic32_uart.c | 17 +- drivers/tty/serial/pmac_zilog.c | 33 +- drivers/tty/serial/qcom_geni_serial.c | 83 +- drivers/tty/serial/rda-uart.c | 17 +- drivers/tty/serial/samsung_tty.c | 54 +- drivers/tty/serial/sb1250-duart.c | 13 +- drivers/tty/serial/sc16is7xx.c | 301 +- drivers/tty/serial/sc16is7xx.h | 41 + drivers/tty/serial/sc16is7xx_i2c.c | 67 + drivers/tty/serial/sc16is7xx_spi.c | 90 + drivers/tty/serial/sccnxp.c | 16 +- drivers/tty/serial/serial-tegra.c | 43 +- drivers/tty/serial/serial_core.c | 150 +- drivers/tty/serial/serial_port.c | 9 +- drivers/tty/serial/sh-sci.c | 63 +- drivers/tty/serial/sifive.c | 4 +- drivers/tty/serial/sprd_serial.c | 20 +- drivers/tty/serial/st-asc.c | 4 +- drivers/tty/serial/stm32-usart.c | 52 +- drivers/tty/serial/sunhv.c | 35 +- drivers/tty/serial/sunplus-uart.c | 16 +- drivers/tty/serial/sunsab.c | 30 +- drivers/tty/serial/sunsu.c | 15 +- drivers/tty/serial/sunzilog.c | 27 +- drivers/tty/serial/tegra-tcu.c | 10 +- drivers/tty/serial/timbuart.c | 17 +- drivers/tty/serial/uartlite.c | 13 +- drivers/tty/serial/ucc_uart.c | 20 +- drivers/tty/serial/xilinx_uartps.c | 35 +- drivers/tty/serial/zs.c | 13 +- drivers/tty/sysrq.c | 13 +- drivers/tty/vt/conmakehash.c | 15 +- drivers/tty/vt/vc_screen.c | 2 +- drivers/ufs/core/ufs-mcq.c | 10 +- drivers/ufs/core/ufs_bsg.c | 3 +- drivers/ufs/core/ufshcd.c | 364 +- drivers/ufs/host/ufs-exynos.c | 205 +- drivers/ufs/host/ufs-exynos.h | 24 +- drivers/ufs/host/ufs-mediatek-sip.h | 94 + drivers/ufs/host/ufs-mediatek.c | 131 +- drivers/ufs/host/ufs-mediatek.h | 90 +- drivers/ufs/host/ufs-qcom.c | 18 +- drivers/uio/Kconfig | 18 - drivers/uio/Makefile | 1 - drivers/uio/uio.c | 24 +- drivers/uio/uio_fsl_elbc_gpcm.c | 6 +- drivers/uio/uio_hv_generic.c | 19 +- drivers/uio/uio_pdrv_genirq.c | 10 +- drivers/uio/uio_pruss.c | 255 - drivers/usb/cdns3/cdns3-trace.h | 26 +- drivers/usb/cdns3/cdnsp-trace.h | 10 +- drivers/usb/chipidea/ci_hdrc_imx.c | 2 +- drivers/usb/chipidea/ci_hdrc_npcm.c | 6 +- drivers/usb/chipidea/trace.h | 4 +- drivers/usb/core/Makefile | 4 +- drivers/usb/core/config.c | 8 +- drivers/usb/core/hcd-pci.c | 3 +- drivers/usb/core/hcd.c | 4 +- drivers/usb/core/hub.c | 17 +- drivers/usb/core/hub.h | 2 +- drivers/usb/dwc2/core.c | 42 + drivers/usb/dwc2/core.h | 8 + drivers/usb/dwc2/core_intr.c | 26 +- drivers/usb/dwc2/debugfs.c | 1 + drivers/usb/dwc2/gadget.c | 28 +- drivers/usb/dwc2/hcd.c | 10 + drivers/usb/dwc2/hcd_queue.c | 52 +- drivers/usb/dwc2/hw.h | 14 + drivers/usb/dwc2/params.c | 43 + drivers/usb/dwc3/core.c | 320 +- drivers/usb/dwc3/core.h | 20 +- drivers/usb/dwc3/drd.c | 15 +- drivers/usb/dwc3/dwc3-exynos.c | 22 +- drivers/usb/dwc3/dwc3-qcom.c | 255 +- drivers/usb/dwc3/trace.h | 8 +- drivers/usb/fotg210/Makefile | 10 +- drivers/usb/gadget/function/f_fs.c | 20 +- drivers/usb/gadget/function/u_audio.c | 11 +- drivers/usb/gadget/function/u_ether.c | 2 +- drivers/usb/gadget/function/uvc_v4l2.c | 24 +- drivers/usb/gadget/udc/cdns2/cdns2-trace.h | 22 +- drivers/usb/gadget/udc/core.c | 9 + drivers/usb/gadget/udc/dummy_hcd.c | 37 +- drivers/usb/gadget/udc/mv_u3d_core.c | 4 +- drivers/usb/gadget/udc/omap_udc.c | 10 +- drivers/usb/gadget/udc/trace.h | 4 +- drivers/usb/host/ehci-dbg.c | 10 +- drivers/usb/host/ehci-exynos.c | 27 +- 
drivers/usb/host/ehci-q.c | 20 +- drivers/usb/host/ehci.h | 8 +- drivers/usb/host/ohci-exynos.c | 27 +- drivers/usb/host/xhci-dbgcap.c | 2 +- drivers/usb/host/xhci-mem.c | 26 +- drivers/usb/host/xhci-pci.c | 38 +- drivers/usb/host/xhci-ring.c | 118 +- drivers/usb/host/xhci.c | 38 +- drivers/usb/host/xhci.h | 18 +- drivers/usb/image/microtek.c | 8 +- drivers/usb/misc/Kconfig | 16 +- drivers/usb/misc/Makefile | 2 +- drivers/usb/misc/onboard_usb_dev.c | 550 + drivers/usb/misc/onboard_usb_dev.h | 122 + drivers/usb/misc/onboard_usb_dev_pdevs.c | 144 + drivers/usb/misc/onboard_usb_hub.c | 507 - drivers/usb/misc/onboard_usb_hub.h | 90 - drivers/usb/misc/onboard_usb_hub_pdevs.c | 143 - drivers/usb/misc/uss720.c | 20 +- drivers/usb/mtu3/mtu3_trace.h | 8 +- drivers/usb/musb/musb_gadget.c | 9 - drivers/usb/musb/musb_trace.h | 12 +- drivers/usb/phy/phy-fsl-usb.c | 1 - drivers/usb/phy/phy-generic.c | 1 + drivers/usb/renesas_usbhs/common.c | 41 +- drivers/usb/renesas_usbhs/rza.h | 1 + drivers/usb/renesas_usbhs/rza2.c | 13 + drivers/usb/storage/scsiglue.c | 57 +- drivers/usb/storage/uas.c | 29 +- drivers/usb/storage/usb.c | 10 + drivers/usb/typec/altmodes/displayport.c | 1 - drivers/usb/typec/altmodes/nvidia.c | 1 - drivers/usb/typec/mux/Kconfig | 2 +- drivers/usb/typec/mux/gpio-sbu-mux.c | 8 +- drivers/usb/typec/mux/nb7vpq904m.c | 7 +- drivers/usb/typec/mux/ptn36502.c | 55 +- drivers/usb/typec/stusb160x.c | 2 +- drivers/usb/typec/tipd/core.c | 5 +- drivers/usb/typec/ucsi/ucsi.c | 162 +- drivers/usb/typec/ucsi/ucsi.h | 8 +- drivers/usb/typec/ucsi/ucsi_acpi.c | 87 +- drivers/usb/typec/ucsi/ucsi_glink.c | 78 +- drivers/usb/typec/ucsi/ucsi_stm32g0.c | 1 + drivers/vdpa/vdpa.c | 2 +- drivers/vdpa/vdpa_user/vduse_dev.c | 10 + drivers/vdpa/virtio_pci/vp_vdpa.c | 27 +- drivers/vfio/cdx/Makefile | 2 +- drivers/vfio/cdx/intr.c | 217 + drivers/vfio/cdx/main.c | 63 +- drivers/vfio/cdx/private.h | 18 + drivers/vfio/pci/Kconfig | 2 + drivers/vfio/pci/Makefile | 2 + drivers/vfio/pci/pds/dirty.c | 1 + drivers/vfio/pci/qat/Kconfig | 12 + drivers/vfio/pci/qat/Makefile | 3 + drivers/vfio/pci/qat/main.c | 702 ++ drivers/vfio/pci/vfio_pci_core.c | 3 + drivers/vfio/pci/vfio_pci_intrs.c | 57 +- drivers/vfio/platform/vfio_amba.c | 1 - drivers/vfio/vfio_iommu_type1.c | 4 +- drivers/vhost/net.c | 8 +- drivers/vhost/scsi.c | 53 +- drivers/vhost/vdpa.c | 6 +- drivers/vhost/vhost.c | 12 - drivers/vhost/vhost.h | 1 - drivers/vhost/vsock.c | 4 +- drivers/video/backlight/aat2870_bl.c | 7 - drivers/video/backlight/ams369fg06.c | 2 +- drivers/video/backlight/backlight.c | 39 +- drivers/video/backlight/bd6107.c | 12 +- drivers/video/backlight/corgi_lcd.c | 2 +- drivers/video/backlight/gpio_backlight.c | 12 +- drivers/video/backlight/hx8357.c | 2 +- drivers/video/backlight/ili922x.c | 2 +- drivers/video/backlight/ili9320.c | 2 +- drivers/video/backlight/jornada720_lcd.c | 2 +- drivers/video/backlight/l4f00242t03.c | 2 +- drivers/video/backlight/lcd.c | 27 +- drivers/video/backlight/lms283gf05.c | 2 +- drivers/video/backlight/lms501kf03.c | 2 +- drivers/video/backlight/lp8788_bl.c | 151 +- drivers/video/backlight/ltv350qv.c | 2 +- drivers/video/backlight/lv5207lp.c | 12 +- drivers/video/backlight/mp3309c.c | 6 - drivers/video/backlight/omap1_bl.c | 47 +- drivers/video/backlight/otm3225a.c | 3 +- drivers/video/backlight/platform_lcd.c | 2 +- drivers/video/backlight/pwm_bl.c | 12 - drivers/video/backlight/sky81452-backlight.c | 8 +- drivers/video/backlight/tdo24m.c | 2 +- drivers/video/fbdev/Kconfig | 22 +- drivers/video/fbdev/atmel_lcdfb.c | 
1 - drivers/video/fbdev/au1200fb.c | 2 +- drivers/video/fbdev/clps711x-fb.c | 2 +- drivers/video/fbdev/core/fb_backlight.c | 6 + drivers/video/fbdev/core/fbcon.c | 2 +- drivers/video/fbdev/fsl-diu-fb.c | 2 +- drivers/video/fbdev/imxfb.c | 2 +- drivers/video/fbdev/offb.c | 3 +- drivers/video/fbdev/omap/lcd_ams_delta.c | 2 +- .../fbdev/omap2/omapfb/displays/panel-dsi-cm.c | 7 +- .../omap2/omapfb/displays/panel-sony-acx565akm.c | 10 +- drivers/video/fbdev/omap2/omapfb/dss/dsi.c | 3 +- drivers/video/fbdev/omap2/omapfb/dss/dss-of.c | 20 +- drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c | 3 +- drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c | 3 +- drivers/video/fbdev/omap2/omapfb/dss/venc.c | 3 +- drivers/video/fbdev/pxafb.c | 2 +- drivers/video/fbdev/sh_mobile_lcdcfb.c | 7 - drivers/video/fbdev/ssd1307fb.c | 31 +- drivers/video/fbdev/uvesafb.c | 2 +- drivers/video/fbdev/vesafb.c | 2 +- drivers/video/hdmi.c | 10 +- drivers/video/logo/pnmtologo.c | 2 - drivers/virt/Kconfig | 1 - drivers/virt/acrn/ioreq.c | 2 +- drivers/virt/acrn/mm.c | 4 +- drivers/virt/coco/sev-guest/sev-guest.c | 28 +- drivers/virt/vmgenid.c | 150 +- drivers/virtio/Kconfig | 10 + drivers/virtio/Makefile | 1 + drivers/virtio/virtio.c | 8 + drivers/virtio/virtio_balloon.c | 76 +- drivers/virtio/virtio_debug.c | 114 + drivers/virtio/virtio_input.c | 1 - drivers/virtio/virtio_mem.c | 70 +- drivers/virtio/virtio_mmio.c | 6 +- drivers/virtio/virtio_ring.c | 7 +- drivers/w1/masters/w1-gpio.c | 62 +- drivers/watchdog/Kconfig | 70 +- drivers/watchdog/Makefile | 1 + drivers/watchdog/lenovo_se10_wdt.c | 308 + drivers/watchdog/menz69_wdt.c | 1 + drivers/watchdog/mtx-1_wdt.c | 1 - drivers/watchdog/octeon-wdt-main.c | 6 +- drivers/watchdog/omap_wdt.c | 1 + drivers/watchdog/rzg2l_wdt.c | 22 +- drivers/watchdog/simatic-ipc-wdt.c | 1 + drivers/watchdog/ts4800_wdt.c | 1 + drivers/watchdog/twl4030_wdt.c | 1 + drivers/xen/grant-dma-ops.c | 2 +- drivers/xen/pvcalls-back.c | 6 +- drivers/xen/swiotlb-xen.c | 4 +- drivers/xen/xenbus/Makefile | 14 +- drivers/zorro/zorro.c | 14 +- 5056 files changed, 278239 insertions(+), 175339 deletions(-) create mode 100644 drivers/accel/qaic/qaic_debugfs.c create mode 100644 drivers/accel/qaic/qaic_debugfs.h create mode 100644 drivers/accel/qaic/sahara.c create mode 100644 drivers/accel/qaic/sahara.h delete mode 100644 drivers/acpi/acpi_cmos_rtc.c delete mode 100644 drivers/acpi/acpi_lpss.c delete mode 100644 drivers/acpi/blacklist.c create mode 100644 drivers/acpi/nhlt.c create mode 100644 drivers/acpi/x86/Makefile create mode 100644 drivers/acpi/x86/blacklist.c create mode 100644 drivers/acpi/x86/cmos_rtc.c create mode 100644 drivers/acpi/x86/lpss.c create mode 100644 drivers/bluetooth/btintel_pcie.c create mode 100644 drivers/bluetooth/btintel_pcie.h create mode 100644 drivers/bus/stm32_etzpc.c create mode 100644 drivers/bus/stm32_firewall.c create mode 100644 drivers/bus/stm32_firewall.h create mode 100644 drivers/bus/stm32_rifsc.c create mode 100644 drivers/char/tpm/tpm-buf.c create mode 100644 drivers/char/tpm/tpm2-sessions.c create mode 100644 drivers/clk/imx/clk-imx95-blk-ctl.c create mode 100644 drivers/clk/meson/vclk.c create mode 100644 drivers/clk/meson/vclk.h create mode 100644 drivers/clk/sophgo/Kconfig create mode 100644 drivers/clk/sophgo/Makefile create mode 100644 drivers/clk/sophgo/clk-cv1800.c create mode 100644 drivers/clk/sophgo/clk-cv1800.h create mode 100644 drivers/clk/sophgo/clk-cv18xx-common.c create mode 100644 drivers/clk/sophgo/clk-cv18xx-common.h create mode 100644 
drivers/clk/sophgo/clk-cv18xx-ip.c create mode 100644 drivers/clk/sophgo/clk-cv18xx-ip.h create mode 100644 drivers/clk/sophgo/clk-cv18xx-pll.c create mode 100644 drivers/clk/sophgo/clk-cv18xx-pll.h create mode 100644 drivers/clk/stm32/clk-stm32mp25.c create mode 100644 drivers/clk/stm32/stm32mp25_rcc.h create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h create mode 100644 drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h create mode 100644 drivers/crypto/intel/qat/qat_common/qat_mig_dev.c create mode 100644 drivers/crypto/tegra/Makefile create mode 100644 drivers/crypto/tegra/tegra-se-aes.c create mode 100644 drivers/crypto/tegra/tegra-se-hash.c create mode 100644 drivers/crypto/tegra/tegra-se-key.c create mode 100644 drivers/crypto/tegra/tegra-se-main.c create mode 100644 drivers/crypto/tegra/tegra-se.h create mode 100644 drivers/dma/fsl-edma-trace.c create mode 100644 drivers/dma/fsl-edma-trace.h create mode 100644 drivers/firewire/core-trace.c create mode 100644 drivers/firewire/packet-header-definitions.h create mode 100644 drivers/firewire/packet-serdes-test.c create mode 100644 drivers/firmware/arm_scmi/pinctrl.c create mode 100644 drivers/fpga/xilinx-core.c create mode 100644 drivers/fpga/xilinx-core.h create mode 100644 drivers/fpga/xilinx-selectmap.c create mode 100644 drivers/gpio/gpio-graniterapids.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c create mode 100644 drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c create mode 100644 drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/Makefile create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h create mode 100644 
drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c create mode 100644 drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h create mode 100644 drivers/gpu/drm/ast/ast_ddc.c create mode 100644 drivers/gpu/drm/ast/ast_ddc.h delete mode 100644 drivers/gpu/drm/ast/ast_i2c.c create mode 100644 drivers/gpu/drm/bridge/microchip-lvds.c create mode 100644 drivers/gpu/drm/drm_displayid_internal.h create mode 100644 drivers/gpu/drm/drm_panic.c create mode 100644 drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h create mode 100644 drivers/gpu/drm/i915/display/intel_display_conversion.h create mode 100644 drivers/gpu/drm/i915/display/intel_dmc_wl.c create mode 100644 drivers/gpu/drm/i915/display/intel_dmc_wl.h create mode 100644 drivers/gpu/drm/i915/display/intel_fbc_regs.h create mode 100644 drivers/gpu/drm/i915/display/intel_fixed.h create mode 100644 drivers/gpu/drm/i915/display/intel_sprite_regs.h create mode 100644 drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h delete mode 100644 drivers/gpu/drm/i915/i915_fixed.h create mode 100644 drivers/gpu/drm/mediatek/mtk_crtc.c create mode 100644 drivers/gpu/drm/mediatek/mtk_crtc.h create mode 100644 drivers/gpu/drm/mediatek/mtk_ddp_comp.c create mode 100644 drivers/gpu/drm/mediatek/mtk_ddp_comp.h delete mode 100644 drivers/gpu/drm/mediatek/mtk_drm_crtc.c delete mode 100644 drivers/gpu/drm/mediatek/mtk_drm_crtc.h delete mode 100644 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c delete mode 100644 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h delete mode 100644 
drivers/gpu/drm/mediatek/mtk_drm_gem.c delete mode 100644 drivers/gpu/drm/mediatek/mtk_drm_gem.h delete mode 100644 drivers/gpu/drm/mediatek/mtk_drm_plane.c delete mode 100644 drivers/gpu/drm/mediatek/mtk_drm_plane.h create mode 100644 drivers/gpu/drm/mediatek/mtk_gem.c create mode 100644 drivers/gpu/drm/mediatek/mtk_gem.h create mode 100644 drivers/gpu/drm/mediatek/mtk_plane.c create mode 100644 drivers/gpu/drm/mediatek/mtk_plane.h create mode 100644 drivers/gpu/drm/msm/.gitignore delete mode 100644 drivers/gpu/drm/msm/adreno/a2xx.xml.h create mode 100644 drivers/gpu/drm/msm/adreno/a2xx_gpummu.c delete mode 100644 drivers/gpu/drm/msm/adreno/a3xx.xml.h delete mode 100644 drivers/gpu/drm/msm/adreno/a4xx.xml.h delete mode 100644 drivers/gpu/drm/msm/adreno/a5xx.xml.h delete mode 100644 drivers/gpu/drm/msm/adreno/a6xx.xml.h delete mode 100644 drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h delete mode 100644 drivers/gpu/drm/msm/adreno/adreno_common.xml.h create mode 100644 drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h delete mode 100644 drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h delete mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h delete mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h delete mode 100644 drivers/gpu/drm/msm/disp/mdp_common.xml.h create mode 100644 drivers/gpu/drm/msm/disp/mdp_format.h delete mode 100644 drivers/gpu/drm/msm/dsi/dsi.xml.h delete mode 100644 drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h delete mode 100644 drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h delete mode 100644 drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h delete mode 100644 drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h delete mode 100644 drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h delete mode 100644 drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h delete mode 100644 drivers/gpu/drm/msm/dsi/mmss_cc.xml.h delete mode 100644 drivers/gpu/drm/msm/dsi/sfpb.xml.h delete mode 100644 drivers/gpu/drm/msm/hdmi/hdmi.xml.h delete mode 100644 drivers/gpu/drm/msm/hdmi/qfprom.xml.h delete mode 100644 drivers/gpu/drm/msm/msm_gpummu.c create mode 100644 drivers/gpu/drm/msm/registers/.gitignore create mode 100644 drivers/gpu/drm/msm/registers/adreno/a2xx.xml create mode 100644 drivers/gpu/drm/msm/registers/adreno/a3xx.xml create mode 100644 drivers/gpu/drm/msm/registers/adreno/a4xx.xml create mode 100644 drivers/gpu/drm/msm/registers/adreno/a5xx.xml create mode 100644 drivers/gpu/drm/msm/registers/adreno/a6xx.xml create mode 100644 drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml create mode 100644 drivers/gpu/drm/msm/registers/adreno/adreno_common.xml create mode 100644 drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml create mode 100644 drivers/gpu/drm/msm/registers/display/dsi.xml create mode 100644 drivers/gpu/drm/msm/registers/display/dsi_phy_10nm.xml create mode 100644 drivers/gpu/drm/msm/registers/display/dsi_phy_14nm.xml create mode 100644 drivers/gpu/drm/msm/registers/display/dsi_phy_20nm.xml create mode 100644 drivers/gpu/drm/msm/registers/display/dsi_phy_28nm.xml create mode 100644 drivers/gpu/drm/msm/registers/display/dsi_phy_28nm_8960.xml create mode 100644 drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml create mode 100644 drivers/gpu/drm/msm/registers/display/edp.xml create mode 100644 drivers/gpu/drm/msm/registers/display/hdmi.xml create mode 100644 drivers/gpu/drm/msm/registers/display/mdp4.xml create mode 100644 drivers/gpu/drm/msm/registers/display/mdp5.xml create mode 100644 drivers/gpu/drm/msm/registers/display/mdp_common.xml create mode 100644 drivers/gpu/drm/msm/registers/display/msm.xml 
create mode 100644 drivers/gpu/drm/msm/registers/display/sfpb.xml create mode 100644 drivers/gpu/drm/msm/registers/freedreno_copyright.xml create mode 100644 drivers/gpu/drm/msm/registers/gen_header.py create mode 100644 drivers/gpu/drm/msm/registers/rules-fd.xsd create mode 100644 drivers/gpu/drm/panel/panel-lg-sw43408.c create mode 100644 drivers/gpu/drm/panel/panel-raydium-rm69380.c create mode 100644 drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.c delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.h create mode 100644 drivers/gpu/drm/panthor/Kconfig create mode 100644 drivers/gpu/drm/panthor/Makefile create mode 100644 drivers/gpu/drm/panthor/panthor_devfreq.c create mode 100644 drivers/gpu/drm/panthor/panthor_devfreq.h create mode 100644 drivers/gpu/drm/panthor/panthor_device.c create mode 100644 drivers/gpu/drm/panthor/panthor_device.h create mode 100644 drivers/gpu/drm/panthor/panthor_drv.c create mode 100644 drivers/gpu/drm/panthor/panthor_fw.c create mode 100644 drivers/gpu/drm/panthor/panthor_fw.h create mode 100644 drivers/gpu/drm/panthor/panthor_gem.c create mode 100644 drivers/gpu/drm/panthor/panthor_gem.h create mode 100644 drivers/gpu/drm/panthor/panthor_gpu.c create mode 100644 drivers/gpu/drm/panthor/panthor_gpu.h create mode 100644 drivers/gpu/drm/panthor/panthor_heap.c create mode 100644 drivers/gpu/drm/panthor/panthor_heap.h create mode 100644 drivers/gpu/drm/panthor/panthor_mmu.c create mode 100644 drivers/gpu/drm/panthor/panthor_mmu.h create mode 100644 drivers/gpu/drm/panthor/panthor_regs.h create mode 100644 drivers/gpu/drm/panthor/panthor_sched.c create mode 100644 drivers/gpu/drm/panthor/panthor_sched.h delete mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h delete mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h delete mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h delete mode 100644 drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h create mode 100644 drivers/gpu/drm/xe/instructions/xe_gfx_state_commands.h create mode 100644 drivers/gpu/drm/xe/regs/xe_gtt_defs.h create mode 100644 drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c create mode 100644 drivers/gpu/drm/xe/tests/xe_live_test_mod.c create mode 100644 drivers/gpu/drm/xe/xe_gt_sriov_pf.c create mode 100644 drivers/gpu/drm/xe/xe_gt_sriov_pf.h create mode 100644 drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c create mode 100644 drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h create mode 100644 drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h create mode 100644 drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c create mode 100644 drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h create mode 100644 drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h create mode 100644 drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c create mode 100644 drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.h create mode 100644 drivers/gpu/drm/xe/xe_gt_sriov_pf_policy_types.h create mode 100644 drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h create mode 100644 drivers/gpu/drm/xe/xe_guc_id_mgr.c create mode 100644 drivers/gpu/drm/xe/xe_guc_id_mgr.h create mode 100644 drivers/gpu/drm/xe/xe_guc_klv_helpers.c create mode 100644 drivers/gpu/drm/xe/xe_guc_klv_helpers.h create mode 100644 drivers/gpu/drm/xe/xe_hmm.c create mode 100644 drivers/gpu/drm/xe/xe_hmm.h create mode 100644 drivers/gpu/drm/xe/xe_sriov_pf.c create mode 100644 drivers/gpu/drm/xe/xe_sriov_pf.h create mode 
100644 drivers/gpu/drm/xe/xe_sriov_pf_helpers.h create mode 100644 drivers/hid/bpf/progs/FR-TEC__Raptor-Mach-2.bpf.c create mode 100644 drivers/hid/bpf/progs/HP__Elite-Presenter.bpf.c create mode 100644 drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c create mode 100644 drivers/hid/bpf/progs/IOGEAR__Kaliber-MMOmentum.bpf.c create mode 100644 drivers/hid/bpf/progs/Makefile create mode 100644 drivers/hid/bpf/progs/Microsoft__XBox-Elite-2.bpf.c create mode 100644 drivers/hid/bpf/progs/README create mode 100644 drivers/hid/bpf/progs/Wacom__ArtPen.bpf.c create mode 100644 drivers/hid/bpf/progs/XPPen__Artist24.bpf.c create mode 100644 drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c create mode 100644 drivers/hid/bpf/progs/hid_bpf.h create mode 100644 drivers/hid/bpf/progs/hid_bpf_helpers.h create mode 100644 drivers/hid/hid-winwing.c create mode 100644 drivers/hid/intel-ish-hid/ishtp/loader.c create mode 100644 drivers/hid/intel-ish-hid/ishtp/loader.h delete mode 100644 drivers/hv/hv_fcopy.c create mode 100644 drivers/hwmon/lenovo-ec-sensors.c create mode 100644 drivers/hwmon/pmbus/adp1050.c create mode 100644 drivers/hwmon/pmbus/xdp710.c create mode 100644 drivers/i2c/busses/i2c-viai2c-common.c create mode 100644 drivers/i2c/busses/i2c-viai2c-common.h create mode 100644 drivers/i2c/busses/i2c-viai2c-wmt.c create mode 100644 drivers/i2c/busses/i2c-viai2c-zhaoxin.c delete mode 100644 drivers/i2c/busses/i2c-wmt.c create mode 100644 drivers/iio/adc/ad7173.c create mode 100644 drivers/iio/adc/ad7944.c create mode 100644 drivers/iio/dac/ad9739a.c create mode 100644 drivers/iio/dac/adi-axi-dac.c create mode 100644 drivers/iio/industrialio-acpi.c create mode 100644 drivers/iio/light/apds9306.c create mode 100644 drivers/iommu/amd/pasid.c create mode 100644 drivers/iommu/amd/ppr.c create mode 100644 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c create mode 100644 drivers/iommu/intel/cache.c create mode 100644 drivers/iommu/iommu-pages.h create mode 100644 drivers/irqchip/irq-riscv-aplic-direct.c create mode 100644 drivers/irqchip/irq-riscv-aplic-main.c create mode 100644 drivers/irqchip/irq-riscv-aplic-main.h create mode 100644 drivers/irqchip/irq-riscv-aplic-msi.c create mode 100644 drivers/irqchip/irq-riscv-imsic-early.c create mode 100644 drivers/irqchip/irq-riscv-imsic-platform.c create mode 100644 drivers/irqchip/irq-riscv-imsic-state.c create mode 100644 drivers/irqchip/irq-riscv-imsic-state.h delete mode 100644 drivers/leds/trigger/ledtrig-audio.c create mode 100644 drivers/mailbox/arm_mhuv3.c create mode 100644 drivers/media/pci/intel/ipu6/Kconfig create mode 100644 drivers/media/pci/intel/ipu6/Makefile create mode 100644 drivers/media/pci/intel/ipu6/ipu6-bus.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-bus.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-buttress.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-buttress.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-cpd.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-cpd.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-dma.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-dma.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-fw-com.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-fw-com.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-fw-isys.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-fw-isys.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h create mode 100644 
drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-jsl-phy.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-mcd-phy.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-queue.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-queue.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-video.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys-video.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-isys.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-mmu.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6-mmu.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-platform-isys-csi2-reg.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-platform-regs.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6.c create mode 100644 drivers/media/pci/intel/ipu6/ipu6.h create mode 100644 drivers/media/platform/broadcom/Kconfig create mode 100644 drivers/media/platform/broadcom/Makefile create mode 100644 drivers/media/platform/broadcom/bcm2835-unicam-regs.h create mode 100644 drivers/media/platform/broadcom/bcm2835-unicam.c delete mode 100644 drivers/media/platform/qcom/camss/camss-vfe-170.c create mode 100644 drivers/media/platform/qcom/camss/camss-vfe-17x.c create mode 100644 drivers/misc/ntsync.c create mode 100644 drivers/net/dsa/microchip/ksz_dcb.c create mode 100644 drivers/net/dsa/microchip/ksz_dcb.h create mode 100644 drivers/net/ethernet/intel/ice/devlink/devlink.c create mode 100644 drivers/net/ethernet/intel/ice/devlink/devlink.h create mode 100644 drivers/net/ethernet/intel/ice/devlink/devlink_port.c create mode 100644 drivers/net/ethernet/intel/ice/devlink/devlink_port.h create mode 100644 drivers/net/ethernet/intel/ice/ice_adapter.c create mode 100644 drivers/net/ethernet/intel/ice/ice_adapter.h delete mode 100644 drivers/net/ethernet/intel/ice/ice_devlink.c delete mode 100644 drivers/net/ethernet/intel/ice/ice_devlink.h create mode 100644 drivers/net/ethernet/intel/libeth/Kconfig create mode 100644 drivers/net/ethernet/intel/libeth/Makefile create mode 100644 drivers/net/ethernet/intel/libeth/rx.c create mode 100644 drivers/net/ethernet/intel/libie/Kconfig create mode 100644 drivers/net/ethernet/intel/libie/Makefile create mode 100644 drivers/net/ethernet/intel/libie/rx.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/dim.h create mode 100644 drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c create mode 100644 drivers/net/ethernet/ti/icssg/icssg_common.c create mode 100644 drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c create mode 100644 drivers/net/pfcp.c create mode 100644 drivers/net/phy/air_en8811h.c create mode 100644 drivers/net/pse-pd/pd692x0.c create mode 100644 drivers/net/pse-pd/tps23881.c delete mode 100644 drivers/net/team/team.c create mode 100644 drivers/net/team/team_core.c create mode 100644 drivers/net/team/team_nl.c create mode 100644 drivers/net/team/team_nl.h create mode 100644 drivers/net/wireless/ath/ath11k/p2p.c create mode 100644 drivers/net/wireless/ath/ath11k/p2p.h create mode 100644 drivers/net/wireless/ath/ath12k/acpi.c create mode 100644 drivers/net/wireless/ath/ath12k/acpi.h create mode 
100644 drivers/net/wireless/ath/ath12k/debugfs.c create mode 100644 drivers/net/wireless/ath/ath12k/debugfs.h create mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile create mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c create mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c create mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c create mode 100644 drivers/net/wireless/realtek/rtl8xxxu/8188e.c create mode 100644 drivers/net/wireless/realtek/rtl8xxxu/8188f.c create mode 100644 drivers/net/wireless/realtek/rtl8xxxu/8192c.c create mode 100644 drivers/net/wireless/realtek/rtl8xxxu/8192e.c create mode 100644 drivers/net/wireless/realtek/rtl8xxxu/8192f.c create mode 100644 drivers/net/wireless/realtek/rtl8xxxu/8710b.c create mode 100644 drivers/net/wireless/realtek/rtl8xxxu/8723a.c create mode 100644 drivers/net/wireless/realtek/rtl8xxxu/8723b.c create mode 100644 drivers/net/wireless/realtek/rtl8xxxu/core.c create mode 100644 drivers/net/wireless/realtek/rtl8xxxu/regs.h delete mode 100644 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c delete mode 100644 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c delete mode 100644 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c delete mode 100644 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c delete mode 100644 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c delete mode 100644 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c delete mode 100644 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c delete mode 100644 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c delete mode 100644 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c delete mode 100644 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/def.h create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/reg.h create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c create mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h delete mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h delete mode 100644 drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8703b.c create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8703b.h create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8723cs.c create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8723x.c 
create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8723x.h create mode 100644 drivers/phy/freescale/phy-fsl-samsung-hdmi.c create mode 100644 drivers/phy/mediatek/phy-mtk-xfi-tphy.c create mode 100644 drivers/phy/rockchip/phy-rockchip-usbdp.c create mode 100644 drivers/phy/samsung/phy-gs101-ufs.c create mode 100644 drivers/pinctrl/pinctrl-scmi.c create mode 100644 drivers/platform/arm64/Kconfig create mode 100644 drivers/platform/arm64/Makefile create mode 100644 drivers/platform/arm64/acer-aspire1-ec.c create mode 100644 drivers/platform/x86/amd/pmc/mp2_stb.c create mode 100644 drivers/platform/x86/dell/dell-uart-backlight.c create mode 100644 drivers/platform/x86/lenovo-wmi-camera.c create mode 100644 drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c create mode 100644 drivers/platform/x86/meegopad_anx7428.c create mode 100644 drivers/platform/x86/msi-wmi-platform.c create mode 100644 drivers/platform/x86/quickstart.c delete mode 100644 drivers/pmdomain/renesas/r8a7796-sysc.c create mode 100644 drivers/pmdomain/renesas/r8a77960-sysc.c create mode 100644 drivers/pmdomain/renesas/r8a77961-sysc.c delete mode 100644 drivers/pwm/sysfs.c create mode 100644 drivers/regulator/sun20i-regulator.c create mode 100644 drivers/rtc/rtc-rx8111.c create mode 100644 drivers/spi/spi-airoha-snfi.c delete mode 100644 drivers/staging/media/atomisp/i2c/atomisp-lm3554.c delete mode 100644 drivers/staging/media/atomisp/include/media/lm3554.h delete mode 100644 drivers/staging/media/atomisp/pci/atomisp_tpg.c delete mode 100644 drivers/staging/media/atomisp/pci/atomisp_tpg.h delete mode 100644 drivers/staging/media/atomisp/pci/defs.h delete mode 100644 drivers/staging/media/atomisp/pci/ia_css_tpg.h delete mode 100644 drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dtso delete mode 100644 drivers/staging/pi433/Documentation/devicetree/pi433.txt delete mode 100644 drivers/staging/pi433/Documentation/pi433.txt delete mode 100644 drivers/staging/pi433/Kconfig delete mode 100644 drivers/staging/pi433/Makefile delete mode 100644 drivers/staging/pi433/TODO delete mode 100644 drivers/staging/pi433/pi433_if.c delete mode 100644 drivers/staging/pi433/pi433_if.h delete mode 100644 drivers/staging/pi433/rf69.c delete mode 100644 drivers/staging/pi433/rf69.h delete mode 100644 drivers/staging/pi433/rf69_enum.h delete mode 100644 drivers/staging/pi433/rf69_registers.h delete mode 100644 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c delete mode 100644 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h delete mode 100644 drivers/staging/wlan-ng/Kconfig delete mode 100644 drivers/staging/wlan-ng/Makefile delete mode 100644 drivers/staging/wlan-ng/README delete mode 100644 drivers/staging/wlan-ng/TODO delete mode 100644 drivers/staging/wlan-ng/cfg80211.c delete mode 100644 drivers/staging/wlan-ng/hfa384x.h delete mode 100644 drivers/staging/wlan-ng/hfa384x_usb.c delete mode 100644 drivers/staging/wlan-ng/p80211conv.c delete mode 100644 drivers/staging/wlan-ng/p80211conv.h delete mode 100644 drivers/staging/wlan-ng/p80211hdr.h delete mode 100644 drivers/staging/wlan-ng/p80211ioctl.h delete mode 100644 drivers/staging/wlan-ng/p80211metadef.h delete mode 100644 drivers/staging/wlan-ng/p80211metastruct.h delete mode 100644 drivers/staging/wlan-ng/p80211mgmt.h delete mode 100644 drivers/staging/wlan-ng/p80211msg.h delete mode 100644 drivers/staging/wlan-ng/p80211netdev.c delete mode 100644 drivers/staging/wlan-ng/p80211netdev.h delete mode 100644 
drivers/staging/wlan-ng/p80211req.c delete mode 100644 drivers/staging/wlan-ng/p80211req.h delete mode 100644 drivers/staging/wlan-ng/p80211types.h delete mode 100644 drivers/staging/wlan-ng/p80211wep.c delete mode 100644 drivers/staging/wlan-ng/prism2fw.c delete mode 100644 drivers/staging/wlan-ng/prism2mgmt.c delete mode 100644 drivers/staging/wlan-ng/prism2mgmt.h delete mode 100644 drivers/staging/wlan-ng/prism2mib.c delete mode 100644 drivers/staging/wlan-ng/prism2sta.c delete mode 100644 drivers/staging/wlan-ng/prism2usb.c create mode 100644 drivers/tee/tstee/Kconfig create mode 100644 drivers/tee/tstee/Makefile create mode 100644 drivers/tee/tstee/core.c create mode 100644 drivers/tee/tstee/tstee_private.h delete mode 100644 drivers/tty/serial/8250/8250_alpha.c create mode 100644 drivers/tty/serial/sc16is7xx.h create mode 100644 drivers/tty/serial/sc16is7xx_i2c.c create mode 100644 drivers/tty/serial/sc16is7xx_spi.c create mode 100644 drivers/ufs/host/ufs-mediatek-sip.h delete mode 100644 drivers/uio/uio_pruss.c create mode 100644 drivers/usb/misc/onboard_usb_dev.c create mode 100644 drivers/usb/misc/onboard_usb_dev.h create mode 100644 drivers/usb/misc/onboard_usb_dev_pdevs.c delete mode 100644 drivers/usb/misc/onboard_usb_hub.c delete mode 100644 drivers/usb/misc/onboard_usb_hub.h delete mode 100644 drivers/usb/misc/onboard_usb_hub_pdevs.c create mode 100644 drivers/vfio/cdx/intr.c create mode 100644 drivers/vfio/pci/qat/Kconfig create mode 100644 drivers/vfio/pci/qat/Makefile create mode 100644 drivers/vfio/pci/qat/main.c create mode 100644 drivers/virtio/virtio_debug.c create mode 100644 drivers/watchdog/lenovo_se10_wdt.c (limited to 'drivers') diff --git a/drivers/Makefile b/drivers/Makefile index 3bf5cab4b4..fe9ceb0d22 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -6,11 +6,6 @@ # Rewritten to use lists instead of if-statements. # -# Some driver Makefiles miss $(srctree)/ for include directive. 
-ifdef building_out_of_srctree -MAKEFLAGS += --include-dir=$(srctree) -endif - obj-y += cache/ obj-y += irqchip/ obj-y += bus/ diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c index d09d29775b..e07e447d08 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.c +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -3,6 +3,8 @@ * Copyright (C) 2020-2023 Intel Corporation */ +#include + #include #include #include diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c index fe61612992..128aef8e5a 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.c +++ b/drivers/accel/ivpu/ivpu_mmu_context.c @@ -6,6 +6,7 @@ #include #include #include +#include #include diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile index 3f7f6dfde7..35e8835156 100644 --- a/drivers/accel/qaic/Makefile +++ b/drivers/accel/qaic/Makefile @@ -10,4 +10,7 @@ qaic-y := \ qaic_control.o \ qaic_data.o \ qaic_drv.o \ - qaic_timesync.o + qaic_timesync.o \ + sahara.o + +qaic-$(CONFIG_DEBUG_FS) += qaic_debugfs.o diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h index 9256653b30..02561b6cec 100644 --- a/drivers/accel/qaic/qaic.h +++ b/drivers/accel/qaic/qaic.h @@ -153,6 +153,14 @@ struct qaic_device { struct mhi_device *qts_ch; /* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */ struct workqueue_struct *qts_wq; + /* Head of list of page allocated by MHI bootlog device */ + struct list_head bootlog; + /* MHI bootlog channel device */ + struct mhi_device *bootlog_ch; + /* Work queue for tasks related to MHI bootlog device */ + struct workqueue_struct *bootlog_wq; + /* Synchronizes access of pages in MHI bootlog device */ + struct mutex bootlog_mutex; }; struct qaic_drm_device { @@ -280,6 +288,7 @@ int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr); void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr); void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id); void release_dbc(struct qaic_device *qdev, u32 dbc_id); +void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail); void wake_all_cntl(struct qaic_device *qdev); void qaic_dev_reset_clean_local_state(struct qaic_device *qdev); diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index 2459fe4a3f..e86e71c1cd 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -1981,3 +1981,12 @@ void release_dbc(struct qaic_device *qdev, u32 dbc_id) dbc->in_use = false; wake_up(&dbc->dbc_release); } + +void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail) +{ + if (!dbc || !head || !tail) + return; + + *head = readl(dbc->dbc_base + REQHP_OFF); + *tail = readl(dbc->dbc_base + REQTP_OFF); +} diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c new file mode 100644 index 0000000000..20b653d99e --- /dev/null +++ b/drivers/accel/qaic/qaic_debugfs.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qaic.h" +#include "qaic_debugfs.h" + +#define BOOTLOG_POOL_SIZE 16 +#define BOOTLOG_MSG_SIZE 512 +#define QAIC_DBC_DIR_NAME 9 + +struct bootlog_msg { + /* Buffer for bootlog messages */ + char str[BOOTLOG_MSG_SIZE]; + /* Root struct of device, used to access device resources */ + struct qaic_device *qdev; + /* Work struct to schedule work coming on QAIC_LOGGING channel */ + struct work_struct work; +}; + +struct bootlog_page { + /* Node in list of bootlog pages maintained by root device struct */ + struct list_head node; + /* Total size of the buffer that holds the bootlogs. It is PAGE_SIZE */ + unsigned int size; + /* Offset for the next bootlog */ + unsigned int offset; +}; + +static int bootlog_show(struct seq_file *s, void *unused) +{ + struct bootlog_page *page; + struct qaic_device *qdev; + void *page_end; + void *log; + + qdev = s->private; + mutex_lock(&qdev->bootlog_mutex); + list_for_each_entry(page, &qdev->bootlog, node) { + log = page + 1; + page_end = (void *)page + page->offset; + while (log < page_end) { + seq_printf(s, "%s", (char *)log); + log += strlen(log) + 1; + } + } + mutex_unlock(&qdev->bootlog_mutex); + + return 0; +} + +static int bootlog_fops_open(struct inode *inode, struct file *file) +{ + return single_open(file, bootlog_show, inode->i_private); +} + +static const struct file_operations bootlog_fops = { + .owner = THIS_MODULE, + .open = bootlog_fops_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int read_dbc_fifo_size(struct seq_file *s, void *unused) +{ + struct dma_bridge_chan *dbc = s->private; + + seq_printf(s, "%u\n", dbc->nelem); + return 0; +} + +static int fifo_size_open(struct inode *inode, struct file *file) +{ + return single_open(file, read_dbc_fifo_size, inode->i_private); +} + +static const struct file_operations fifo_size_fops = { + .owner = THIS_MODULE, + .open = fifo_size_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int read_dbc_queued(struct seq_file *s, void *unused) +{ + struct dma_bridge_chan *dbc = s->private; + u32 tail = 0, head = 0; + + qaic_data_get_fifo_info(dbc, &head, &tail); + + if (head == U32_MAX || tail == U32_MAX) + seq_printf(s, "%u\n", 0); + else if (head > tail) + seq_printf(s, "%u\n", dbc->nelem - head + tail); + else + seq_printf(s, "%u\n", tail - head); + + return 0; +} + +static int queued_open(struct inode *inode, struct file *file) +{ + return single_open(file, read_dbc_queued, inode->i_private); +} + +static const struct file_operations queued_fops = { + .owner = THIS_MODULE, + .open = queued_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void qaic_debugfs_init(struct qaic_drm_device *qddev) +{ + struct qaic_device *qdev = qddev->qdev; + struct dentry *debugfs_root; + struct dentry *debugfs_dir; + char name[QAIC_DBC_DIR_NAME]; + u32 i; + + debugfs_root = to_drm(qddev)->debugfs_root; + + debugfs_create_file("bootlog", 0400, debugfs_root, qdev, &bootlog_fops); + /* + * 256 dbcs per device is likely the max we will ever see and lets static checking see a + * reasonable range. 
+ */ + for (i = 0; i < qdev->num_dbc && i < 256; ++i) { + snprintf(name, QAIC_DBC_DIR_NAME, "dbc%03u", i); + debugfs_dir = debugfs_create_dir(name, debugfs_root); + debugfs_create_file("fifo_size", 0400, debugfs_dir, &qdev->dbc[i], &fifo_size_fops); + debugfs_create_file("queued", 0400, debugfs_dir, &qdev->dbc[i], &queued_fops); + } +} + +static struct bootlog_page *alloc_bootlog_page(struct qaic_device *qdev) +{ + struct bootlog_page *page; + + page = (struct bootlog_page *)devm_get_free_pages(&qdev->pdev->dev, GFP_KERNEL, 0); + if (!page) + return page; + + page->size = PAGE_SIZE; + page->offset = sizeof(*page); + list_add_tail(&page->node, &qdev->bootlog); + + return page; +} + +static int reset_bootlog(struct qaic_device *qdev) +{ + struct bootlog_page *page; + struct bootlog_page *i; + + mutex_lock(&qdev->bootlog_mutex); + list_for_each_entry_safe(page, i, &qdev->bootlog, node) { + list_del(&page->node); + devm_free_pages(&qdev->pdev->dev, (unsigned long)page); + } + + page = alloc_bootlog_page(qdev); + mutex_unlock(&qdev->bootlog_mutex); + if (!page) + return -ENOMEM; + + return 0; +} + +static void *bootlog_get_space(struct qaic_device *qdev, unsigned int size) +{ + struct bootlog_page *page; + + page = list_last_entry(&qdev->bootlog, struct bootlog_page, node); + + if (size_add(size, sizeof(*page)) > page->size) + return NULL; + + if (page->offset + size > page->size) { + page = alloc_bootlog_page(qdev); + if (!page) + return NULL; + } + + return (void *)page + page->offset; +} + +static void bootlog_commit(struct qaic_device *qdev, unsigned int size) +{ + struct bootlog_page *page; + + page = list_last_entry(&qdev->bootlog, struct bootlog_page, node); + + page->offset += size; +} + +static void bootlog_log(struct work_struct *work) +{ + struct bootlog_msg *msg = container_of(work, struct bootlog_msg, work); + unsigned int len = strlen(msg->str) + 1; + struct qaic_device *qdev = msg->qdev; + void *log; + + mutex_lock(&qdev->bootlog_mutex); + log = bootlog_get_space(qdev, len); + if (log) { + memcpy(log, msg, len); + bootlog_commit(qdev, len); + } + mutex_unlock(&qdev->bootlog_mutex); + + if (mhi_queue_buf(qdev->bootlog_ch, DMA_FROM_DEVICE, msg, BOOTLOG_MSG_SIZE, MHI_EOT)) + devm_kfree(&qdev->pdev->dev, msg); +} + +static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) +{ + struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev)); + struct bootlog_msg *msg; + int i, ret; + + qdev->bootlog_wq = alloc_ordered_workqueue("qaic_bootlog", 0); + if (!qdev->bootlog_wq) { + ret = -ENOMEM; + goto out; + } + + ret = reset_bootlog(qdev); + if (ret) + goto destroy_workqueue; + + ret = mhi_prepare_for_transfer(mhi_dev); + if (ret) + goto destroy_workqueue; + + for (i = 0; i < BOOTLOG_POOL_SIZE; i++) { + msg = devm_kzalloc(&qdev->pdev->dev, sizeof(*msg), GFP_KERNEL); + if (!msg) { + ret = -ENOMEM; + goto mhi_unprepare; + } + + msg->qdev = qdev; + INIT_WORK(&msg->work, bootlog_log); + + ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, msg, BOOTLOG_MSG_SIZE, MHI_EOT); + if (ret) + goto mhi_unprepare; + } + + dev_set_drvdata(&mhi_dev->dev, qdev); + qdev->bootlog_ch = mhi_dev; + return 0; + +mhi_unprepare: + mhi_unprepare_from_transfer(mhi_dev); +destroy_workqueue: + flush_workqueue(qdev->bootlog_wq); + destroy_workqueue(qdev->bootlog_wq); +out: + return ret; +} + +static void qaic_bootlog_mhi_remove(struct mhi_device *mhi_dev) +{ + struct qaic_device *qdev; + + qdev = dev_get_drvdata(&mhi_dev->dev); + + 
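+ /*
+ * Stop the MHI channel first so no new bootlog messages arrive, then
+ * drain any queued log work before destroying the workqueue.
+ */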
mhi_unprepare_from_transfer(qdev->bootlog_ch); + flush_workqueue(qdev->bootlog_wq); + destroy_workqueue(qdev->bootlog_wq); + qdev->bootlog_ch = NULL; +} + +static void qaic_bootlog_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) +{ +} + +static void qaic_bootlog_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) +{ + struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev); + struct bootlog_msg *msg = mhi_result->buf_addr; + + if (mhi_result->transaction_status) { + devm_kfree(&qdev->pdev->dev, msg); + return; + } + + /* Force a null at the end of the transferred string */ + msg->str[mhi_result->bytes_xferd - 1] = 0; + + queue_work(qdev->bootlog_wq, &msg->work); +} + +static const struct mhi_device_id qaic_bootlog_mhi_match_table[] = { + { .chan = "QAIC_LOGGING", }, + {}, +}; + +static struct mhi_driver qaic_bootlog_mhi_driver = { + .id_table = qaic_bootlog_mhi_match_table, + .remove = qaic_bootlog_mhi_remove, + .probe = qaic_bootlog_mhi_probe, + .ul_xfer_cb = qaic_bootlog_mhi_ul_xfer_cb, + .dl_xfer_cb = qaic_bootlog_mhi_dl_xfer_cb, + .driver = { + .name = "qaic_bootlog", + }, +}; + +int qaic_bootlog_register(void) +{ + return mhi_driver_register(&qaic_bootlog_mhi_driver); +} + +void qaic_bootlog_unregister(void) +{ + mhi_driver_unregister(&qaic_bootlog_mhi_driver); +} diff --git a/drivers/accel/qaic/qaic_debugfs.h b/drivers/accel/qaic/qaic_debugfs.h new file mode 100644 index 0000000000..05e74f84cf --- /dev/null +++ b/drivers/accel/qaic/qaic_debugfs.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ + +#ifndef __QAIC_DEBUGFS_H__ +#define __QAIC_DEBUGFS_H__ + +#include + +#ifdef CONFIG_DEBUG_FS +int qaic_bootlog_register(void); +void qaic_bootlog_unregister(void); +void qaic_debugfs_init(struct qaic_drm_device *qddev); +#else +static inline int qaic_bootlog_register(void) { return 0; } +static inline void qaic_bootlog_unregister(void) {} +static inline void qaic_debugfs_init(struct qaic_drm_device *qddev) {} +#endif /* CONFIG_DEBUG_FS */ +#endif /* __QAIC_DEBUGFS_H__ */ diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index d1a632dbae..580b29ed19 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -28,7 +28,9 @@ #include "mhi_controller.h" #include "qaic.h" +#include "qaic_debugfs.h" #include "qaic_timesync.h" +#include "sahara.h" MODULE_IMPORT_NS(DMA_BUF); @@ -229,8 +231,12 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id) qddev->partition_id = partition_id; ret = drm_dev_register(drm, 0); - if (ret) + if (ret) { pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret); + return ret; + } + + qaic_debugfs_init(qddev); return ret; } @@ -380,6 +386,9 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de if (ret) return NULL; ret = drmm_mutex_init(drm, &qdev->cntl_mutex); + if (ret) + return NULL; + ret = drmm_mutex_init(drm, &qdev->bootlog_mutex); if (ret) return NULL; @@ -399,6 +408,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de qddev->qdev = qdev; INIT_LIST_HEAD(&qdev->cntl_xfer_list); + INIT_LIST_HEAD(&qdev->bootlog); INIT_LIST_HEAD(&qddev->users); for (i = 0; i < qdev->num_dbc; ++i) { @@ -635,12 +645,24 @@ static int __init qaic_init(void) goto free_pci; } + ret = sahara_register(); + if (ret) { + pr_debug("qaic: 
sahara_register failed %d\n", ret); + goto free_mhi; + } + ret = qaic_timesync_init(); if (ret) pr_debug("qaic: qaic_timesync_init failed %d\n", ret); + ret = qaic_bootlog_register(); + if (ret) + pr_debug("qaic: qaic_bootlog_register failed %d\n", ret); + return 0; +free_mhi: + mhi_driver_unregister(&qaic_mhi_driver); free_pci: pci_unregister_driver(&qaic_pci_driver); return ret; @@ -664,7 +686,9 @@ static void __exit qaic_exit(void) * reinitializing the link_up state after the cleanup is done. */ link_up = true; + qaic_bootlog_unregister(); qaic_timesync_deinit(); + sahara_unregister(); mhi_driver_unregister(&qaic_mhi_driver); pci_unregister_driver(&qaic_pci_driver); } diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c new file mode 100644 index 0000000000..bf94bbab6b --- /dev/null +++ b/drivers/accel/qaic/sahara.c @@ -0,0 +1,449 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sahara.h" + +#define SAHARA_HELLO_CMD 0x1 /* Min protocol version 1.0 */ +#define SAHARA_HELLO_RESP_CMD 0x2 /* Min protocol version 1.0 */ +#define SAHARA_READ_DATA_CMD 0x3 /* Min protocol version 1.0 */ +#define SAHARA_END_OF_IMAGE_CMD 0x4 /* Min protocol version 1.0 */ +#define SAHARA_DONE_CMD 0x5 /* Min protocol version 1.0 */ +#define SAHARA_DONE_RESP_CMD 0x6 /* Min protocol version 1.0 */ +#define SAHARA_RESET_CMD 0x7 /* Min protocol version 1.0 */ +#define SAHARA_RESET_RESP_CMD 0x8 /* Min protocol version 1.0 */ +#define SAHARA_MEM_DEBUG_CMD 0x9 /* Min protocol version 2.0 */ +#define SAHARA_MEM_READ_CMD 0xa /* Min protocol version 2.0 */ +#define SAHARA_CMD_READY_CMD 0xb /* Min protocol version 2.1 */ +#define SAHARA_SWITCH_MODE_CMD 0xc /* Min protocol version 2.1 */ +#define SAHARA_EXECUTE_CMD 0xd /* Min protocol version 2.1 */ +#define SAHARA_EXECUTE_RESP_CMD 0xe /* Min protocol version 2.1 */ +#define SAHARA_EXECUTE_DATA_CMD 0xf /* Min protocol version 2.1 */ +#define SAHARA_MEM_DEBUG64_CMD 0x10 /* Min protocol version 2.5 */ +#define SAHARA_MEM_READ64_CMD 0x11 /* Min protocol version 2.5 */ +#define SAHARA_READ_DATA64_CMD 0x12 /* Min protocol version 2.8 */ +#define SAHARA_RESET_STATE_CMD 0x13 /* Min protocol version 2.9 */ +#define SAHARA_WRITE_DATA_CMD 0x14 /* Min protocol version 3.0 */ + +#define SAHARA_PACKET_MAX_SIZE 0xffffU /* MHI_MAX_MTU */ +#define SAHARA_TRANSFER_MAX_SIZE 0x80000 +#define SAHARA_NUM_TX_BUF DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,\ + SAHARA_PACKET_MAX_SIZE) +#define SAHARA_IMAGE_ID_NONE U32_MAX + +#define SAHARA_VERSION 2 +#define SAHARA_SUCCESS 0 + +#define SAHARA_MODE_IMAGE_TX_PENDING 0x0 +#define SAHARA_MODE_IMAGE_TX_COMPLETE 0x1 +#define SAHARA_MODE_MEMORY_DEBUG 0x2 +#define SAHARA_MODE_COMMAND 0x3 + +#define SAHARA_HELLO_LENGTH 0x30 +#define SAHARA_READ_DATA_LENGTH 0x14 +#define SAHARA_END_OF_IMAGE_LENGTH 0x10 +#define SAHARA_DONE_LENGTH 0x8 +#define SAHARA_RESET_LENGTH 0x8 + +struct sahara_packet { + __le32 cmd; + __le32 length; + + union { + struct { + __le32 version; + __le32 version_compat; + __le32 max_length; + __le32 mode; + } hello; + struct { + __le32 version; + __le32 version_compat; + __le32 status; + __le32 mode; + } hello_resp; + struct { + __le32 image; + __le32 offset; + __le32 length; + } read_data; + struct { + __le32 image; + __le32 status; + } end_of_image; + }; +}; + +struct sahara_context { + struct sahara_packet *tx[SAHARA_NUM_TX_BUF]; + struct sahara_packet *rx; 
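+ /* Deferred handler that parses the packet in rx outside the MHI callback */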
+ struct work_struct work; + struct mhi_device *mhi_dev; + const char **image_table; + u32 table_size; + u32 active_image_id; + const struct firmware *firmware; +}; + +static const char *aic100_image_table[] = { + [1] = "qcom/aic100/fw1.bin", + [2] = "qcom/aic100/fw2.bin", + [4] = "qcom/aic100/fw4.bin", + [5] = "qcom/aic100/fw5.bin", + [6] = "qcom/aic100/fw6.bin", + [8] = "qcom/aic100/fw8.bin", + [9] = "qcom/aic100/fw9.bin", + [10] = "qcom/aic100/fw10.bin", +}; + +static int sahara_find_image(struct sahara_context *context, u32 image_id) +{ + int ret; + + if (image_id == context->active_image_id) + return 0; + + if (context->active_image_id != SAHARA_IMAGE_ID_NONE) { + dev_err(&context->mhi_dev->dev, "image id %d is not valid as %d is active\n", + image_id, context->active_image_id); + return -EINVAL; + } + + if (image_id >= context->table_size || !context->image_table[image_id]) { + dev_err(&context->mhi_dev->dev, "request for unknown image: %d\n", image_id); + return -EINVAL; + } + + /* + * This image might be optional. The device may continue without it. + * Only the device knows. Suppress error messages that could suggest an + * a problem when we were actually able to continue. + */ + ret = firmware_request_nowarn(&context->firmware, + context->image_table[image_id], + &context->mhi_dev->dev); + if (ret) { + dev_dbg(&context->mhi_dev->dev, "request for image id %d / file %s failed %d\n", + image_id, context->image_table[image_id], ret); + return ret; + } + + context->active_image_id = image_id; + + return 0; +} + +static void sahara_release_image(struct sahara_context *context) +{ + if (context->active_image_id != SAHARA_IMAGE_ID_NONE) + release_firmware(context->firmware); + context->active_image_id = SAHARA_IMAGE_ID_NONE; +} + +static void sahara_send_reset(struct sahara_context *context) +{ + int ret; + + context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD); + context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH); + + ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0], + SAHARA_RESET_LENGTH, MHI_EOT); + if (ret) + dev_err(&context->mhi_dev->dev, "Unable to send reset response %d\n", ret); +} + +static void sahara_hello(struct sahara_context *context) +{ + int ret; + + dev_dbg(&context->mhi_dev->dev, + "HELLO cmd received. 
length:%d version:%d version_compat:%d max_length:%d mode:%d\n", + le32_to_cpu(context->rx->length), + le32_to_cpu(context->rx->hello.version), + le32_to_cpu(context->rx->hello.version_compat), + le32_to_cpu(context->rx->hello.max_length), + le32_to_cpu(context->rx->hello.mode)); + + if (le32_to_cpu(context->rx->length) != SAHARA_HELLO_LENGTH) { + dev_err(&context->mhi_dev->dev, "Malformed hello packet - length %d\n", + le32_to_cpu(context->rx->length)); + return; + } + if (le32_to_cpu(context->rx->hello.version) != SAHARA_VERSION) { + dev_err(&context->mhi_dev->dev, "Unsupported hello packet - version %d\n", + le32_to_cpu(context->rx->hello.version)); + return; + } + + if (le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_PENDING && + le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE) { + dev_err(&context->mhi_dev->dev, "Unsupported hello packet - mode %d\n", + le32_to_cpu(context->rx->hello.mode)); + return; + } + + context->tx[0]->cmd = cpu_to_le32(SAHARA_HELLO_RESP_CMD); + context->tx[0]->length = cpu_to_le32(SAHARA_HELLO_LENGTH); + context->tx[0]->hello_resp.version = cpu_to_le32(SAHARA_VERSION); + context->tx[0]->hello_resp.version_compat = cpu_to_le32(SAHARA_VERSION); + context->tx[0]->hello_resp.status = cpu_to_le32(SAHARA_SUCCESS); + context->tx[0]->hello_resp.mode = context->rx->hello_resp.mode; + + ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0], + SAHARA_HELLO_LENGTH, MHI_EOT); + if (ret) + dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret); +} + +static void sahara_read_data(struct sahara_context *context) +{ + u32 image_id, data_offset, data_len, pkt_data_len; + int ret; + int i; + + dev_dbg(&context->mhi_dev->dev, + "READ_DATA cmd received. length:%d image:%d offset:%d data_length:%d\n", + le32_to_cpu(context->rx->length), + le32_to_cpu(context->rx->read_data.image), + le32_to_cpu(context->rx->read_data.offset), + le32_to_cpu(context->rx->read_data.length)); + + if (le32_to_cpu(context->rx->length) != SAHARA_READ_DATA_LENGTH) { + dev_err(&context->mhi_dev->dev, "Malformed read_data packet - length %d\n", + le32_to_cpu(context->rx->length)); + return; + } + + image_id = le32_to_cpu(context->rx->read_data.image); + data_offset = le32_to_cpu(context->rx->read_data.offset); + data_len = le32_to_cpu(context->rx->read_data.length); + + ret = sahara_find_image(context, image_id); + if (ret) { + sahara_send_reset(context); + return; + } + + /* + * Image is released when the device is done with it via + * SAHARA_END_OF_IMAGE_CMD. sahara_send_reset() will either cause the + * device to retry the operation with a modification, or decide to be + * done with the image and trigger SAHARA_END_OF_IMAGE_CMD. + * release_image() is called from SAHARA_END_OF_IMAGE_CMD. processing + * and is not needed here on error. 
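+ * Note that sahara_find_image() keeps the firmware cached while the image
+ * remains active, so consecutive READ_DATA requests for the same image
+ * simply reuse it.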
+ */ + + if (data_len > SAHARA_TRANSFER_MAX_SIZE) { + dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n", + data_len, SAHARA_TRANSFER_MAX_SIZE); + sahara_send_reset(context); + return; + } + + if (data_offset >= context->firmware->size) { + dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d exceeds file size %zu\n", + data_offset, context->firmware->size); + sahara_send_reset(context); + return; + } + + if (size_add(data_offset, data_len) > context->firmware->size) { + dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d and length %d exceeds file size %zu\n", + data_offset, data_len, context->firmware->size); + sahara_send_reset(context); + return; + } + + for (i = 0; i < SAHARA_NUM_TX_BUF && data_len; ++i) { + pkt_data_len = min(data_len, SAHARA_PACKET_MAX_SIZE); + + memcpy(context->tx[i], &context->firmware->data[data_offset], pkt_data_len); + + data_offset += pkt_data_len; + data_len -= pkt_data_len; + + ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, + context->tx[i], pkt_data_len, + !data_len ? MHI_EOT : MHI_CHAIN); + if (ret) { + dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n", + ret); + return; + } + } +} + +static void sahara_end_of_image(struct sahara_context *context) +{ + int ret; + + dev_dbg(&context->mhi_dev->dev, + "END_OF_IMAGE cmd received. length:%d image:%d status:%d\n", + le32_to_cpu(context->rx->length), + le32_to_cpu(context->rx->end_of_image.image), + le32_to_cpu(context->rx->end_of_image.status)); + + if (le32_to_cpu(context->rx->length) != SAHARA_END_OF_IMAGE_LENGTH) { + dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - length %d\n", + le32_to_cpu(context->rx->length)); + return; + } + + if (context->active_image_id != SAHARA_IMAGE_ID_NONE && + le32_to_cpu(context->rx->end_of_image.image) != context->active_image_id) { + dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - image %d is not the active image\n", + le32_to_cpu(context->rx->end_of_image.image)); + return; + } + + sahara_release_image(context); + + if (le32_to_cpu(context->rx->end_of_image.status)) + return; + + context->tx[0]->cmd = cpu_to_le32(SAHARA_DONE_CMD); + context->tx[0]->length = cpu_to_le32(SAHARA_DONE_LENGTH); + + ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0], + SAHARA_DONE_LENGTH, MHI_EOT); + if (ret) + dev_dbg(&context->mhi_dev->dev, "Unable to send done response %d\n", ret); +} + +static void sahara_processing(struct work_struct *work) +{ + struct sahara_context *context = container_of(work, struct sahara_context, work); + int ret; + + switch (le32_to_cpu(context->rx->cmd)) { + case SAHARA_HELLO_CMD: + sahara_hello(context); + break; + case SAHARA_READ_DATA_CMD: + sahara_read_data(context); + break; + case SAHARA_END_OF_IMAGE_CMD: + sahara_end_of_image(context); + break; + case SAHARA_DONE_RESP_CMD: + /* Intentional do nothing as we don't need to exit an app */ + break; + default: + dev_err(&context->mhi_dev->dev, "Unknown command %d\n", + le32_to_cpu(context->rx->cmd)); + break; + } + + ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx, + SAHARA_PACKET_MAX_SIZE, MHI_EOT); + if (ret) + dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret); +} + +static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) +{ + struct sahara_context *context; + int ret; + int i; + + context = devm_kzalloc(&mhi_dev->dev, sizeof(*context), GFP_KERNEL); + if (!context) + 
return -ENOMEM; + + context->rx = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL); + if (!context->rx) + return -ENOMEM; + + /* + * AIC100 defines SAHARA_TRANSFER_MAX_SIZE as the largest value it + * will request for READ_DATA. This is larger than + * SAHARA_PACKET_MAX_SIZE, and we need 9x SAHARA_PACKET_MAX_SIZE to + * cover SAHARA_TRANSFER_MAX_SIZE. When the remote side issues a + * READ_DATA, it requires a transfer of the exact size requested. We + * can use MHI_CHAIN to link multiple buffers into a single transfer + * but the remote side will not consume the buffers until it sees an + * EOT, thus we need to allocate enough buffers to put in the tx fifo + * to cover an entire READ_DATA request of the max size. + */ + for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) { + context->tx[i] = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL); + if (!context->tx[i]) + return -ENOMEM; + } + + context->mhi_dev = mhi_dev; + INIT_WORK(&context->work, sahara_processing); + context->image_table = aic100_image_table; + context->table_size = ARRAY_SIZE(aic100_image_table); + context->active_image_id = SAHARA_IMAGE_ID_NONE; + dev_set_drvdata(&mhi_dev->dev, context); + + ret = mhi_prepare_for_transfer(mhi_dev); + if (ret) + return ret; + + ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, context->rx, SAHARA_PACKET_MAX_SIZE, MHI_EOT); + if (ret) { + mhi_unprepare_from_transfer(mhi_dev); + return ret; + } + + return 0; +} + +static void sahara_mhi_remove(struct mhi_device *mhi_dev) +{ + struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev); + + cancel_work_sync(&context->work); + sahara_release_image(context); + mhi_unprepare_from_transfer(mhi_dev); +} + +static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) +{ +} + +static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) +{ + struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev); + + if (!mhi_result->transaction_status) + schedule_work(&context->work); +} + +static const struct mhi_device_id sahara_mhi_match_table[] = { + { .chan = "QAIC_SAHARA", }, + {}, +}; + +static struct mhi_driver sahara_mhi_driver = { + .id_table = sahara_mhi_match_table, + .remove = sahara_mhi_remove, + .probe = sahara_mhi_probe, + .ul_xfer_cb = sahara_mhi_ul_xfer_cb, + .dl_xfer_cb = sahara_mhi_dl_xfer_cb, + .driver = { + .name = "sahara", + }, +}; + +int sahara_register(void) +{ + return mhi_driver_register(&sahara_mhi_driver); +} + +void sahara_unregister(void) +{ + mhi_driver_unregister(&sahara_mhi_driver); +} diff --git a/drivers/accel/qaic/sahara.h b/drivers/accel/qaic/sahara.h new file mode 100644 index 0000000000..640208acc0 --- /dev/null +++ b/drivers/accel/qaic/sahara.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ + +#ifndef __SAHARA_H__ +#define __SAHARA_H__ + +int sahara_register(void); +void sahara_unregister(void); +#endif /* __SAHARA_H__ */ diff --git a/drivers/accessibility/speakup/devsynth.c b/drivers/accessibility/speakup/devsynth.c index cb7e1114e8..e3d909bd04 100644 --- a/drivers/accessibility/speakup/devsynth.c +++ b/drivers/accessibility/speakup/devsynth.c @@ -39,13 +39,13 @@ static ssize_t speakup_file_write(struct file *fp, const char __user *buffer, static ssize_t speakup_file_writeu(struct file *fp, const char __user *buffer, size_t nbytes, loff_t *ppos) { - size_t count = nbytes, want; + size_t count = nbytes, consumed, want; const char __user *ptr = buffer; size_t bytes; unsigned long flags; unsigned char buf[256]; u16 ubuf[256]; - size_t in, in2, out; + size_t in, out; if (!synth) return -ENODEV; @@ -58,57 +58,24 @@ static ssize_t speakup_file_writeu(struct file *fp, const char __user *buffer, return -EFAULT; /* Convert to u16 */ - for (in = 0, out = 0; in < bytes; in++) { - unsigned char c = buf[in]; - int nbytes = 8 - fls(c ^ 0xff); - u32 value; - - switch (nbytes) { - case 8: /* 0xff */ - case 7: /* 0xfe */ - case 1: /* 0x80 */ - /* Invalid, drop */ - goto drop; - - case 0: - /* ASCII, copy */ - ubuf[out++] = c; - continue; + for (in = 0, out = 0; in < bytes; in += consumed) { + s32 value; - default: - /* 2..6-byte UTF-8 */ + value = synth_utf8_get(buf + in, bytes - in, &consumed, &want); + if (value == -1) { + /* Invalid or incomplete */ - if (bytes - in < nbytes) { + if (want > bytes - in) /* We don't have it all yet, stop here * and wait for the rest */ bytes = in; - want = nbytes; - continue; - } - - /* First byte */ - value = c & ((1u << (7 - nbytes)) - 1); - - /* Other bytes */ - for (in2 = 2; in2 <= nbytes; in2++) { - c = buf[in + 1]; - if ((c & 0xc0) != 0x80) { - /* Invalid, drop the head */ - want = 1; - goto drop; - } - value = (value << 6) | (c & 0x3f); - in++; - } - - if (value < 0x10000) - ubuf[out++] = value; - want = 1; - break; + + continue; } -drop: - /* empty statement */; + + if (value < 0x10000) + ubuf[out++] = value; } count -= bytes; diff --git a/drivers/accessibility/speakup/speakup.h b/drivers/accessibility/speakup/speakup.h index 364fde9974..54f1226ea0 100644 --- a/drivers/accessibility/speakup/speakup.h +++ b/drivers/accessibility/speakup/speakup.h @@ -76,7 +76,9 @@ int speakup_paste_selection(struct tty_struct *tty); void speakup_cancel_paste(void); void speakup_register_devsynth(void); void speakup_unregister_devsynth(void); +s32 synth_utf8_get(const char *buf, size_t count, size_t *consumed, size_t *want); void synth_write(const char *buf, size_t count); +void synth_writeu(const char *buf, size_t count); int synth_supports_indexing(void); extern struct vc_data *spk_sel_cons; diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c index 45f9061031..85062e605d 100644 --- a/drivers/accessibility/speakup/synth.c +++ b/drivers/accessibility/speakup/synth.c @@ -217,10 +217,95 @@ void synth_write(const char *_buf, size_t count) synth_start(); } +/* Consume one utf-8 character from buf (that contains up to count bytes), + * returns the unicode codepoint if valid, -1 otherwise. + * In all cases, returns the number of consumed bytes in *consumed, + * and the minimum number of bytes that would be needed for the next character + * in *want. 
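+ * For example, the 3-byte sequence 0xe2 0x82 0xac decodes to U+20AC with
+ * *consumed = 3 and *want = 1, while a lone 0x80 yields -1 with *consumed = 1.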
+ */ +s32 synth_utf8_get(const char *buf, size_t count, size_t *consumed, size_t *want) +{ + unsigned char c = buf[0]; + int nbytes = 8 - fls(c ^ 0xff); + u32 value; + size_t i; + + switch (nbytes) { + case 8: /* 0xff */ + case 7: /* 0xfe */ + case 1: /* 0x80 */ + /* Invalid, drop */ + *consumed = 1; + *want = 1; + return -1; + + case 0: + /* ASCII, take as such */ + *consumed = 1; + *want = 1; + return c; + + default: + /* 2..6-byte UTF-8 */ + + if (count < nbytes) { + /* We don't have it all */ + *consumed = 0; + *want = nbytes; + return -1; + } + + /* First byte */ + value = c & ((1u << (7 - nbytes)) - 1); + + /* Other bytes */ + for (i = 1; i < nbytes; i++) { + c = buf[i]; + if ((c & 0xc0) != 0x80) { + /* Invalid, drop the head */ + *consumed = i; + *want = 1; + return -1; + } + value = (value << 6) | (c & 0x3f); + } + + *consumed = nbytes; + *want = 1; + return value; + } +} + +void synth_writeu(const char *buf, size_t count) +{ + size_t i, consumed, want; + + /* Convert to u16 */ + for (i = 0; i < count; i++) { + s32 value; + + value = synth_utf8_get(buf + i, count - i, &consumed, &want); + if (value == -1) { + /* Invalid or incomplete */ + + if (want > count - i) + /* We don't have it all, stop */ + count = i; + + continue; + } + + if (value < 0x10000) + synth_buffer_add(value); + } + + synth_start(); +} + void synth_printf(const char *fmt, ...) { va_list args; - unsigned char buf[160], *p; + unsigned char buf[160]; int r; va_start(args, fmt); @@ -229,10 +314,7 @@ void synth_printf(const char *fmt, ...) if (r > sizeof(buf) - 1) r = sizeof(buf) - 1; - p = buf; - while (r--) - synth_buffer_add(*p++); - synth_start(); + synth_writeu(buf, r); } EXPORT_SYMBOL_GPL(synth_printf); diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index ff1689bb31..e3a7c2aedd 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -469,6 +469,9 @@ config ACPI_REDUCED_HARDWARE_ONLY If you are unsure what to do, do not enable this option. 
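For illustration only (not part of this patch): the consumed/want contract of the new synth_utf8_get() helper above can be exercised from a stand-alone user-space program. The utf8_get() function below is a hypothetical re-implementation of the same logic for the example, with a plain loop standing in for the kernel's fls()-based length detection; the function name and test string are made up.

#include <stdio.h>
#include <stdint.h>

/* Stand-alone sketch of the synth_utf8_get() contract, for illustration. */
static int32_t utf8_get(const unsigned char *buf, size_t count,
			size_t *consumed, size_t *want)
{
	unsigned char c = buf[0];
	int nbytes = 0;
	uint32_t value;
	size_t i;

	while (nbytes < 8 && (c & (0x80 >> nbytes)))	/* count leading 1 bits */
		nbytes++;

	if (nbytes == 1 || nbytes >= 7) {	/* 0x80..0xbf, 0xfe, 0xff: invalid */
		*consumed = 1;
		*want = 1;
		return -1;
	}
	if (nbytes == 0) {			/* ASCII, take as such */
		*consumed = 1;
		*want = 1;
		return c;
	}
	if (count < (size_t)nbytes) {		/* incomplete sequence */
		*consumed = 0;
		*want = nbytes;
		return -1;
	}
	value = c & ((1u << (7 - nbytes)) - 1);	/* payload bits of the first byte */
	for (i = 1; i < (size_t)nbytes; i++) {
		if ((buf[i] & 0xc0) != 0x80) {	/* bad continuation byte */
			*consumed = i;
			*want = 1;
			return -1;
		}
		value = (value << 6) | (buf[i] & 0x3f);
	}
	*consumed = nbytes;
	*want = 1;
	return value;
}

int main(void)
{
	const unsigned char s[] = "A\xe2\x82\xac";	/* "A" followed by U+20AC */
	size_t i = 0, consumed, want;

	while (i < sizeof(s) - 1) {
		int32_t cp = utf8_get(s + i, sizeof(s) - 1 - i, &consumed, &want);

		printf("offset %zu: codepoint %ld, consumed %zu, want %zu\n",
		       i, (long)cp, consumed, want);
		if (!consumed)	/* incomplete: would wait for more input */
			break;
		i += consumed;
	}
	return 0;
}

Running it prints the codepoint for 'A' (0x41) followed by U+20AC (8364), matching the decoding the driver performs before it filters out codepoints at or above 0x10000.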
+config ACPI_NHLT + bool + source "drivers/acpi/nfit/Kconfig" source "drivers/acpi/numa/Kconfig" source "drivers/acpi/apei/Kconfig" diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 8cc8c0d9c8..39ea5cfa83 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -14,7 +14,6 @@ tables.o: $(src)/../../include/$(CONFIG_ACPI_CUSTOM_DSDT_FILE) ; endif obj-$(CONFIG_ACPI) += tables.o -obj-$(CONFIG_X86) += blacklist.o # # ACPI Core Subsystem (Interpreter) @@ -46,7 +45,6 @@ acpi-y += ec.o acpi-$(CONFIG_ACPI_DOCK) += dock.o acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o -acpi-$(CONFIG_PCI) += acpi_lpss.o acpi-y += acpi_apd.o acpi-y += acpi_platform.o acpi-y += acpi_pnp.o @@ -55,10 +53,6 @@ acpi-y += event.o acpi-y += evged.o acpi-y += sysfs.o acpi-y += property.o -acpi-$(CONFIG_X86) += acpi_cmos_rtc.o -acpi-$(CONFIG_X86) += x86/apple.o -acpi-$(CONFIG_X86) += x86/utils.o -acpi-$(CONFIG_X86) += x86/s2idle.o acpi-$(CONFIG_DEBUG_FS) += debugfs.o acpi-y += acpi_lpat.o acpi-$(CONFIG_ACPI_FPDT) += acpi_fpdt.o @@ -93,6 +87,7 @@ obj-$(CONFIG_ACPI_THERMAL_LIB) += thermal_lib.o obj-$(CONFIG_ACPI_THERMAL) += thermal.o obj-$(CONFIG_ACPI_PLATFORM_PROFILE) += platform_profile.o obj-$(CONFIG_ACPI_NFIT) += nfit/ +obj-$(CONFIG_ACPI_NHLT) += nhlt.o obj-$(CONFIG_ACPI_NUMA) += numa/ obj-$(CONFIG_ACPI) += acpi_memhotplug.o obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o @@ -132,3 +127,4 @@ obj-$(CONFIG_ARM64) += arm64/ obj-$(CONFIG_ACPI_VIOT) += viot.o obj-$(CONFIG_RISCV) += riscv/ +obj-$(CONFIG_X86) += x86/ diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c deleted file mode 100644 index 9b55d1593d..0000000000 --- a/drivers/acpi/acpi_cmos_rtc.c +++ /dev/null @@ -1,98 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * ACPI support for CMOS RTC Address Space access - * - * Copyright (C) 2013, Intel Corporation - * Authors: Lan Tianyu - */ - -#define pr_fmt(fmt) "ACPI: " fmt - -#include -#include -#include -#include -#include -#include - -#include "internal.h" - -static const struct acpi_device_id acpi_cmos_rtc_ids[] = { - { "PNP0B00" }, - { "PNP0B01" }, - { "PNP0B02" }, - {} -}; - -static acpi_status -acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address, - u32 bits, u64 *value64, - void *handler_context, void *region_context) -{ - int i; - u8 *value = (u8 *)value64; - - if (address > 0xff || !value64) - return AE_BAD_PARAMETER; - - if (function != ACPI_WRITE && function != ACPI_READ) - return AE_BAD_PARAMETER; - - spin_lock_irq(&rtc_lock); - - for (i = 0; i < DIV_ROUND_UP(bits, 8); ++i, ++address, ++value) - if (function == ACPI_READ) - *value = CMOS_READ(address); - else - CMOS_WRITE(*value, address); - - spin_unlock_irq(&rtc_lock); - - return AE_OK; -} - -int acpi_install_cmos_rtc_space_handler(acpi_handle handle) -{ - acpi_status status; - - status = acpi_install_address_space_handler(handle, - ACPI_ADR_SPACE_CMOS, - &acpi_cmos_rtc_space_handler, - NULL, NULL); - if (ACPI_FAILURE(status)) { - pr_err("Error installing CMOS-RTC region handler\n"); - return -ENODEV; - } - - return 1; -} -EXPORT_SYMBOL_GPL(acpi_install_cmos_rtc_space_handler); - -void acpi_remove_cmos_rtc_space_handler(acpi_handle handle) -{ - if (ACPI_FAILURE(acpi_remove_address_space_handler(handle, - ACPI_ADR_SPACE_CMOS, &acpi_cmos_rtc_space_handler))) - pr_err("Error removing CMOS-RTC region handler\n"); -} -EXPORT_SYMBOL_GPL(acpi_remove_cmos_rtc_space_handler); - -static int acpi_cmos_rtc_attach_handler(struct acpi_device *adev, const 
struct acpi_device_id *id) -{ - return acpi_install_cmos_rtc_space_handler(adev->handle); -} - -static void acpi_cmos_rtc_detach_handler(struct acpi_device *adev) -{ - acpi_remove_cmos_rtc_space_handler(adev->handle); -} - -static struct acpi_scan_handler cmos_rtc_handler = { - .ids = acpi_cmos_rtc_ids, - .attach = acpi_cmos_rtc_attach_handler, - .detach = acpi_cmos_rtc_detach_handler, -}; - -void __init acpi_cmos_rtc_init(void) -{ - acpi_scan_add_handler(&cmos_rtc_handler); -} diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c index 0555f68c2d..5fba4dab5d 100644 --- a/drivers/acpi/acpi_ipmi.c +++ b/drivers/acpi/acpi_ipmi.c @@ -22,6 +22,8 @@ MODULE_LICENSE("GPL"); /* the IPMI timeout is 5s */ #define IPMI_TIMEOUT (5000) #define ACPI_IPMI_MAX_MSG_LENGTH 64 +/* 2s should be suffient for SMI being selected */ +#define ACPI_IPMI_SMI_SELECTION_TIMEOUT (2 * HZ) struct acpi_ipmi_device { /* the device list attached to driver_data.ipmi_devices */ @@ -54,6 +56,7 @@ struct ipmi_driver_data { * to this selected global IPMI system interface. */ struct acpi_ipmi_device *selected_smi; + struct completion smi_selection_done; }; struct acpi_ipmi_msg { @@ -463,8 +466,10 @@ static void ipmi_register_bmc(int iface, struct device *dev) if (temp->handle == handle) goto err_lock; } - if (!driver_data.selected_smi) + if (!driver_data.selected_smi) { driver_data.selected_smi = ipmi_device; + complete(&driver_data.smi_selection_done); + } list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices); mutex_unlock(&driver_data.ipmi_lock); @@ -578,6 +583,20 @@ out_msg: return status; } +int acpi_wait_for_acpi_ipmi(void) +{ + long ret; + + ret = wait_for_completion_interruptible_timeout(&driver_data.smi_selection_done, + ACPI_IPMI_SMI_SELECTION_TIMEOUT); + + if (ret <= 0) + return -ETIMEDOUT; + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_wait_for_acpi_ipmi); + static int __init acpi_ipmi_init(void) { int result; @@ -586,6 +605,8 @@ static int __init acpi_ipmi_init(void) if (acpi_disabled) return 0; + init_completion(&driver_data.smi_selection_done); + status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT, ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler, diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c deleted file mode 100644 index 8e01792228..0000000000 --- a/drivers/acpi/acpi_lpss.c +++ /dev/null @@ -1,1357 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * ACPI support for Intel Lynxpoint LPSS. - * - * Copyright (C) 2013, Intel Corporation - * Authors: Mika Westerberg - * Rafael J. 
Wysocki - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "internal.h" - -#ifdef CONFIG_X86_INTEL_LPSS - -#include -#include -#include - -#define LPSS_ADDR(desc) ((unsigned long)&desc) - -#define LPSS_CLK_SIZE 0x04 -#define LPSS_LTR_SIZE 0x18 - -/* Offsets relative to LPSS_PRIVATE_OFFSET */ -#define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16)) -#define LPSS_RESETS 0x04 -#define LPSS_RESETS_RESET_FUNC BIT(0) -#define LPSS_RESETS_RESET_APB BIT(1) -#define LPSS_GENERAL 0x08 -#define LPSS_GENERAL_LTR_MODE_SW BIT(2) -#define LPSS_GENERAL_UART_RTS_OVRD BIT(3) -#define LPSS_SW_LTR 0x10 -#define LPSS_AUTO_LTR 0x14 -#define LPSS_LTR_SNOOP_REQ BIT(15) -#define LPSS_LTR_SNOOP_MASK 0x0000FFFF -#define LPSS_LTR_SNOOP_LAT_1US 0x800 -#define LPSS_LTR_SNOOP_LAT_32US 0xC00 -#define LPSS_LTR_SNOOP_LAT_SHIFT 5 -#define LPSS_LTR_SNOOP_LAT_CUTOFF 3000 -#define LPSS_LTR_MAX_VAL 0x3FF -#define LPSS_TX_INT 0x20 -#define LPSS_TX_INT_MASK BIT(1) - -#define LPSS_PRV_REG_COUNT 9 - -/* LPSS Flags */ -#define LPSS_CLK BIT(0) -#define LPSS_CLK_GATE BIT(1) -#define LPSS_CLK_DIVIDER BIT(2) -#define LPSS_LTR BIT(3) -#define LPSS_SAVE_CTX BIT(4) -/* - * For some devices the DSDT AML code for another device turns off the device - * before our suspend handler runs, causing us to read/save all 1-s (0xffffffff) - * as ctx register values. - * Luckily these devices always use the same ctx register values, so we can - * work around this by saving the ctx registers once on activation. - */ -#define LPSS_SAVE_CTX_ONCE BIT(5) -#define LPSS_NO_D3_DELAY BIT(6) - -struct lpss_private_data; - -struct lpss_device_desc { - unsigned int flags; - const char *clk_con_id; - unsigned int prv_offset; - size_t prv_size_override; - const struct property_entry *properties; - void (*setup)(struct lpss_private_data *pdata); - bool resume_from_noirq; -}; - -static const struct lpss_device_desc lpss_dma_desc = { - .flags = LPSS_CLK, -}; - -struct lpss_private_data { - struct acpi_device *adev; - void __iomem *mmio_base; - resource_size_t mmio_size; - unsigned int fixed_clk_rate; - struct clk *clk; - const struct lpss_device_desc *dev_desc; - u32 prv_reg_ctx[LPSS_PRV_REG_COUNT]; -}; - -/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */ -static u32 pmc_atom_d3_mask = 0xfe000ffe; - -/* LPSS run time quirks */ -static unsigned int lpss_quirks; - -/* - * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device. - * - * The LPSS DMA controller has neither _PS0 nor _PS3 method. Moreover - * it can be powered off automatically whenever the last LPSS device goes down. - * In case of no power any access to the DMA controller will hang the system. - * The behaviour is reproduced on some HP laptops based on Intel BayTrail as - * well as on ASuS T100TA transformer. - * - * This quirk overrides power state of entire LPSS island to keep DMA powered - * on whenever we have at least one other device in use. 
- */ -#define LPSS_QUIRK_ALWAYS_POWER_ON BIT(0) - -/* UART Component Parameter Register */ -#define LPSS_UART_CPR 0xF4 -#define LPSS_UART_CPR_AFCE BIT(4) - -static void lpss_uart_setup(struct lpss_private_data *pdata) -{ - unsigned int offset; - u32 val; - - offset = pdata->dev_desc->prv_offset + LPSS_TX_INT; - val = readl(pdata->mmio_base + offset); - writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset); - - val = readl(pdata->mmio_base + LPSS_UART_CPR); - if (!(val & LPSS_UART_CPR_AFCE)) { - offset = pdata->dev_desc->prv_offset + LPSS_GENERAL; - val = readl(pdata->mmio_base + offset); - val |= LPSS_GENERAL_UART_RTS_OVRD; - writel(val, pdata->mmio_base + offset); - } -} - -static void lpss_deassert_reset(struct lpss_private_data *pdata) -{ - unsigned int offset; - u32 val; - - offset = pdata->dev_desc->prv_offset + LPSS_RESETS; - val = readl(pdata->mmio_base + offset); - val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC; - writel(val, pdata->mmio_base + offset); -} - -/* - * BYT PWM used for backlight control by the i915 driver on systems without - * the Crystal Cove PMIC. - */ -static struct pwm_lookup byt_pwm_lookup[] = { - PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0", - "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL, - "pwm-lpss-platform"), -}; - -static void byt_pwm_setup(struct lpss_private_data *pdata) -{ - /* Only call pwm_add_table for the first PWM controller */ - if (acpi_dev_uid_match(pdata->adev, 1)) - pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); -} - -#define LPSS_I2C_ENABLE 0x6c - -static void byt_i2c_setup(struct lpss_private_data *pdata) -{ - acpi_handle handle = pdata->adev->handle; - unsigned long long shared_host = 0; - acpi_status status; - u64 uid; - - /* Expected to always be successfull, but better safe then sorry */ - if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) { - /* Detect I2C bus shared with PUNIT and ignore its d3 status */ - status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host); - if (ACPI_SUCCESS(status) && shared_host) - pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1)); - } - - lpss_deassert_reset(pdata); - - if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset)) - pdata->fixed_clk_rate = 133000000; - - writel(0, pdata->mmio_base + LPSS_I2C_ENABLE); -} - -/* - * BSW PWM1 is used for backlight control by the i915 driver - * BSW PWM2 is used for backlight control for fixed (etched into the glass) - * touch controls on some models. These touch-controls have specialized - * drivers which know they need the "pwm_soc_lpss_2" con-id. 
- */ -static struct pwm_lookup bsw_pwm_lookup[] = { - PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0", - "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL, - "pwm-lpss-platform"), - PWM_LOOKUP_WITH_MODULE("80862289:00", 0, NULL, - "pwm_soc_lpss_2", 0, PWM_POLARITY_NORMAL, - "pwm-lpss-platform"), -}; - -static void bsw_pwm_setup(struct lpss_private_data *pdata) -{ - /* Only call pwm_add_table for the first PWM controller */ - if (acpi_dev_uid_match(pdata->adev, 1)) - pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); -} - -static const struct property_entry lpt_spi_properties[] = { - PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_LPT_SSP), - { } -}; - -static const struct lpss_device_desc lpt_spi_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR - | LPSS_SAVE_CTX, - .prv_offset = 0x800, - .properties = lpt_spi_properties, -}; - -static const struct lpss_device_desc lpt_i2c_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX, - .prv_offset = 0x800, -}; - -static struct property_entry uart_properties[] = { - PROPERTY_ENTRY_U32("reg-io-width", 4), - PROPERTY_ENTRY_U32("reg-shift", 2), - PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"), - { }, -}; - -static const struct lpss_device_desc lpt_uart_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR - | LPSS_SAVE_CTX, - .clk_con_id = "baudclk", - .prv_offset = 0x800, - .setup = lpss_uart_setup, - .properties = uart_properties, -}; - -static const struct lpss_device_desc lpt_sdio_dev_desc = { - .flags = LPSS_LTR, - .prv_offset = 0x1000, - .prv_size_override = 0x1018, -}; - -static const struct lpss_device_desc byt_pwm_dev_desc = { - .flags = LPSS_SAVE_CTX, - .prv_offset = 0x800, - .setup = byt_pwm_setup, -}; - -static const struct lpss_device_desc bsw_pwm_dev_desc = { - .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY, - .prv_offset = 0x800, - .setup = bsw_pwm_setup, - .resume_from_noirq = true, -}; - -static const struct lpss_device_desc bsw_pwm2_dev_desc = { - .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY, - .prv_offset = 0x800, - .resume_from_noirq = true, -}; - -static const struct lpss_device_desc byt_uart_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, - .clk_con_id = "baudclk", - .prv_offset = 0x800, - .setup = lpss_uart_setup, - .properties = uart_properties, -}; - -static const struct lpss_device_desc bsw_uart_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX - | LPSS_NO_D3_DELAY, - .clk_con_id = "baudclk", - .prv_offset = 0x800, - .setup = lpss_uart_setup, - .properties = uart_properties, -}; - -static const struct property_entry byt_spi_properties[] = { - PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BYT_SSP), - { } -}; - -static const struct lpss_device_desc byt_spi_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, - .prv_offset = 0x400, - .properties = byt_spi_properties, -}; - -static const struct lpss_device_desc byt_sdio_dev_desc = { - .flags = LPSS_CLK, -}; - -static const struct lpss_device_desc byt_i2c_dev_desc = { - .flags = LPSS_CLK | LPSS_SAVE_CTX, - .prv_offset = 0x800, - .setup = byt_i2c_setup, - .resume_from_noirq = true, -}; - -static const struct lpss_device_desc bsw_i2c_dev_desc = { - .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, - .prv_offset = 0x800, - .setup = byt_i2c_setup, - .resume_from_noirq = true, -}; - -static const struct property_entry bsw_spi_properties[] = { - 
PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP), - PROPERTY_ENTRY_U32("num-cs", 2), - { } -}; - -static const struct lpss_device_desc bsw_spi_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX - | LPSS_NO_D3_DELAY, - .prv_offset = 0x400, - .setup = lpss_deassert_reset, - .properties = bsw_spi_properties, -}; - -static const struct x86_cpu_id lpss_cpu_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL), - {} -}; - -#else - -#define LPSS_ADDR(desc) (0UL) - -#endif /* CONFIG_X86_INTEL_LPSS */ - -static const struct acpi_device_id acpi_lpss_device_ids[] = { - /* Generic LPSS devices */ - { "INTL9C60", LPSS_ADDR(lpss_dma_desc) }, - - /* Lynxpoint LPSS devices */ - { "INT33C0", LPSS_ADDR(lpt_spi_dev_desc) }, - { "INT33C1", LPSS_ADDR(lpt_spi_dev_desc) }, - { "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) }, - { "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) }, - { "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) }, - { "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) }, - { "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) }, - - /* BayTrail LPSS devices */ - { "80860F09", LPSS_ADDR(byt_pwm_dev_desc) }, - { "80860F0A", LPSS_ADDR(byt_uart_dev_desc) }, - { "80860F0E", LPSS_ADDR(byt_spi_dev_desc) }, - { "80860F14", LPSS_ADDR(byt_sdio_dev_desc) }, - { "80860F41", LPSS_ADDR(byt_i2c_dev_desc) }, - - /* Braswell LPSS devices */ - { "80862286", LPSS_ADDR(lpss_dma_desc) }, - { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) }, - { "80862289", LPSS_ADDR(bsw_pwm2_dev_desc) }, - { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) }, - { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) }, - { "808622C0", LPSS_ADDR(lpss_dma_desc) }, - { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) }, - - /* Broadwell LPSS devices */ - { "INT3430", LPSS_ADDR(lpt_spi_dev_desc) }, - { "INT3431", LPSS_ADDR(lpt_spi_dev_desc) }, - { "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) }, - { "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) }, - { "INT3434", LPSS_ADDR(lpt_uart_dev_desc) }, - { "INT3435", LPSS_ADDR(lpt_uart_dev_desc) }, - { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) }, - - /* Wildcat Point LPSS devices */ - { "INT3438", LPSS_ADDR(lpt_spi_dev_desc) }, - - { } -}; - -#ifdef CONFIG_X86_INTEL_LPSS - -/* LPSS main clock device. 
*/ -static struct platform_device *lpss_clk_dev; - -static inline void lpt_register_clock_device(void) -{ - lpss_clk_dev = platform_device_register_simple("clk-lpss-atom", - PLATFORM_DEVID_NONE, - NULL, 0); -} - -static int register_device_clock(struct acpi_device *adev, - struct lpss_private_data *pdata) -{ - const struct lpss_device_desc *dev_desc = pdata->dev_desc; - const char *devname = dev_name(&adev->dev); - struct clk *clk; - struct lpss_clk_data *clk_data; - const char *parent, *clk_name; - void __iomem *prv_base; - - if (!lpss_clk_dev) - lpt_register_clock_device(); - - if (IS_ERR(lpss_clk_dev)) - return PTR_ERR(lpss_clk_dev); - - clk_data = platform_get_drvdata(lpss_clk_dev); - if (!clk_data) - return -ENODEV; - clk = clk_data->clk; - - if (!pdata->mmio_base - || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE) - return -ENODATA; - - parent = clk_data->name; - prv_base = pdata->mmio_base + dev_desc->prv_offset; - - if (pdata->fixed_clk_rate) { - clk = clk_register_fixed_rate(NULL, devname, parent, 0, - pdata->fixed_clk_rate); - goto out; - } - - if (dev_desc->flags & LPSS_CLK_GATE) { - clk = clk_register_gate(NULL, devname, parent, 0, - prv_base, 0, 0, NULL); - parent = devname; - } - - if (dev_desc->flags & LPSS_CLK_DIVIDER) { - /* Prevent division by zero */ - if (!readl(prv_base)) - writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base); - - clk_name = kasprintf(GFP_KERNEL, "%s-div", devname); - if (!clk_name) - return -ENOMEM; - clk = clk_register_fractional_divider(NULL, clk_name, parent, - 0, prv_base, 1, 15, 16, 15, - CLK_FRAC_DIVIDER_POWER_OF_TWO_PS, - NULL); - parent = clk_name; - - clk_name = kasprintf(GFP_KERNEL, "%s-update", devname); - if (!clk_name) { - kfree(parent); - return -ENOMEM; - } - clk = clk_register_gate(NULL, clk_name, parent, - CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, - prv_base, 31, 0, NULL); - kfree(parent); - kfree(clk_name); - } -out: - if (IS_ERR(clk)) - return PTR_ERR(clk); - - pdata->clk = clk; - clk_register_clkdev(clk, dev_desc->clk_con_id, devname); - return 0; -} - -struct lpss_device_links { - const char *supplier_hid; - const char *supplier_uid; - const char *consumer_hid; - const char *consumer_uid; - u32 flags; - const struct dmi_system_id *dep_missing_ids; -}; - -/* Please keep this list sorted alphabetically by vendor and model */ -static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = { - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"), - }, - }, - {} -}; - -/* - * The _DEP method is used to identify dependencies but instead of creating - * device links for every handle in _DEP, only links in the following list are - * created. That is necessary because, in the general case, _DEP can refer to - * devices that might not have drivers, or that are on different buses, or where - * the supplier is not enumerated until after the consumer is probed. 
- */ -static const struct lpss_device_links lpss_device_links[] = { - /* CHT External sdcard slot controller depends on PMIC I2C ctrl */ - {"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME}, - /* CHT iGPU depends on PMIC I2C controller */ - {"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME}, - /* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */ - {"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME, - i2c1_dep_missing_dmi_ids}, - /* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */ - {"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME}, - /* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */ - {"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME}, -}; - -static bool acpi_lpss_is_supplier(struct acpi_device *adev, - const struct lpss_device_links *link) -{ - return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid); -} - -static bool acpi_lpss_is_consumer(struct acpi_device *adev, - const struct lpss_device_links *link) -{ - return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid); -} - -struct hid_uid { - const char *hid; - const char *uid; -}; - -static int match_hid_uid(struct device *dev, const void *data) -{ - struct acpi_device *adev = ACPI_COMPANION(dev); - const struct hid_uid *id = data; - - if (!adev) - return 0; - - return acpi_dev_hid_uid_match(adev, id->hid, id->uid); -} - -static struct device *acpi_lpss_find_device(const char *hid, const char *uid) -{ - struct device *dev; - - struct hid_uid data = { - .hid = hid, - .uid = uid, - }; - - dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid); - if (dev) - return dev; - - return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid); -} - -static void acpi_lpss_link_consumer(struct device *dev1, - const struct lpss_device_links *link) -{ - struct device *dev2; - - dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid); - if (!dev2) - return; - - if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids)) - || acpi_device_dep(ACPI_HANDLE(dev2), ACPI_HANDLE(dev1))) - device_link_add(dev2, dev1, link->flags); - - put_device(dev2); -} - -static void acpi_lpss_link_supplier(struct device *dev1, - const struct lpss_device_links *link) -{ - struct device *dev2; - - dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid); - if (!dev2) - return; - - if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids)) - || acpi_device_dep(ACPI_HANDLE(dev1), ACPI_HANDLE(dev2))) - device_link_add(dev1, dev2, link->flags); - - put_device(dev2); -} - -static void acpi_lpss_create_device_links(struct acpi_device *adev, - struct platform_device *pdev) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) { - const struct lpss_device_links *link = &lpss_device_links[i]; - - if (acpi_lpss_is_supplier(adev, link)) - acpi_lpss_link_consumer(&pdev->dev, link); - - if (acpi_lpss_is_consumer(adev, link)) - acpi_lpss_link_supplier(&pdev->dev, link); - } -} - -static int acpi_lpss_create_device(struct acpi_device *adev, - const struct acpi_device_id *id) -{ - const struct lpss_device_desc *dev_desc; - struct lpss_private_data *pdata; - struct resource_entry *rentry; - struct list_head resource_list; - struct platform_device *pdev; - int ret; - - dev_desc = (const struct lpss_device_desc *)id->driver_data; - if (!dev_desc) - return -EINVAL; - - pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return -ENOMEM; - - INIT_LIST_HEAD(&resource_list); - ret = 
acpi_dev_get_memory_resources(adev, &resource_list); - if (ret < 0) - goto err_out; - - rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node); - if (rentry) { - if (dev_desc->prv_size_override) - pdata->mmio_size = dev_desc->prv_size_override; - else - pdata->mmio_size = resource_size(rentry->res); - pdata->mmio_base = ioremap(rentry->res->start, pdata->mmio_size); - } - - acpi_dev_free_resource_list(&resource_list); - - if (!pdata->mmio_base) { - /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */ - adev->pnp.type.platform_id = 0; - goto out_free; - } - - pdata->adev = adev; - pdata->dev_desc = dev_desc; - - if (dev_desc->setup) - dev_desc->setup(pdata); - - if (dev_desc->flags & LPSS_CLK) { - ret = register_device_clock(adev, pdata); - if (ret) - goto out_free; - } - - /* - * This works around a known issue in ACPI tables where LPSS devices - * have _PS0 and _PS3 without _PSC (and no power resources), so - * acpi_bus_init_power() will assume that the BIOS has put them into D0. - */ - acpi_device_fix_up_power(adev); - - adev->driver_data = pdata; - pdev = acpi_create_platform_device(adev, dev_desc->properties); - if (IS_ERR_OR_NULL(pdev)) { - adev->driver_data = NULL; - ret = PTR_ERR(pdev); - goto err_out; - } - - acpi_lpss_create_device_links(adev, pdev); - return 1; - -out_free: - /* Skip the device, but continue the namespace scan */ - ret = 0; -err_out: - kfree(pdata); - return ret; -} - -static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg) -{ - return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg); -} - -static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata, - unsigned int reg) -{ - writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg); -} - -static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val) -{ - struct acpi_device *adev = ACPI_COMPANION(dev); - struct lpss_private_data *pdata; - unsigned long flags; - int ret; - - if (WARN_ON(!adev)) - return -ENODEV; - - spin_lock_irqsave(&dev->power.lock, flags); - if (pm_runtime_suspended(dev)) { - ret = -EAGAIN; - goto out; - } - pdata = acpi_driver_data(adev); - if (WARN_ON(!pdata || !pdata->mmio_base)) { - ret = -ENODEV; - goto out; - } - *val = __lpss_reg_read(pdata, reg); - ret = 0; - - out: - spin_unlock_irqrestore(&dev->power.lock, flags); - return ret; -} - -static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - u32 ltr_value = 0; - unsigned int reg; - int ret; - - reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR; - ret = lpss_reg_read(dev, reg, &ltr_value); - if (ret) - return ret; - - return sysfs_emit(buf, "%08x\n", ltr_value); -} - -static ssize_t lpss_ltr_mode_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - u32 ltr_mode = 0; - char *outstr; - int ret; - - ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode); - if (ret) - return ret; - - outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ?
"sw" : "auto"; - return sprintf(buf, "%s\n", outstr); -} - -static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL); -static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL); -static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL); - -static struct attribute *lpss_attrs[] = { - &dev_attr_auto_ltr.attr, - &dev_attr_sw_ltr.attr, - &dev_attr_ltr_mode.attr, - NULL, -}; - -static const struct attribute_group lpss_attr_group = { - .attrs = lpss_attrs, - .name = "lpss_ltr", -}; - -static void acpi_lpss_set_ltr(struct device *dev, s32 val) -{ - struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - u32 ltr_mode, ltr_val; - - ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL); - if (val < 0) { - if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) { - ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW; - __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL); - } - return; - } - ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK; - if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) { - ltr_val |= LPSS_LTR_SNOOP_LAT_32US; - val = LPSS_LTR_MAX_VAL; - } else if (val > LPSS_LTR_MAX_VAL) { - ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ; - val >>= LPSS_LTR_SNOOP_LAT_SHIFT; - } else { - ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ; - } - ltr_val |= val; - __lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR); - if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) { - ltr_mode |= LPSS_GENERAL_LTR_MODE_SW; - __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL); - } -} - -#ifdef CONFIG_PM -/** - * acpi_lpss_save_ctx() - Save the private registers of LPSS device - * @dev: LPSS device - * @pdata: pointer to the private data of the LPSS device - * - * Most LPSS devices have private registers which may loose their context when - * the device is powered down. acpi_lpss_save_ctx() saves those registers into - * prv_reg_ctx array. - */ -static void acpi_lpss_save_ctx(struct device *dev, - struct lpss_private_data *pdata) -{ - unsigned int i; - - for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { - unsigned long offset = i * sizeof(u32); - - pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset); - dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n", - pdata->prv_reg_ctx[i], offset); - } -} - -/** - * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device - * @dev: LPSS device - * @pdata: pointer to the private data of the LPSS device - * - * Restores the registers that were previously stored with acpi_lpss_save_ctx(). - */ -static void acpi_lpss_restore_ctx(struct device *dev, - struct lpss_private_data *pdata) -{ - unsigned int i; - - for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { - unsigned long offset = i * sizeof(u32); - - __lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset); - dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n", - pdata->prv_reg_ctx[i], offset); - } -} - -static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata) -{ - /* - * The following delay is needed or the subsequent write operations may - * fail. The LPSS devices are actually PCI devices and the PCI spec - * expects 10ms delay before the device can be accessed after D3 to D0 - * transition. However some platforms like BSW does not need this delay. 
- */ - unsigned int delay = 10; /* default 10ms delay */ - - if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY) - delay = 0; - - msleep(delay); -} - -static int acpi_lpss_activate(struct device *dev) -{ - struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - int ret; - - ret = acpi_dev_resume(dev); - if (ret) - return ret; - - acpi_lpss_d3_to_d0_delay(pdata); - - /* - * This is called only on ->probe() stage where a device is either in - * known state defined by BIOS or most likely powered off. Due to this - * we have to deassert reset line to be sure that ->probe() will - * recognize the device. - */ - if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE)) - lpss_deassert_reset(pdata); - -#ifdef CONFIG_PM - if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE) - acpi_lpss_save_ctx(dev, pdata); -#endif - - return 0; -} - -static void acpi_lpss_dismiss(struct device *dev) -{ - acpi_dev_suspend(dev, false); -} - -/* IOSF SB for LPSS island */ -#define LPSS_IOSF_UNIT_LPIOEP 0xA0 -#define LPSS_IOSF_UNIT_LPIO1 0xAB -#define LPSS_IOSF_UNIT_LPIO2 0xAC - -#define LPSS_IOSF_PMCSR 0x84 -#define LPSS_PMCSR_D0 0 -#define LPSS_PMCSR_D3hot 3 -#define LPSS_PMCSR_Dx_MASK GENMASK(1, 0) - -#define LPSS_IOSF_GPIODEF0 0x154 -#define LPSS_GPIODEF0_DMA1_D3 BIT(2) -#define LPSS_GPIODEF0_DMA2_D3 BIT(3) -#define LPSS_GPIODEF0_DMA_D3_MASK GENMASK(3, 2) -#define LPSS_GPIODEF0_DMA_LLP BIT(13) - -static DEFINE_MUTEX(lpss_iosf_mutex); -static bool lpss_iosf_d3_entered = true; - -static void lpss_iosf_enter_d3_state(void) -{ - u32 value1 = 0; - u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP; - u32 value2 = LPSS_PMCSR_D3hot; - u32 mask2 = LPSS_PMCSR_Dx_MASK; - /* - * PMC provides an information about actual status of the LPSS devices. - * Here we read the values related to LPSS power island, i.e. LPSS - * devices, excluding both LPSS DMA controllers, along with SCC domain. - */ - u32 func_dis, d3_sts_0, pmc_status; - int ret; - - ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis); - if (ret) - return; - - mutex_lock(&lpss_iosf_mutex); - - ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0); - if (ret) - goto exit; - - /* - * Get the status of entire LPSS power island per device basis. - * Shutdown both LPSS DMA controllers if and only if all other devices - * are already in D3hot. 
- */ - pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask; - if (pmc_status) - goto exit; - - iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, - LPSS_IOSF_PMCSR, value2, mask2); - - iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE, - LPSS_IOSF_PMCSR, value2, mask2); - - iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, - LPSS_IOSF_GPIODEF0, value1, mask1); - - lpss_iosf_d3_entered = true; - -exit: - mutex_unlock(&lpss_iosf_mutex); -} - -static void lpss_iosf_exit_d3_state(void) -{ - u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 | - LPSS_GPIODEF0_DMA_LLP; - u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP; - u32 value2 = LPSS_PMCSR_D0; - u32 mask2 = LPSS_PMCSR_Dx_MASK; - - mutex_lock(&lpss_iosf_mutex); - - if (!lpss_iosf_d3_entered) - goto exit; - - lpss_iosf_d3_entered = false; - - iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, - LPSS_IOSF_GPIODEF0, value1, mask1); - - iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE, - LPSS_IOSF_PMCSR, value2, mask2); - - iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, - LPSS_IOSF_PMCSR, value2, mask2); - -exit: - mutex_unlock(&lpss_iosf_mutex); -} - -static int acpi_lpss_suspend(struct device *dev, bool wakeup) -{ - struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - int ret; - - if (pdata->dev_desc->flags & LPSS_SAVE_CTX) - acpi_lpss_save_ctx(dev, pdata); - - ret = acpi_dev_suspend(dev, wakeup); - - /* - * This call must be last in the sequence, otherwise PMC will return - * wrong status for devices being about to be powered off. See - * lpss_iosf_enter_d3_state() for further information. - */ - if (acpi_target_system_state() == ACPI_STATE_S0 && - lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) - lpss_iosf_enter_d3_state(); - - return ret; -} - -static int acpi_lpss_resume(struct device *dev) -{ - struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - int ret; - - /* - * This call is kept first to be in symmetry with - * acpi_lpss_runtime_suspend() one. - */ - if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) - lpss_iosf_exit_d3_state(); - - ret = acpi_dev_resume(dev); - if (ret) - return ret; - - acpi_lpss_d3_to_d0_delay(pdata); - - if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE)) - acpi_lpss_restore_ctx(dev, pdata); - - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int acpi_lpss_do_suspend_late(struct device *dev) -{ - int ret; - - if (dev_pm_skip_suspend(dev)) - return 0; - - ret = pm_generic_suspend_late(dev); - return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev)); -} - -static int acpi_lpss_suspend_late(struct device *dev) -{ - struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - - if (pdata->dev_desc->resume_from_noirq) - return 0; - - return acpi_lpss_do_suspend_late(dev); -} - -static int acpi_lpss_suspend_noirq(struct device *dev) -{ - struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - int ret; - - if (pdata->dev_desc->resume_from_noirq) { - /* - * The driver's ->suspend_late callback will be invoked by - * acpi_lpss_do_suspend_late(), with the assumption that the - * driver really wanted to run that code in ->suspend_noirq, but - * it could not run after acpi_dev_suspend() and the driver - * expected the latter to be called in the "late" phase. 
- */ - ret = acpi_lpss_do_suspend_late(dev); - if (ret) - return ret; - } - - return acpi_subsys_suspend_noirq(dev); -} - -static int acpi_lpss_do_resume_early(struct device *dev) -{ - int ret = acpi_lpss_resume(dev); - - return ret ? ret : pm_generic_resume_early(dev); -} - -static int acpi_lpss_resume_early(struct device *dev) -{ - struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - - if (pdata->dev_desc->resume_from_noirq) - return 0; - - if (dev_pm_skip_resume(dev)) - return 0; - - return acpi_lpss_do_resume_early(dev); -} - -static int acpi_lpss_resume_noirq(struct device *dev) -{ - struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - int ret; - - /* Follow acpi_subsys_resume_noirq(). */ - if (dev_pm_skip_resume(dev)) - return 0; - - ret = pm_generic_resume_noirq(dev); - if (ret) - return ret; - - if (!pdata->dev_desc->resume_from_noirq) - return 0; - - /* - * The driver's ->resume_early callback will be invoked by - * acpi_lpss_do_resume_early(), with the assumption that the driver - * really wanted to run that code in ->resume_noirq, but it could not - * run before acpi_dev_resume() and the driver expected the latter to be - * called in the "early" phase. - */ - return acpi_lpss_do_resume_early(dev); -} - -static int acpi_lpss_do_restore_early(struct device *dev) -{ - int ret = acpi_lpss_resume(dev); - - return ret ? ret : pm_generic_restore_early(dev); -} - -static int acpi_lpss_restore_early(struct device *dev) -{ - struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - - if (pdata->dev_desc->resume_from_noirq) - return 0; - - return acpi_lpss_do_restore_early(dev); -} - -static int acpi_lpss_restore_noirq(struct device *dev) -{ - struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - int ret; - - ret = pm_generic_restore_noirq(dev); - if (ret) - return ret; - - if (!pdata->dev_desc->resume_from_noirq) - return 0; - - /* This is analogous to what happens in acpi_lpss_resume_noirq(). */ - return acpi_lpss_do_restore_early(dev); -} - -static int acpi_lpss_do_poweroff_late(struct device *dev) -{ - int ret = pm_generic_poweroff_late(dev); - - return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev)); -} - -static int acpi_lpss_poweroff_late(struct device *dev) -{ - struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - - if (dev_pm_skip_suspend(dev)) - return 0; - - if (pdata->dev_desc->resume_from_noirq) - return 0; - - return acpi_lpss_do_poweroff_late(dev); -} - -static int acpi_lpss_poweroff_noirq(struct device *dev) -{ - struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - - if (dev_pm_skip_suspend(dev)) - return 0; - - if (pdata->dev_desc->resume_from_noirq) { - /* This is analogous to the acpi_lpss_suspend_noirq() case. */ - int ret = acpi_lpss_do_poweroff_late(dev); - - if (ret) - return ret; - } - - return pm_generic_poweroff_noirq(dev); -} -#endif /* CONFIG_PM_SLEEP */ - -static int acpi_lpss_runtime_suspend(struct device *dev) -{ - int ret = pm_generic_runtime_suspend(dev); - - return ret ? ret : acpi_lpss_suspend(dev, true); -} - -static int acpi_lpss_runtime_resume(struct device *dev) -{ - int ret = acpi_lpss_resume(dev); - - return ret ? 
ret : pm_generic_runtime_resume(dev); -} -#endif /* CONFIG_PM */ - -static struct dev_pm_domain acpi_lpss_pm_domain = { -#ifdef CONFIG_PM - .activate = acpi_lpss_activate, - .dismiss = acpi_lpss_dismiss, -#endif - .ops = { -#ifdef CONFIG_PM -#ifdef CONFIG_PM_SLEEP - .prepare = acpi_subsys_prepare, - .complete = acpi_subsys_complete, - .suspend = acpi_subsys_suspend, - .suspend_late = acpi_lpss_suspend_late, - .suspend_noirq = acpi_lpss_suspend_noirq, - .resume_noirq = acpi_lpss_resume_noirq, - .resume_early = acpi_lpss_resume_early, - .freeze = acpi_subsys_freeze, - .poweroff = acpi_subsys_poweroff, - .poweroff_late = acpi_lpss_poweroff_late, - .poweroff_noirq = acpi_lpss_poweroff_noirq, - .restore_noirq = acpi_lpss_restore_noirq, - .restore_early = acpi_lpss_restore_early, -#endif - .runtime_suspend = acpi_lpss_runtime_suspend, - .runtime_resume = acpi_lpss_runtime_resume, -#endif - }, -}; - -static int acpi_lpss_platform_notify(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct platform_device *pdev = to_platform_device(data); - struct lpss_private_data *pdata; - struct acpi_device *adev; - const struct acpi_device_id *id; - - id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev); - if (!id || !id->driver_data) - return 0; - - adev = ACPI_COMPANION(&pdev->dev); - if (!adev) - return 0; - - pdata = acpi_driver_data(adev); - if (!pdata) - return 0; - - if (pdata->mmio_base && - pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) { - dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n"); - return 0; - } - - switch (action) { - case BUS_NOTIFY_BIND_DRIVER: - dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain); - break; - case BUS_NOTIFY_DRIVER_NOT_BOUND: - case BUS_NOTIFY_UNBOUND_DRIVER: - dev_pm_domain_set(&pdev->dev, NULL); - break; - case BUS_NOTIFY_ADD_DEVICE: - dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain); - if (pdata->dev_desc->flags & LPSS_LTR) - return sysfs_create_group(&pdev->dev.kobj, - &lpss_attr_group); - break; - case BUS_NOTIFY_DEL_DEVICE: - if (pdata->dev_desc->flags & LPSS_LTR) - sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); - dev_pm_domain_set(&pdev->dev, NULL); - break; - default: - break; - } - - return 0; -} - -static struct notifier_block acpi_lpss_nb = { - .notifier_call = acpi_lpss_platform_notify, -}; - -static void acpi_lpss_bind(struct device *dev) -{ - struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - - if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR)) - return; - - if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) - dev->power.set_latency_tolerance = acpi_lpss_set_ltr; - else - dev_err(dev, "MMIO size insufficient to access LTR\n"); -} - -static void acpi_lpss_unbind(struct device *dev) -{ - dev->power.set_latency_tolerance = NULL; -} - -static struct acpi_scan_handler lpss_handler = { - .ids = acpi_lpss_device_ids, - .attach = acpi_lpss_create_device, - .bind = acpi_lpss_bind, - .unbind = acpi_lpss_unbind, -}; - -void __init acpi_lpss_init(void) -{ - const struct x86_cpu_id *id; - int ret; - - ret = lpss_atom_clk_init(); - if (ret) - return; - - id = x86_match_cpu(lpss_cpu_ids); - if (id) - lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON; - - bus_register_notifier(&platform_bus_type, &acpi_lpss_nb); - acpi_scan_add_handler(&lpss_handler); -} - -#else - -static struct acpi_scan_handler lpss_handler = { - .ids = acpi_lpss_device_ids, -}; - -void __init acpi_lpss_init(void) -{ - acpi_scan_add_handler(&lpss_handler); -} - -#endif /* 
CONFIG_X86_INTEL_LPSS */ diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 82563b44af..02012168a0 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -547,7 +547,7 @@ struct acpi_field_info { struct acpi_ged_handler_info { struct acpi_ged_handler_info *next; - u32 int_id; /* The interrupt ID that triggers the execution ofthe evt_method. */ + u32 int_id; /* The interrupt ID that triggers the execution of the evt_method. */ struct acpi_namespace_node *evt_method; /* The _EVT method to be executed when an interrupt with ID = int_ID is received */ }; diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 1bdfeee5d7..8fc02946d3 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -48,7 +48,7 @@ u8 descriptor_type; /* To differentiate various internal objs */\ u8 type; /* acpi_object_type */\ u16 reference_count; /* For object deletion management */\ - u8 flags; + u8 flags /* * Note: There are 3 bytes available here before the * next natural alignment boundary (for both 32/64 cases) @@ -71,10 +71,12 @@ *****************************************************************************/ struct acpi_object_common { -ACPI_OBJECT_COMMON_HEADER}; + ACPI_OBJECT_COMMON_HEADER; +}; struct acpi_object_integer { - ACPI_OBJECT_COMMON_HEADER u8 fill[3]; /* Prevent warning on some compilers */ + ACPI_OBJECT_COMMON_HEADER; + u8 fill[3]; /* Prevent warning on some compilers */ u64 value; }; @@ -86,23 +88,26 @@ struct acpi_object_integer { */ #define ACPI_COMMON_BUFFER_INFO(_type) \ _type *pointer; \ - u32 length; + u32 length /* Null terminated, ASCII characters only */ struct acpi_object_string { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(char) /* String in AML stream or allocated string */ + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_BUFFER_INFO(char); /* String in AML stream or allocated string */ }; struct acpi_object_buffer { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(u8) /* Buffer in AML stream or allocated buffer */ + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_BUFFER_INFO(u8); /* Buffer in AML stream or allocated buffer */ u32 aml_length; u8 *aml_start; struct acpi_namespace_node *node; /* Link back to parent node */ }; struct acpi_object_package { - ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Link back to parent node */ + ACPI_OBJECT_COMMON_HEADER; + struct acpi_namespace_node *node; /* Link back to parent node */ union acpi_operand_object **elements; /* Array of pointers to acpi_objects */ u8 *aml_start; u32 aml_length; @@ -116,11 +121,13 @@ struct acpi_object_package { *****************************************************************************/ struct acpi_object_event { - ACPI_OBJECT_COMMON_HEADER acpi_semaphore os_semaphore; /* Actual OS synchronization object */ + ACPI_OBJECT_COMMON_HEADER; + acpi_semaphore os_semaphore; /* Actual OS synchronization object */ }; struct acpi_object_mutex { - ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */ + ACPI_OBJECT_COMMON_HEADER; + u8 sync_level; /* 0-15, specified in Mutex() call */ u16 acquisition_depth; /* Allow multiple Acquires, same thread */ acpi_mutex os_mutex; /* Actual OS synchronization object */ acpi_thread_id thread_id; /* Current owner of the mutex */ @@ -132,7 +139,8 @@ struct acpi_object_mutex { }; struct acpi_object_region { - ACPI_OBJECT_COMMON_HEADER u8 space_id; + ACPI_OBJECT_COMMON_HEADER; + u8 space_id; struct acpi_namespace_node *node; /* Containing namespace 
node */ union acpi_operand_object *handler; /* Handler for region access */ union acpi_operand_object *next; @@ -142,7 +150,8 @@ struct acpi_object_region { }; struct acpi_object_method { - ACPI_OBJECT_COMMON_HEADER u8 info_flags; + ACPI_OBJECT_COMMON_HEADER; + u8 info_flags; u8 param_count; u8 sync_level; union acpi_operand_object *mutex; @@ -178,33 +187,43 @@ struct acpi_object_method { */ #define ACPI_COMMON_NOTIFY_INFO \ union acpi_operand_object *notify_list[2]; /* Handlers for system/device notifies */\ - union acpi_operand_object *handler; /* Handler for Address space */ + union acpi_operand_object *handler /* Handler for Address space */ /* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */ struct acpi_object_notify_common { -ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO}; + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_NOTIFY_INFO; +}; struct acpi_object_device { - ACPI_OBJECT_COMMON_HEADER - ACPI_COMMON_NOTIFY_INFO struct acpi_gpe_block_info *gpe_block; + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_NOTIFY_INFO; + struct acpi_gpe_block_info *gpe_block; }; struct acpi_object_power_resource { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO u32 system_level; + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_NOTIFY_INFO; + u32 system_level; u32 resource_order; }; struct acpi_object_processor { - ACPI_OBJECT_COMMON_HEADER - /* The next two fields take advantage of the 3-byte space before NOTIFY_INFO */ + ACPI_OBJECT_COMMON_HEADER; + + /* The next two fields take advantage of the 3-byte space before NOTIFY_INFO */ + u8 proc_id; u8 length; - ACPI_COMMON_NOTIFY_INFO acpi_io_address address; + ACPI_COMMON_NOTIFY_INFO; + acpi_io_address address; }; struct acpi_object_thermal_zone { -ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO}; + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_NOTIFY_INFO; +}; /****************************************************************************** * @@ -226,17 +245,21 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO}; u32 base_byte_offset; /* Byte offset within containing object */\ u32 value; /* Value to store into the Bank or Index register */\ u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\ - u8 access_length; /* For serial regions/fields */ + u8 access_length /* For serial regions/fields */ /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */ struct acpi_object_field_common { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */ + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_FIELD_INFO; + union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */ }; struct acpi_object_region_field { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length; + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_FIELD_INFO; + u16 resource_length; union acpi_operand_object *region_obj; /* Containing op_region object */ u8 *resource_buffer; /* resource_template for serial regions/fields */ u16 pin_number_index; /* Index relative to previous Connection/Template */ @@ -244,16 +267,20 @@ struct acpi_object_region_field { }; struct acpi_object_bank_field { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing op_region object */ + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_FIELD_INFO; + union acpi_operand_object *region_obj; /* Containing op_region object */ union acpi_operand_object *bank_obj; /* bank_select Register object */ }; struct 
acpi_object_index_field { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO - /* - * No "RegionObj" pointer needed since the Index and Data registers - * are each field definitions unto themselves. - */ + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_FIELD_INFO; + + /* + * No "RegionObj" pointer needed since the Index and Data registers + * are each field definitions unto themselves. + */ union acpi_operand_object *index_obj; /* Index register */ union acpi_operand_object *data_obj; /* Data register */ }; @@ -261,7 +288,9 @@ struct acpi_object_index_field { /* The buffer_field is different in that it is part of a Buffer, not an op_region */ struct acpi_object_buffer_field { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u8 is_create_field; /* Special case for objects created by create_field() */ + ACPI_OBJECT_COMMON_HEADER; + ACPI_COMMON_FIELD_INFO; + u8 is_create_field; /* Special case for objects created by create_field() */ union acpi_operand_object *buffer_obj; /* Containing Buffer object */ }; @@ -272,7 +301,8 @@ struct acpi_object_buffer_field { *****************************************************************************/ struct acpi_object_notify_handler { - ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Parent device */ + ACPI_OBJECT_COMMON_HEADER; + struct acpi_namespace_node *node; /* Parent device */ u32 handler_type; /* Type: Device/System/Both */ acpi_notify_handler handler; /* Handler address */ void *context; @@ -280,7 +310,8 @@ struct acpi_object_notify_handler { }; struct acpi_object_addr_handler { - ACPI_OBJECT_COMMON_HEADER u8 space_id; + ACPI_OBJECT_COMMON_HEADER; + u8 space_id; u8 handler_flags; acpi_adr_space_handler handler; struct acpi_namespace_node *node; /* Parent device */ @@ -307,7 +338,8 @@ struct acpi_object_addr_handler { * The Reference.Class differentiates these types. 
*/ struct acpi_object_reference { - ACPI_OBJECT_COMMON_HEADER u8 class; /* Reference Class */ + ACPI_OBJECT_COMMON_HEADER; + u8 class; /* Reference Class */ u8 target_type; /* Used for Index Op */ u8 resolved; /* Reference has been resolved to a value */ void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */ @@ -340,7 +372,8 @@ typedef enum { * Currently: Region and field_unit types */ struct acpi_object_extra { - ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */ + ACPI_OBJECT_COMMON_HEADER; + struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */ struct acpi_namespace_node *scope_node; void *region_context; /* Region-specific data */ u8 *aml_start; @@ -350,14 +383,16 @@ struct acpi_object_extra { /* Additional data that can be attached to namespace nodes */ struct acpi_object_data { - ACPI_OBJECT_COMMON_HEADER acpi_object_handler handler; + ACPI_OBJECT_COMMON_HEADER; + acpi_object_handler handler; void *pointer; }; /* Structure used when objects are cached for reuse */ struct acpi_object_cache_list { - ACPI_OBJECT_COMMON_HEADER union acpi_operand_object *next; /* Link for object cache and internal lists */ + ACPI_OBJECT_COMMON_HEADER; + union acpi_operand_object *next; /* Link for object cache and internal lists */ }; /****************************************************************************** diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 0dbc4d8891..38f408cf13 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c @@ -413,6 +413,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK); gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD); gpe_event_info->dispatch.method_node = method_node; + walk_info->count++; ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Registered GPE method %s as GPE number 0x%.2X\n", diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 44267a92bc..3c126c6d30 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -315,23 +315,19 @@ void acpi_tb_parse_fadt(void) ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, NULL, FALSE, TRUE, &acpi_gbl_dsdt_index); - /* If Hardware Reduced flag is set, there is no FACS */ - - if (!acpi_gbl_reduced_hardware) { - if (acpi_gbl_FADT.facs) { - acpi_tb_install_standard_table((acpi_physical_address) - acpi_gbl_FADT.facs, - ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, - NULL, FALSE, TRUE, - &acpi_gbl_facs_index); - } - if (acpi_gbl_FADT.Xfacs) { - acpi_tb_install_standard_table((acpi_physical_address) - acpi_gbl_FADT.Xfacs, - ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, - NULL, FALSE, TRUE, - &acpi_gbl_xfacs_index); - } + if (acpi_gbl_FADT.facs) { + acpi_tb_install_standard_table((acpi_physical_address) + acpi_gbl_FADT.facs, + ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, + NULL, FALSE, TRUE, + &acpi_gbl_facs_index); + } + if (acpi_gbl_FADT.Xfacs) { + acpi_tb_install_standard_table((acpi_physical_address) + acpi_gbl_FADT.Xfacs, + ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, + NULL, FALSE, TRUE, + &acpi_gbl_xfacs_index); } } diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index bb4a56e567..15fa68a5ea 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -36,12 +36,7 @@ acpi_status acpi_tb_initialize_facs(void) { struct acpi_table_facs *facs; - /* If Hardware Reduced flag is set, there is no FACS */ - - if (acpi_gbl_reduced_hardware) { - acpi_gbl_FACS = NULL; 
- return (AE_OK); - } else if (acpi_gbl_FADT.Xfacs && + if (acpi_gbl_FADT.Xfacs && (!acpi_gbl_FADT.facs || !acpi_gbl_use32_bit_facs_addresses)) { (void)acpi_get_table_by_index(acpi_gbl_xfacs_index, diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index c5f6c85a3a..3d71bd9245 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -62,7 +62,12 @@ void acpi_ut_track_stack_ptr(void) acpi_size current_sp; if (&current_sp < acpi_gbl_lowest_stack_pointer) { +#pragma GCC diagnostic push +#if defined(__GNUC__) && __GNUC__ >= 12 +#pragma GCC diagnostic ignored "-Wdangling-pointer=" +#endif acpi_gbl_lowest_stack_pointer = &current_sp; +#pragma GCC diagnostic pop } if (acpi_gbl_nesting_level > acpi_gbl_deepest_nesting) { diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c index bb9f8475ce..73903a497d 100644 --- a/drivers/acpi/apei/einj-core.c +++ b/drivers/acpi/apei/einj-core.c @@ -851,7 +851,7 @@ err_put_table: return rc; } -static void einj_remove(struct platform_device *pdev) +static void __exit einj_remove(struct platform_device *pdev) { struct apei_exec_context ctx; @@ -873,8 +873,14 @@ static void einj_remove(struct platform_device *pdev) } static struct platform_device *einj_dev; -static struct platform_driver einj_driver = { - .remove_new = einj_remove, +/* + * einj_remove() lives in .exit.text. For drivers registered via + * platform_driver_probe() this is ok because they cannot get unbound at + * runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. + */ +static struct platform_driver einj_driver __refdata = { + .remove_new = __exit_p(einj_remove), .driver = { .name = "acpi-einj", }, diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 512067cac1..623cc0cb4a 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -26,6 +26,8 @@ #include #include #include +#include +#include #include #include #include @@ -33,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -673,6 +676,75 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata, schedule_work(&entry->work); } +/* Room for 8 entries for each of the 4 event log queues */ +#define CXL_CPER_FIFO_DEPTH 32 +DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH); + +/* Synchronize schedule_work() with cxl_cper_work changes */ +static DEFINE_SPINLOCK(cxl_cper_work_lock); struct work_struct *cxl_cper_work; + +static void cxl_cper_post_event(enum cxl_event_type event_type, + struct cxl_cper_event_rec *rec) +{ + struct cxl_cper_work_data wd; + + if (rec->hdr.length <= sizeof(rec->hdr) || + rec->hdr.length > sizeof(*rec)) { + pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n", + rec->hdr.length); + return; + } + + if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) { + pr_err(FW_WARN "CXL CPER invalid event\n"); + return; + } + + guard(spinlock_irqsave)(&cxl_cper_work_lock); + + if (!cxl_cper_work) + return; + + wd.event_type = event_type; + memcpy(&wd.rec, rec, sizeof(wd.rec)); + + if (!kfifo_put(&cxl_cper_fifo, wd)) { + pr_err_ratelimited("CXL CPER kfifo overflow\n"); + return; + } + + schedule_work(cxl_cper_work); +} + +int cxl_cper_register_work(struct work_struct *work) +{ + if (cxl_cper_work) + return -EINVAL; + + guard(spinlock)(&cxl_cper_work_lock); + cxl_cper_work = work; + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, CXL); + +int cxl_cper_unregister_work(struct work_struct *work) +{ +
if (cxl_cper_work != work) + return -EINVAL; + + guard(spinlock)(&cxl_cper_work_lock); + cxl_cper_work = NULL; + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, CXL); + +int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd) +{ + return kfifo_get(&cxl_cper_fifo, wd); +} +EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, CXL); + static bool ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { @@ -707,6 +779,18 @@ static bool ghes_do_proc(struct ghes *ghes, } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { queued = ghes_handle_arm_hw_error(gdata, sev, sync); + } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) { + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); + + cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec); + } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) { + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); + + cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec); + } else if (guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) { + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); + + cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec); } else { void *err = acpi_hest_get_payload(gdata); diff --git a/drivers/acpi/arm64/amba.c b/drivers/acpi/arm64/amba.c index 171b5c2c7e..e1f0bbb8f3 100644 --- a/drivers/acpi/arm64/amba.c +++ b/drivers/acpi/arm64/amba.c @@ -22,14 +22,6 @@ static const struct acpi_device_id amba_id_list[] = { {"ARMH0061", 0}, /* PL061 GPIO Device */ {"ARMH0330", 0}, /* ARM DMA Controller DMA-330 */ - {"ARMHC501", 0}, /* ARM CoreSight ETR */ - {"ARMHC502", 0}, /* ARM CoreSight STM */ - {"ARMHC503", 0}, /* ARM CoreSight Debug */ - {"ARMHC979", 0}, /* ARM CoreSight TPIU */ - {"ARMHC97C", 0}, /* ARM CoreSight SoC-400 TMC, SoC-600 ETF/ETB */ - {"ARMHC98D", 0}, /* ARM CoreSight Dynamic Replicator */ - {"ARMHC9CA", 0}, /* ARM CoreSight CATU */ - {"ARMHC9FF", 0}, /* ARM CoreSight Dynamic Funnel */ {"", 0}, }; diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c index 93d796531a..52b2abf886 100644 --- a/drivers/acpi/arm64/dma.c +++ b/drivers/acpi/arm64/dma.c @@ -8,7 +8,6 @@ void acpi_arch_dma_setup(struct device *dev) { int ret; u64 end, mask; - u64 size = 0; const struct bus_dma_region *map = NULL; /* @@ -23,31 +22,23 @@ void acpi_arch_dma_setup(struct device *dev) } if (dev->coherent_dma_mask) - size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); + end = dev->coherent_dma_mask; else - size = 1ULL << 32; + end = (1ULL << 32) - 1; ret = acpi_dma_get_range(dev, &map); if (!ret && map) { - const struct bus_dma_region *r = map; - - for (end = 0; r->size; r++) { - if (r->dma_start + r->size - 1 > end) - end = r->dma_start + r->size - 1; - } - - size = end + 1; + end = dma_range_map_max(map); dev->dma_range_map = map; } if (ret == -ENODEV) - ret = iort_dma_get_ranges(dev, &size); + ret = iort_dma_get_ranges(dev, &end); if (!ret) { /* * Limit coherent and dma mask based on size retrieved from * firmware. 
*/ - end = size - 1; mask = DMA_BIT_MASK(ilog2(end) + 1); dev->bus_dma_limit = end; dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask); diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 6496ff5a6b..c0b1c2c194 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1367,7 +1367,7 @@ int iort_iommu_configure_id(struct device *dev, const u32 *input_id) { return -ENODEV; } #endif -static int nc_dma_get_range(struct device *dev, u64 *size) +static int nc_dma_get_range(struct device *dev, u64 *limit) { struct acpi_iort_node *node; struct acpi_iort_named_component *ncomp; @@ -1384,13 +1384,13 @@ static int nc_dma_get_range(struct device *dev, u64 *size) return -EINVAL; } - *size = ncomp->memory_address_limit >= 64 ? U64_MAX : - 1ULL<<ncomp->memory_address_limit; + *limit = ncomp->memory_address_limit >= 64 ? U64_MAX : + (1ULL << ncomp->memory_address_limit) - 1; return 0; } -static int rc_dma_get_range(struct device *dev, u64 *size) +static int rc_dma_get_range(struct device *dev, u64 *limit) { struct acpi_iort_node *node; struct acpi_iort_root_complex *rc; @@ -1408,8 +1408,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size) return -EINVAL; } - *size = rc->memory_address_limit >= 64 ? U64_MAX : - 1ULL<<rc->memory_address_limit; + *limit = rc->memory_address_limit >= 64 ? U64_MAX : + (1ULL << rc->memory_address_limit) - 1; return 0; } @@ -1417,16 +1417,16 @@ static int rc_dma_get_range(struct device *dev, u64 *size) /** * iort_dma_get_ranges() - Look up DMA addressing limit for the device * @dev: device to lookup - * @size: DMA range size result pointer + * @limit: DMA limit result pointer * * Return: 0 on success, an error otherwise. */ -int iort_dma_get_ranges(struct device *dev, u64 *size) +int iort_dma_get_ranges(struct device *dev, u64 *limit) { if (dev_is_pci(dev)) - return rc_dma_get_range(dev, size); + return rc_dma_get_range(dev, limit); else - return nc_dma_get_range(dev, size); + return nc_dma_get_range(dev, limit); } static void __init acpi_iort_register_irq(int hwirq, const char *name, diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c index e4fb9e225d..d1d9c92890 100644 --- a/drivers/acpi/bgrt.c +++ b/drivers/acpi/bgrt.c @@ -29,14 +29,7 @@ BGRT_SHOW(type, image_type); BGRT_SHOW(xoffset, image_offset_x); BGRT_SHOW(yoffset, image_offset_y); -static ssize_t image_read(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t off, size_t count) -{ - memcpy(buf, attr->private + off, count); - return count; -} - -static BIN_ATTR_RO(image, 0); /* size gets filled in later */ +static BIN_ATTR_SIMPLE_RO(image); static struct attribute *bgrt_attributes[] = { &bgrt_attr_version.attr, diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c deleted file mode 100644 index a558d24fb7..0000000000 --- a/drivers/acpi/blacklist.c +++ /dev/null @@ -1,140 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * blacklist.c - * - * Check to see if the given machine has a known bad ACPI BIOS - * or if the BIOS is too old. - * Check given machine against acpi_rev_dmi_table[]. - * - * Copyright (C) 2004 Len Brown - * Copyright (C) 2002 Andy Grover - */ - -#define pr_fmt(fmt) "ACPI: " fmt - -#include -#include -#include -#include - -#include "internal.h" - -#ifdef CONFIG_DMI -static const struct dmi_system_id acpi_rev_dmi_table[] __initconst; -#endif - -/* - * POLICY: If *anything* doesn't work, put it on the blacklist. - * If they are critical errors, mark it critical, and abort driver load.
- */ -static struct acpi_platform_list acpi_blacklist[] __initdata = { - /* Compaq Presario 1700 */ - {"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal, - "Multiple problems", 1}, - /* Sony FX120, FX140, FX150? */ - {"SONY ", "U0 ", 0x20010313, ACPI_SIG_DSDT, less_than_or_equal, - "ACPI driver problem", 1}, - /* Compaq Presario 800, Insyde BIOS */ - {"INT440", "SYSFexxx", 0x00001001, ACPI_SIG_DSDT, less_than_or_equal, - "Does not use _REG to protect EC OpRegions", 1}, - /* IBM 600E - _ADR should return 7, but it returns 1 */ - {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal, - "Incorrect _ADR", 1}, - - { } -}; - -int __init acpi_blacklisted(void) -{ - int i; - int blacklisted = 0; - - i = acpi_match_platform_list(acpi_blacklist); - if (i >= 0) { - pr_err("Vendor \"%6.6s\" System \"%8.8s\" Revision 0x%x has a known ACPI BIOS problem.\n", - acpi_blacklist[i].oem_id, - acpi_blacklist[i].oem_table_id, - acpi_blacklist[i].oem_revision); - - pr_err("Reason: %s. This is a %s error\n", - acpi_blacklist[i].reason, - (acpi_blacklist[i].data ? - "non-recoverable" : "recoverable")); - - blacklisted = acpi_blacklist[i].data; - } - - (void)early_acpi_osi_init(); -#ifdef CONFIG_DMI - dmi_check_system(acpi_rev_dmi_table); -#endif - - return blacklisted; -} -#ifdef CONFIG_DMI -#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE -static int __init dmi_enable_rev_override(const struct dmi_system_id *d) -{ - pr_notice("DMI detected: %s (force ACPI _REV to 5)\n", d->ident); - acpi_rev_override_setup(NULL); - return 0; -} -#endif - -static const struct dmi_system_id acpi_rev_dmi_table[] __initconst = { -#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE - /* - * DELL XPS 13 (2015) switches sound between HDA and I2S - * depending on the ACPI _REV callback. If userspace supports - * I2S sufficiently (or if you do not care about sound), you - * can safely disable this quirk. - */ - { - .callback = dmi_enable_rev_override, - .ident = "DELL XPS 13 (2015)", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"), - }, - }, - { - .callback = dmi_enable_rev_override, - .ident = "DELL Precision 5520", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"), - }, - }, - { - .callback = dmi_enable_rev_override, - .ident = "DELL Precision 3520", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"), - }, - }, - /* - * Resolves a quirk with the Dell Latitude 3350 that - * causes the ethernet adapter to not function. 
- */ - { - .callback = dmi_enable_rev_override, - .ident = "DELL Latitude 3350", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"), - }, - }, - { - .callback = dmi_enable_rev_override, - .ident = "DELL Inspiron 7537", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"), - }, - }, -#endif - {} -}; - -#endif /* CONFIG_DMI */ diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index a87b10eef7..787eca8384 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -112,6 +112,17 @@ int acpi_bus_get_status(struct acpi_device *device) if (ACPI_FAILURE(status)) return -ENODEV; + if (!device->status.present && device->status.enabled) { + pr_info(FW_BUG "Device [%s] status [%08x]: not present and enabled\n", + device->pnp.bus_id, (u32)sta); + device->status.enabled = 0; + /* + * The status is clearly invalid, so clear the functional bit as + * well to avoid attempting to use the device. + */ + device->status.functional = 0; + } + acpi_set_device_status(device, sta); if (device->status.functional && !device->status.present) { @@ -995,25 +1006,26 @@ EXPORT_SYMBOL_GPL(acpi_driver_match_device); -------------------------------------------------------------------------- */ /** - * acpi_bus_register_driver - register a driver with the ACPI bus + * __acpi_bus_register_driver - register a driver with the ACPI bus * @driver: driver being registered + * @owner: owning module/driver * * Registers a driver with the ACPI bus. Searches the namespace for all * devices that match the driver's criteria and binds. Returns zero for * success or a negative error status for failure. */ -int acpi_bus_register_driver(struct acpi_driver *driver) +int __acpi_bus_register_driver(struct acpi_driver *driver, struct module *owner) { if (acpi_disabled) return -ENODEV; driver->drv.name = driver->name; driver->drv.bus = &acpi_bus_type; - driver->drv.owner = driver->owner; + driver->drv.owner = owner; return driver_register(&driver->drv); } -EXPORT_SYMBOL(acpi_bus_register_driver); +EXPORT_SYMBOL(__acpi_bus_register_driver); /** * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index a40b6f3946..1d857978f5 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -686,8 +686,10 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) if (!osc_sb_cppc2_support_acked) { pr_debug("CPPC v2 _OSC not acked\n"); - if (!cpc_supported_by_cpu()) + if (!cpc_supported_by_cpu()) { + pr_debug("CPPC is not supported by the CPU\n"); return -ENODEV; + } } /* Parse the ACPI _CPC table for this CPU. 
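The acpi_bus_get_status() hunk above treats "enabled but not present" as invalid firmware output and clears the enabled and functional bits. A self-contained sketch of that rule applied to a raw _STA value (bit positions per the ACPI specification; the helper is illustrative, not the kernel's):

#include <linux/bits.h>
#include <linux/types.h>

#define EX_STA_PRESENT          BIT(0)
#define EX_STA_ENABLED          BIT(1)
#define EX_STA_FUNCTIONAL       BIT(3)

/* Drop contradictory bits from a raw _STA word before using it. */
static u32 example_sanitize_sta(u32 sta)
{
        if (!(sta & EX_STA_PRESENT) && (sta & EX_STA_ENABLED))
                sta &= ~(EX_STA_ENABLED | EX_STA_FUNCTIONAL);
        return sta;
}

With that invariant enforced at status-read time, the scan.c hunk later in this patch can reduce acpi_device_is_enabled() to a check of the enabled bit alone.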
*/ diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index a7c00ef780..34affbda29 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -88,43 +88,29 @@ static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event, enum dock_callback_type cb_type) { struct acpi_device *adev = dd->adev; + acpi_hp_fixup fixup = NULL; + acpi_hp_uevent uevent = NULL; + acpi_hp_notify notify = NULL; acpi_lock_hp_context(); - if (!adev->hp) - goto out; - - if (cb_type == DOCK_CALL_FIXUP) { - void (*fixup)(struct acpi_device *); - - fixup = adev->hp->fixup; - if (fixup) { - acpi_unlock_hp_context(); - fixup(adev); - return; - } - } else if (cb_type == DOCK_CALL_UEVENT) { - void (*uevent)(struct acpi_device *, u32); - - uevent = adev->hp->uevent; - if (uevent) { - acpi_unlock_hp_context(); - uevent(adev, event); - return; - } - } else { - int (*notify)(struct acpi_device *, u32); - - notify = adev->hp->notify; - if (notify) { - acpi_unlock_hp_context(); - notify(adev, event); - return; - } + if (adev->hp) { + if (cb_type == DOCK_CALL_FIXUP) + fixup = adev->hp->fixup; + else if (cb_type == DOCK_CALL_UEVENT) + uevent = adev->hp->uevent; + else + notify = adev->hp->notify; } - out: acpi_unlock_hp_context(); + + if (fixup) + fixup(adev); + else if (uevent) + uevent(adev, event); + else if (notify) + notify(adev, event); } static struct dock_station *find_dock_station(acpi_handle handle) diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c index 654aaa53c6..d202730faf 100644 --- a/drivers/acpi/dptf/dptf_pch_fivr.c +++ b/drivers/acpi/dptf/dptf_pch_fivr.c @@ -150,6 +150,7 @@ static const struct acpi_device_id pch_fivr_device_ids[] = { {"INTC1045", 0}, {"INTC1049", 0}, {"INTC1064", 0}, + {"INTC106B", 0}, {"INTC10A3", 0}, {"", 0}, }; diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c index b8187babbb..8023b3e233 100644 --- a/drivers/acpi/dptf/dptf_power.c +++ b/drivers/acpi/dptf/dptf_power.c @@ -232,6 +232,8 @@ static const struct acpi_device_id int3407_device_ids[] = { {"INTC1061", 0}, {"INTC1065", 0}, {"INTC1066", 0}, + {"INTC106C", 0}, + {"INTC106D", 0}, {"INTC10A4", 0}, {"INTC10A5", 0}, {"", 0}, diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c index b7113fa92f..014ada7599 100644 --- a/drivers/acpi/dptf/int340x_thermal.c +++ b/drivers/acpi/dptf/int340x_thermal.c @@ -43,6 +43,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = { {"INTC1064"}, {"INTC1065"}, {"INTC1066"}, + {"INTC1068"}, + {"INTC1069"}, + {"INTC106A"}, + {"INTC106B"}, + {"INTC106C"}, + {"INTC106D"}, {"INTC10A0"}, {"INTC10A1"}, {"INTC10A2"}, diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h index e7b4b4e4a5..f89d19c922 100644 --- a/drivers/acpi/fan.h +++ b/drivers/acpi/fan.h @@ -15,6 +15,7 @@ {"INTC1044", }, /* Fan for Tiger Lake generation */ \ {"INTC1048", }, /* Fan for Alder Lake generation */ \ {"INTC1063", }, /* Fan for Meteor Lake generation */ \ + {"INTC106A", }, /* Fan for Lunar Lake generation */ \ {"INTC10A2", }, /* Fan for Raptor Lake generation */ \ {"PNP0C0B", } /* Generic ACPI fan */ diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index a96d1bc662..601b670356 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -69,7 +69,8 @@ void acpi_debugfs_init(void); #else static inline void acpi_debugfs_init(void) { return; } #endif -#ifdef CONFIG_PCI + +#if defined(CONFIG_X86) && defined(CONFIG_PCI) void acpi_lpss_init(void); #else static inline void acpi_lpss_init(void) {} diff 
--git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c index 0ab13751f0..92b658f92d 100644 --- a/drivers/acpi/mipi-disco-img.c +++ b/drivers/acpi/mipi-disco-img.c @@ -731,13 +731,13 @@ void acpi_mipi_crs_csi2_cleanup(void) /* CPU matches for Dell generations with broken ACPI MIPI DISCO info */ static const struct x86_cpu_id dell_broken_mipi_disco_cpu_gens[] = { - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL), - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL), + X86_MATCH_VFM(INTEL_TIGERLAKE, NULL), + X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL), + X86_MATCH_VFM(INTEL_ALDERLAKE, NULL), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL), + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL), {} }; diff --git a/drivers/acpi/nhlt.c b/drivers/acpi/nhlt.c new file mode 100644 index 0000000000..dc1bd0df92 --- /dev/null +++ b/drivers/acpi/nhlt.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2023-2024 Intel Corporation + * + * Authors: Cezary Rojewski + * Amadeusz Slawinski + */ + +#define pr_fmt(fmt) "ACPI: NHLT: " fmt + +#include +#include +#include +#include +#include +#include +#include + +static struct acpi_table_nhlt *acpi_gbl_nhlt; + +static struct acpi_table_nhlt empty_nhlt = { + .header = { + .signature = ACPI_SIG_NHLT, + }, +}; + +/** + * acpi_nhlt_get_gbl_table - Retrieve a pointer to the first NHLT table. + * + * If there is no NHLT in the system, acpi_gbl_nhlt will instead point to an + * empty table. + * + * Return: ACPI status code of the operation. + */ +acpi_status acpi_nhlt_get_gbl_table(void) +{ + acpi_status status; + + status = acpi_get_table(ACPI_SIG_NHLT, 0, (struct acpi_table_header **)(&acpi_gbl_nhlt)); + if (!acpi_gbl_nhlt) + acpi_gbl_nhlt = &empty_nhlt; + return status; +} +EXPORT_SYMBOL_GPL(acpi_nhlt_get_gbl_table); + +/** + * acpi_nhlt_put_gbl_table - Release the global NHLT table. + */ +void acpi_nhlt_put_gbl_table(void) +{ + acpi_put_table((struct acpi_table_header *)acpi_gbl_nhlt); +} +EXPORT_SYMBOL_GPL(acpi_nhlt_put_gbl_table); + +/** + * acpi_nhlt_endpoint_match - Verify if an endpoint matches criteria. + * @ep: the endpoint to check. + * @link_type: the hardware link type, e.g.: PDM or SSP. + * @dev_type: the device type. + * @dir: stream direction. + * @bus_id: the ID of virtual bus hosting the endpoint. + * + * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative + * value to ignore the parameter when matching. + * + * Return: %true if endpoint matches specified criteria or %false otherwise. + */ +bool acpi_nhlt_endpoint_match(const struct acpi_nhlt_endpoint *ep, + int link_type, int dev_type, int dir, int bus_id) +{ + return ep && + (link_type < 0 || ep->link_type == link_type) && + (dev_type < 0 || ep->device_type == dev_type) && + (bus_id < 0 || ep->virtual_bus_id == bus_id) && + (dir < 0 || ep->direction == dir); +} +EXPORT_SYMBOL_GPL(acpi_nhlt_endpoint_match); + +/** + * acpi_nhlt_tb_find_endpoint - Search a NHLT table for an endpoint. + * @tb: the table to search. + * @link_type: the hardware link type, e.g.: PDM or SSP. + * @dev_type: the device type. + * @dir: stream direction. + * @bus_id: the ID of virtual bus hosting the endpoint. 
+ * + * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative + * value to ignore the parameter during the search. + * + * Return: A pointer to endpoint matching the criteria, %NULL if not found or + * an ERR_PTR() otherwise. + */ +struct acpi_nhlt_endpoint * +acpi_nhlt_tb_find_endpoint(const struct acpi_table_nhlt *tb, + int link_type, int dev_type, int dir, int bus_id) +{ + struct acpi_nhlt_endpoint *ep; + + for_each_nhlt_endpoint(tb, ep) + if (acpi_nhlt_endpoint_match(ep, link_type, dev_type, dir, bus_id)) + return ep; + return NULL; +} +EXPORT_SYMBOL_GPL(acpi_nhlt_tb_find_endpoint); + +/** + * acpi_nhlt_find_endpoint - Search all NHLT tables for an endpoint. + * @link_type: the hardware link type, e.g.: PDM or SSP. + * @dev_type: the device type. + * @dir: stream direction. + * @bus_id: the ID of virtual bus hosting the endpoint. + * + * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative + * value to ignore the parameter during the search. + * + * Return: A pointer to endpoint matching the criteria, %NULL if not found or + * an ERR_PTR() otherwise. + */ +struct acpi_nhlt_endpoint * +acpi_nhlt_find_endpoint(int link_type, int dev_type, int dir, int bus_id) +{ + /* TODO: Currently limited to table of index 0. */ + return acpi_nhlt_tb_find_endpoint(acpi_gbl_nhlt, link_type, dev_type, dir, bus_id); +} +EXPORT_SYMBOL_GPL(acpi_nhlt_find_endpoint); + +/** + * acpi_nhlt_endpoint_find_fmtcfg - Search endpoint's formats configuration space + * for a specific format. + * @ep: the endpoint to search. + * @ch: number of channels. + * @rate: samples per second. + * @vbps: valid bits per sample. + * @bps: bits per sample. + * + * Return: A pointer to format matching the criteria, %NULL if not found or + * an ERR_PTR() otherwise. + */ +struct acpi_nhlt_format_config * +acpi_nhlt_endpoint_find_fmtcfg(const struct acpi_nhlt_endpoint *ep, + u16 ch, u32 rate, u16 vbps, u16 bps) +{ + struct acpi_nhlt_wave_formatext *wav; + struct acpi_nhlt_format_config *fmt; + + for_each_nhlt_endpoint_fmtcfg(ep, fmt) { + wav = &fmt->format; + + if (wav->valid_bits_per_sample == vbps && + wav->samples_per_sec == rate && + wav->bits_per_sample == bps && + wav->channel_count == ch) + return fmt; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(acpi_nhlt_endpoint_find_fmtcfg); + +/** + * acpi_nhlt_tb_find_fmtcfg - Search a NHLT table for a specific format. + * @tb: the table to search. + * @link_type: the hardware link type, e.g.: PDM or SSP. + * @dev_type: the device type. + * @dir: stream direction. + * @bus_id: the ID of virtual bus hosting the endpoint. + * + * @ch: number of channels. + * @rate: samples per second. + * @vbps: valid bits per sample. + * @bps: bits per sample. + * + * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative + * value to ignore the parameter during the search. + * + * Return: A pointer to format matching the criteria, %NULL if not found or + * an ERR_PTR() otherwise. 
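A usage sketch for the table-level format lookup documented above; the header path and the SSP link-type constant are assumptions, and the format values are only an example:

#include <acpi/nhlt.h>

static const struct acpi_nhlt_format_config *
example_find_ssp_48k_16bit(const struct acpi_table_nhlt *tb)
{
        /* Any SSP endpoint; 2 channels, 48000 Hz, 16 valid bits of 16. */
        return acpi_nhlt_tb_find_fmtcfg(tb, ACPI_NHLT_LINKTYPE_SSP,
                                        -1, -1, -1, 2, 48000, 16, 16);
}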
+ */ +struct acpi_nhlt_format_config * +acpi_nhlt_tb_find_fmtcfg(const struct acpi_table_nhlt *tb, + int link_type, int dev_type, int dir, int bus_id, + u16 ch, u32 rate, u16 vbps, u16 bps) +{ + struct acpi_nhlt_format_config *fmt; + struct acpi_nhlt_endpoint *ep; + + for_each_nhlt_endpoint(tb, ep) { + if (!acpi_nhlt_endpoint_match(ep, link_type, dev_type, dir, bus_id)) + continue; + + fmt = acpi_nhlt_endpoint_find_fmtcfg(ep, ch, rate, vbps, bps); + if (fmt) + return fmt; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(acpi_nhlt_tb_find_fmtcfg); + +/** + * acpi_nhlt_find_fmtcfg - Search all NHLT tables for a specific format. + * @link_type: the hardware link type, e.g.: PDM or SSP. + * @dev_type: the device type. + * @dir: stream direction. + * @bus_id: the ID of virtual bus hosting the endpoint. + * + * @ch: number of channels. + * @rate: samples per second. + * @vbps: valid bits per sample. + * @bps: bits per sample. + * + * Either of @link_type, @dev_type, @dir or @bus_id may be set to a negative + * value to ignore the parameter during the search. + * + * Return: A pointer to format matching the criteria, %NULL if not found or + * an ERR_PTR() otherwise. + */ +struct acpi_nhlt_format_config * +acpi_nhlt_find_fmtcfg(int link_type, int dev_type, int dir, int bus_id, + u16 ch, u32 rate, u16 vbps, u16 bps) +{ + /* TODO: Currently limited to table of index 0. */ + return acpi_nhlt_tb_find_fmtcfg(acpi_gbl_nhlt, link_type, dev_type, dir, bus_id, + ch, rate, vbps, bps); +} +EXPORT_SYMBOL_GPL(acpi_nhlt_find_fmtcfg); + +static bool acpi_nhlt_config_is_micdevice(struct acpi_nhlt_config *cfg) +{ + return cfg->capabilities_size >= sizeof(struct acpi_nhlt_micdevice_config); +} + +static bool acpi_nhlt_config_is_vendor_micdevice(struct acpi_nhlt_config *cfg) +{ + struct acpi_nhlt_vendor_micdevice_config *devcfg = __acpi_nhlt_config_caps(cfg); + + return cfg->capabilities_size >= sizeof(*devcfg) && + cfg->capabilities_size == struct_size(devcfg, mics, devcfg->mics_count); +} + +/** + * acpi_nhlt_endpoint_mic_count - Retrieve number of digital microphones for a PDM endpoint. + * @ep: the endpoint to return microphones count for. + * + * Return: A number of microphones or an error code if an invalid endpoint is provided. + */ +int acpi_nhlt_endpoint_mic_count(const struct acpi_nhlt_endpoint *ep) +{ + union acpi_nhlt_device_config *devcfg; + struct acpi_nhlt_format_config *fmt; + struct acpi_nhlt_config *cfg; + u16 max_ch = 0; + + if (!ep || ep->link_type != ACPI_NHLT_LINKTYPE_PDM) + return -EINVAL; + + /* Find max number of channels based on formats configuration. */ + for_each_nhlt_endpoint_fmtcfg(ep, fmt) + max_ch = max(fmt->format.channel_count, max_ch); + + cfg = __acpi_nhlt_endpoint_config(ep); + devcfg = __acpi_nhlt_config_caps(cfg); + + /* If @ep is not a mic array, fallback to channels count. 
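A companion sketch for the PDM helpers: look up a PDM endpoint in the global table and query its microphone count (the caller, error handling and header path are assumptions):

#include <linux/errno.h>
#include <acpi/nhlt.h>

static int example_pdm_mic_count(void)
{
        struct acpi_nhlt_endpoint *ep;
        int mics;

        acpi_nhlt_get_gbl_table();

        /* Negative values act as wildcards for criteria we do not care about. */
        ep = acpi_nhlt_find_endpoint(ACPI_NHLT_LINKTYPE_PDM, -1, -1, -1);
        mics = ep ? acpi_nhlt_endpoint_mic_count(ep) : -ENODEV;

        acpi_nhlt_put_gbl_table();
        return mics;
}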
*/ + if (!acpi_nhlt_config_is_micdevice(cfg) || + devcfg->gen.config_type != ACPI_NHLT_CONFIGTYPE_MICARRAY) + return max_ch; + + switch (devcfg->mic.array_type) { + case ACPI_NHLT_ARRAYTYPE_LINEAR2_SMALL: + case ACPI_NHLT_ARRAYTYPE_LINEAR2_BIG: + return 2; + + case ACPI_NHLT_ARRAYTYPE_LINEAR4_GEO1: + case ACPI_NHLT_ARRAYTYPE_PLANAR4_LSHAPED: + case ACPI_NHLT_ARRAYTYPE_LINEAR4_GEO2: + return 4; + + case ACPI_NHLT_ARRAYTYPE_VENDOR: + if (!acpi_nhlt_config_is_vendor_micdevice(cfg)) + return -EINVAL; + return devcfg->vendor_mic.mics_count; + + default: + pr_warn("undefined mic array type: %#x\n", devcfg->mic.array_type); + return max_ch; + } +} +EXPORT_SYMBOL_GPL(acpi_nhlt_endpoint_mic_count); diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index 3b09fd39ee..e3f26e7163 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -213,16 +213,21 @@ __weak int __init numa_fill_memblks(u64 start, u64 end) return NUMA_NO_MEMBLK; } -#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) /* * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for * I/O localities since SRAT does not list them. I/O localities are * not supported at this point. */ -void __init acpi_numa_slit_init(struct acpi_table_slit *slit) +static int __init acpi_parse_slit(struct acpi_table_header *table) { + struct acpi_table_slit *slit = (struct acpi_table_slit *)table; int i, j; + if (!slit_valid(slit)) { + pr_info("SLIT table looks invalid. Not used.\n"); + return -EINVAL; + } + for (i = 0; i < slit->locality_count; i++) { const int from_node = pxm_to_node(i); @@ -239,28 +244,34 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) slit->entry[slit->locality_count * i + j]); } } + + return 0; } -/* - * Default callback for parsing of the Proximity Domain <-> Memory - * Area mappings - */ -int __init -acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) +static int parsed_numa_memblks __initdata; + +static int __init +acpi_parse_memory_affinity(union acpi_subtable_headers *header, + const unsigned long table_end) { + struct acpi_srat_mem_affinity *ma; u64 start, end; u32 hotpluggable; int node, pxm; + ma = (struct acpi_srat_mem_affinity *)header; + + acpi_table_print_srat_entry(&header->common); + if (srat_disabled()) - goto out_err; + return 0; if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) { pr_err("SRAT: Unexpected header length: %d\n", ma->header.length); goto out_err_bad_srat; } if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) - goto out_err; + return 0; hotpluggable = IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE); @@ -298,11 +309,15 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1)); + parsed_numa_memblks++; + return 0; + out_err_bad_srat: + /* Just disable SRAT, but do not fail and ignore errors. 
*/ bad_srat(); -out_err: - return -EINVAL; + + return 0; } static int __init acpi_parse_cfmws(union acpi_subtable_headers *header, @@ -345,26 +360,6 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header, (*fake_pxm)++; return 0; } -#else -static int __init acpi_parse_cfmws(union acpi_subtable_headers *header, - void *arg, const unsigned long table_end) -{ - return 0; -} -#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */ - -static int __init acpi_parse_slit(struct acpi_table_header *table) -{ - struct acpi_table_slit *slit = (struct acpi_table_slit *)table; - - if (!slit_valid(slit)) { - pr_info("SLIT table looks invalid. Not used.\n"); - return -EINVAL; - } - acpi_numa_slit_init(slit); - - return 0; -} void __init __weak acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) @@ -455,24 +450,6 @@ acpi_parse_gi_affinity(union acpi_subtable_headers *header, } #endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */ -static int __initdata parsed_numa_memblks; - -static int __init -acpi_parse_memory_affinity(union acpi_subtable_headers * header, - const unsigned long end) -{ - struct acpi_srat_mem_affinity *memory_affinity; - - memory_affinity = (struct acpi_srat_mem_affinity *)header; - - acpi_table_print_srat_entry(&header->common); - - /* let architecture-dependent part to do it */ - if (!acpi_numa_memory_affinity_init(memory_affinity)) - parsed_numa_memblks++; - return 0; -} - static int __init acpi_parse_srat(struct acpi_table_header *table) { struct acpi_table_srat *srat = (struct acpi_table_srat *)table; diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c index d418462ab7..4a97047302 100644 --- a/drivers/acpi/platform_profile.c +++ b/drivers/acpi/platform_profile.c @@ -136,6 +136,45 @@ void platform_profile_notify(void) } EXPORT_SYMBOL_GPL(platform_profile_notify); +int platform_profile_cycle(void) +{ + enum platform_profile_option profile; + enum platform_profile_option next; + int err; + + err = mutex_lock_interruptible(&profile_lock); + if (err) + return err; + + if (!cur_profile) { + mutex_unlock(&profile_lock); + return -ENODEV; + } + + err = cur_profile->profile_get(cur_profile, &profile); + if (err) { + mutex_unlock(&profile_lock); + return err; + } + + next = find_next_bit_wrap(cur_profile->choices, PLATFORM_PROFILE_LAST, + profile + 1); + + if (WARN_ON(next == PLATFORM_PROFILE_LAST)) { + mutex_unlock(&profile_lock); + return -EINVAL; + } + + err = cur_profile->profile_set(cur_profile, next); + mutex_unlock(&profile_lock); + + if (!err) + sysfs_notify(acpi_kobj, NULL, "platform_profile"); + + return err; +} +EXPORT_SYMBOL_GPL(platform_profile_cycle); + int platform_profile_register(struct platform_profile_handler *pprof) { int err; diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 2b73580c9f..80a52a4e66 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -31,9 +31,14 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data, * not defined without a warning. For instance if any of the properties * from different GUID appear in a property list of another, it will be * accepted by the kernel. Firmware validation tools should catch these. + * + * References: + * + * [1] UEFI DSD Guide. 
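platform_profile_cycle() above advances to the next registered profile with find_next_bit_wrap(), so repeated calls loop through all of the handler's choices. A hedged sketch of a caller, e.g. from a laptop hotkey handler (the caller and the header path are assumptions):

#include <linux/errno.h>
#include <linux/platform_profile.h>
#include <linux/printk.h>

/* Hypothetical hotkey handler: step to the next platform profile. */
static void example_profile_hotkey(void)
{
        int err = platform_profile_cycle();

        if (err && err != -ENODEV)
                pr_warn("platform profile cycle failed: %d\n", err);
}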
+ * https://github.com/UEFI/DSD-Guide/blob/main/src/dsd-guide.adoc */ static const guid_t prp_guids[] = { - /* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */ + /* ACPI _DSD device properties GUID [1]: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */ GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c, 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01), /* Hotplug in D3 GUID: 6211e2c0-58a3-4af3-90e1-927a4e0c55a4 */ @@ -53,12 +58,12 @@ static const guid_t prp_guids[] = { 0xa5, 0x61, 0x99, 0xa5, 0x18, 0x97, 0x62, 0xd0), }; -/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */ +/* ACPI _DSD data subnodes GUID [1]: dbb8e3e6-5886-4ba6-8795-1319f52a966b */ static const guid_t ads_guid = GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6, 0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b); -/* ACPI _DSD data buffer GUID: edb12dd0-363d-4085-a3d2-49522ca160c4 */ +/* ACPI _DSD data buffer GUID [1]: edb12dd0-363d-4085-a3d2-49522ca160c4 */ static const guid_t buffer_prop_guid = GUID_INIT(0xedb12dd0, 0x363d, 0x4085, 0xa3, 0xd2, 0x49, 0x52, 0x2c, 0xa1, 0x60, 0xc4); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index d1464324de..503773707e 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -73,8 +73,7 @@ void acpi_unlock_hp_context(void) void acpi_initialize_hp_context(struct acpi_device *adev, struct acpi_hotplug_context *hp, - int (*notify)(struct acpi_device *, u32), - void (*uevent)(struct acpi_device *, u32)) + acpi_hp_notify notify, acpi_hp_uevent uevent) { acpi_lock_hp_context(); hp->notify = notify; @@ -428,7 +427,7 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src) } else if (adev->flags.hotplug_notify) { error = acpi_generic_hotplug_event(adev, src); } else { - int (*notify)(struct acpi_device *, u32); + acpi_hp_notify notify; acpi_lock_hp_context(); notify = adev->hp ? 
adev->hp->notify : NULL; @@ -1298,10 +1297,10 @@ const char *acpi_device_hid(struct acpi_device *device) { struct acpi_hardware_id *hid; - if (list_empty(&device->pnp.ids)) + hid = list_first_entry_or_null(&device->pnp.ids, struct acpi_hardware_id, list); + if (!hid) return dummy_hid; - hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list); return hid->id; } EXPORT_SYMBOL(acpi_device_hid); @@ -1581,12 +1580,13 @@ int acpi_iommu_fwspec_init(struct device *dev, u32 id, struct fwnode_handle *fwnode, const struct iommu_ops *ops) { - int ret = iommu_fwspec_init(dev, fwnode, ops); + int ret; - if (!ret) - ret = iommu_fwspec_add_ids(dev, &id, 1); + ret = iommu_fwspec_init(dev, fwnode, ops); + if (ret) + return ret; - return ret; + return iommu_fwspec_add_ids(dev, &id, 1); } static inline const struct iommu_ops *acpi_iommu_fwspec_ops(struct device *dev) @@ -1625,12 +1625,11 @@ static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in) if (!err && dev->bus) err = iommu_probe_device(dev); - /* Ignore all other errors apart from EPROBE_DEFER */ - if (err == -EPROBE_DEFER) { + if (err == -EPROBE_DEFER) return err; - } else if (err) { + if (err) { dev_dbg(dev, "Adding to IOMMU failed: %d\n", err); - return -ENODEV; + return err; } if (!acpi_iommu_fwspec_ops(dev)) return -ENODEV; @@ -1671,16 +1670,12 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, acpi_arch_dma_setup(dev); + /* Ignore all other errors apart from EPROBE_DEFER */ ret = acpi_iommu_configure_id(dev, input_id); if (ret == -EPROBE_DEFER) return -EPROBE_DEFER; - /* - * Historically this routine doesn't fail driver probing due to errors - * in acpi_iommu_configure_id() - */ - - arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT); + arch_setup_dma_ops(dev, attr == DEV_DMA_COHERENT); return 0; } @@ -1962,7 +1957,7 @@ bool acpi_device_is_present(const struct acpi_device *adev) bool acpi_device_is_enabled(const struct acpi_device *adev) { - return adev->status.present && adev->status.enabled; + return adev->status.enabled; } static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler, diff --git a/drivers/acpi/x86/Makefile b/drivers/acpi/x86/Makefile new file mode 100644 index 0000000000..63c99509ed --- /dev/null +++ b/drivers/acpi/x86/Makefile @@ -0,0 +1,8 @@ +obj-$(CONFIG_ACPI) += acpi-x86.o +acpi-x86-y += apple.o +acpi-x86-y += cmos_rtc.o +acpi-x86-$(CONFIG_PCI) += lpss.o +acpi-x86-y += s2idle.o +acpi-x86-y += utils.o + +obj-$(CONFIG_X86) += blacklist.o diff --git a/drivers/acpi/x86/blacklist.c b/drivers/acpi/x86/blacklist.c new file mode 100644 index 0000000000..55214d0a12 --- /dev/null +++ b/drivers/acpi/x86/blacklist.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * blacklist.c + * + * Check to see if the given machine has a known bad ACPI BIOS + * or if the BIOS is too old. + * Check given machine against acpi_rev_dmi_table[]. + * + * Copyright (C) 2004 Len Brown + * Copyright (C) 2002 Andy Grover + */ + +#define pr_fmt(fmt) "ACPI: " fmt + +#include +#include +#include +#include + +#include "../internal.h" + +#ifdef CONFIG_DMI +static const struct dmi_system_id acpi_rev_dmi_table[] __initconst; +#endif + +/* + * POLICY: If *anything* doesn't work, put it on the blacklist. + * If they are critical errors, mark it critical, and abort driver load. 
+ */ +static struct acpi_platform_list acpi_blacklist[] __initdata = { + /* Compaq Presario 1700 */ + {"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal, + "Multiple problems", 1}, + /* Sony FX120, FX140, FX150? */ + {"SONY ", "U0 ", 0x20010313, ACPI_SIG_DSDT, less_than_or_equal, + "ACPI driver problem", 1}, + /* Compaq Presario 800, Insyde BIOS */ + {"INT440", "SYSFexxx", 0x00001001, ACPI_SIG_DSDT, less_than_or_equal, + "Does not use _REG to protect EC OpRegions", 1}, + /* IBM 600E - _ADR should return 7, but it returns 1 */ + {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal, + "Incorrect _ADR", 1}, + + { } +}; + +int __init acpi_blacklisted(void) +{ + int i; + int blacklisted = 0; + + i = acpi_match_platform_list(acpi_blacklist); + if (i >= 0) { + pr_err("Vendor \"%6.6s\" System \"%8.8s\" Revision 0x%x has a known ACPI BIOS problem.\n", + acpi_blacklist[i].oem_id, + acpi_blacklist[i].oem_table_id, + acpi_blacklist[i].oem_revision); + + pr_err("Reason: %s. This is a %s error\n", + acpi_blacklist[i].reason, + (acpi_blacklist[i].data ? + "non-recoverable" : "recoverable")); + + blacklisted = acpi_blacklist[i].data; + } + + (void)early_acpi_osi_init(); +#ifdef CONFIG_DMI + dmi_check_system(acpi_rev_dmi_table); +#endif + + return blacklisted; +} +#ifdef CONFIG_DMI +#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE +static int __init dmi_enable_rev_override(const struct dmi_system_id *d) +{ + pr_notice("DMI detected: %s (force ACPI _REV to 5)\n", d->ident); + acpi_rev_override_setup(NULL); + return 0; +} +#endif + +static const struct dmi_system_id acpi_rev_dmi_table[] __initconst = { +#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE + /* + * DELL XPS 13 (2015) switches sound between HDA and I2S + * depending on the ACPI _REV callback. If userspace supports + * I2S sufficiently (or if you do not care about sound), you + * can safely disable this quirk. + */ + { + .callback = dmi_enable_rev_override, + .ident = "DELL XPS 13 (2015)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"), + }, + }, + { + .callback = dmi_enable_rev_override, + .ident = "DELL Precision 5520", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"), + }, + }, + { + .callback = dmi_enable_rev_override, + .ident = "DELL Precision 3520", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"), + }, + }, + /* + * Resolves a quirk with the Dell Latitude 3350 that + * causes the ethernet adapter to not function. 
+ */ + { + .callback = dmi_enable_rev_override, + .ident = "DELL Latitude 3350", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"), + }, + }, + { + .callback = dmi_enable_rev_override, + .ident = "DELL Inspiron 7537", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"), + }, + }, +#endif + {} +}; + +#endif /* CONFIG_DMI */ diff --git a/drivers/acpi/x86/cmos_rtc.c b/drivers/acpi/x86/cmos_rtc.c new file mode 100644 index 0000000000..51643ff6fe --- /dev/null +++ b/drivers/acpi/x86/cmos_rtc.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ACPI support for CMOS RTC Address Space access + * + * Copyright (C) 2013, Intel Corporation + * Authors: Lan Tianyu + */ + +#define pr_fmt(fmt) "ACPI: " fmt + +#include +#include +#include +#include +#include +#include + +#include "../internal.h" + +static const struct acpi_device_id acpi_cmos_rtc_ids[] = { + { "PNP0B00" }, + { "PNP0B01" }, + { "PNP0B02" }, + {} +}; + +static acpi_status +acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address, + u32 bits, u64 *value64, + void *handler_context, void *region_context) +{ + int i; + u8 *value = (u8 *)value64; + + if (address > 0xff || !value64) + return AE_BAD_PARAMETER; + + if (function != ACPI_WRITE && function != ACPI_READ) + return AE_BAD_PARAMETER; + + spin_lock_irq(&rtc_lock); + + for (i = 0; i < DIV_ROUND_UP(bits, 8); ++i, ++address, ++value) + if (function == ACPI_READ) + *value = CMOS_READ(address); + else + CMOS_WRITE(*value, address); + + spin_unlock_irq(&rtc_lock); + + return AE_OK; +} + +int acpi_install_cmos_rtc_space_handler(acpi_handle handle) +{ + acpi_status status; + + status = acpi_install_address_space_handler(handle, + ACPI_ADR_SPACE_CMOS, + &acpi_cmos_rtc_space_handler, + NULL, NULL); + if (ACPI_FAILURE(status)) { + pr_err("Error installing CMOS-RTC region handler\n"); + return -ENODEV; + } + + return 1; +} +EXPORT_SYMBOL_GPL(acpi_install_cmos_rtc_space_handler); + +void acpi_remove_cmos_rtc_space_handler(acpi_handle handle) +{ + if (ACPI_FAILURE(acpi_remove_address_space_handler(handle, + ACPI_ADR_SPACE_CMOS, &acpi_cmos_rtc_space_handler))) + pr_err("Error removing CMOS-RTC region handler\n"); +} +EXPORT_SYMBOL_GPL(acpi_remove_cmos_rtc_space_handler); + +static int acpi_cmos_rtc_attach_handler(struct acpi_device *adev, const struct acpi_device_id *id) +{ + return acpi_install_cmos_rtc_space_handler(adev->handle); +} + +static void acpi_cmos_rtc_detach_handler(struct acpi_device *adev) +{ + acpi_remove_cmos_rtc_space_handler(adev->handle); +} + +static struct acpi_scan_handler cmos_rtc_handler = { + .ids = acpi_cmos_rtc_ids, + .attach = acpi_cmos_rtc_attach_handler, + .detach = acpi_cmos_rtc_detach_handler, +}; + +void __init acpi_cmos_rtc_init(void) +{ + acpi_scan_add_handler(&cmos_rtc_handler); +} diff --git a/drivers/acpi/x86/lpss.c b/drivers/acpi/x86/lpss.c new file mode 100644 index 0000000000..148e29c2c5 --- /dev/null +++ b/drivers/acpi/x86/lpss.c @@ -0,0 +1,1355 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ACPI support for Intel Lynxpoint LPSS. + * + * Copyright (C) 2013, Intel Corporation + * Authors: Mika Westerberg + * Rafael J. 
Wysocki + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../internal.h" + +#ifdef CONFIG_X86_INTEL_LPSS + +#include +#include +#include + +#define LPSS_ADDR(desc) ((unsigned long)&desc) + +#define LPSS_CLK_SIZE 0x04 +#define LPSS_LTR_SIZE 0x18 + +/* Offsets relative to LPSS_PRIVATE_OFFSET */ +#define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16)) +#define LPSS_RESETS 0x04 +#define LPSS_RESETS_RESET_FUNC BIT(0) +#define LPSS_RESETS_RESET_APB BIT(1) +#define LPSS_GENERAL 0x08 +#define LPSS_GENERAL_LTR_MODE_SW BIT(2) +#define LPSS_GENERAL_UART_RTS_OVRD BIT(3) +#define LPSS_SW_LTR 0x10 +#define LPSS_AUTO_LTR 0x14 +#define LPSS_LTR_SNOOP_REQ BIT(15) +#define LPSS_LTR_SNOOP_MASK 0x0000FFFF +#define LPSS_LTR_SNOOP_LAT_1US 0x800 +#define LPSS_LTR_SNOOP_LAT_32US 0xC00 +#define LPSS_LTR_SNOOP_LAT_SHIFT 5 +#define LPSS_LTR_SNOOP_LAT_CUTOFF 3000 +#define LPSS_LTR_MAX_VAL 0x3FF +#define LPSS_TX_INT 0x20 +#define LPSS_TX_INT_MASK BIT(1) + +#define LPSS_PRV_REG_COUNT 9 + +/* LPSS Flags */ +#define LPSS_CLK BIT(0) +#define LPSS_CLK_GATE BIT(1) +#define LPSS_CLK_DIVIDER BIT(2) +#define LPSS_LTR BIT(3) +#define LPSS_SAVE_CTX BIT(4) +/* + * For some devices the DSDT AML code for another device turns off the device + * before our suspend handler runs, causing us to read/save all 1-s (0xffffffff) + * as ctx register values. + * Luckily these devices always use the same ctx register values, so we can + * work around this by saving the ctx registers once on activation. + */ +#define LPSS_SAVE_CTX_ONCE BIT(5) +#define LPSS_NO_D3_DELAY BIT(6) + +struct lpss_private_data; + +struct lpss_device_desc { + unsigned int flags; + const char *clk_con_id; + unsigned int prv_offset; + size_t prv_size_override; + const struct property_entry *properties; + void (*setup)(struct lpss_private_data *pdata); + bool resume_from_noirq; +}; + +static const struct lpss_device_desc lpss_dma_desc = { + .flags = LPSS_CLK, +}; + +struct lpss_private_data { + struct acpi_device *adev; + void __iomem *mmio_base; + resource_size_t mmio_size; + unsigned int fixed_clk_rate; + struct clk *clk; + const struct lpss_device_desc *dev_desc; + u32 prv_reg_ctx[LPSS_PRV_REG_COUNT]; +}; + +/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */ +static u32 pmc_atom_d3_mask = 0xfe000ffe; + +/* LPSS run time quirks */ +static unsigned int lpss_quirks; + +/* + * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device. + * + * The LPSS DMA controller has neither _PS0 nor _PS3 method. Moreover + * it can be powered off automatically whenever the last LPSS device goes down. + * In case of no power any access to the DMA controller will hang the system. + * The behaviour is reproduced on some HP laptops based on Intel BayTrail as + * well as on ASuS T100TA transformer. + * + * This quirk overrides power state of entire LPSS island to keep DMA powered + * on whenever we have at least one other device in use. 
+ */ +#define LPSS_QUIRK_ALWAYS_POWER_ON BIT(0) + +/* UART Component Parameter Register */ +#define LPSS_UART_CPR 0xF4 +#define LPSS_UART_CPR_AFCE BIT(4) + +static void lpss_uart_setup(struct lpss_private_data *pdata) +{ + unsigned int offset; + u32 val; + + offset = pdata->dev_desc->prv_offset + LPSS_TX_INT; + val = readl(pdata->mmio_base + offset); + writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset); + + val = readl(pdata->mmio_base + LPSS_UART_CPR); + if (!(val & LPSS_UART_CPR_AFCE)) { + offset = pdata->dev_desc->prv_offset + LPSS_GENERAL; + val = readl(pdata->mmio_base + offset); + val |= LPSS_GENERAL_UART_RTS_OVRD; + writel(val, pdata->mmio_base + offset); + } +} + +static void lpss_deassert_reset(struct lpss_private_data *pdata) +{ + unsigned int offset; + u32 val; + + offset = pdata->dev_desc->prv_offset + LPSS_RESETS; + val = readl(pdata->mmio_base + offset); + val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC; + writel(val, pdata->mmio_base + offset); +} + +/* + * BYT PWM used for backlight control by the i915 driver on systems without + * the Crystal Cove PMIC. + */ +static struct pwm_lookup byt_pwm_lookup[] = { + PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0", + "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL, + "pwm-lpss-platform"), +}; + +static void byt_pwm_setup(struct lpss_private_data *pdata) +{ + /* Only call pwm_add_table for the first PWM controller */ + if (acpi_dev_uid_match(pdata->adev, 1)) + pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); +} + +#define LPSS_I2C_ENABLE 0x6c + +static void byt_i2c_setup(struct lpss_private_data *pdata) +{ + acpi_handle handle = pdata->adev->handle; + unsigned long long shared_host = 0; + acpi_status status; + u64 uid; + + /* Expected to always be successfull, but better safe then sorry */ + if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) { + /* Detect I2C bus shared with PUNIT and ignore its d3 status */ + status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host); + if (ACPI_SUCCESS(status) && shared_host) + pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1)); + } + + lpss_deassert_reset(pdata); + + if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset)) + pdata->fixed_clk_rate = 133000000; + + writel(0, pdata->mmio_base + LPSS_I2C_ENABLE); +} + +/* + * BSW PWM1 is used for backlight control by the i915 driver + * BSW PWM2 is used for backlight control for fixed (etched into the glass) + * touch controls on some models. These touch-controls have specialized + * drivers which know they need the "pwm_soc_lpss_2" con-id. 
+ */ +static struct pwm_lookup bsw_pwm_lookup[] = { + PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0", + "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL, + "pwm-lpss-platform"), + PWM_LOOKUP_WITH_MODULE("80862289:00", 0, NULL, + "pwm_soc_lpss_2", 0, PWM_POLARITY_NORMAL, + "pwm-lpss-platform"), +}; + +static void bsw_pwm_setup(struct lpss_private_data *pdata) +{ + /* Only call pwm_add_table for the first PWM controller */ + if (acpi_dev_uid_match(pdata->adev, 1)) + pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); +} + +static const struct property_entry lpt_spi_properties[] = { + PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_LPT_SSP), + { } +}; + +static const struct lpss_device_desc lpt_spi_dev_desc = { + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR + | LPSS_SAVE_CTX, + .prv_offset = 0x800, + .properties = lpt_spi_properties, +}; + +static const struct lpss_device_desc lpt_i2c_dev_desc = { + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX, + .prv_offset = 0x800, +}; + +static struct property_entry uart_properties[] = { + PROPERTY_ENTRY_U32("reg-io-width", 4), + PROPERTY_ENTRY_U32("reg-shift", 2), + PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"), + { }, +}; + +static const struct lpss_device_desc lpt_uart_dev_desc = { + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR + | LPSS_SAVE_CTX, + .clk_con_id = "baudclk", + .prv_offset = 0x800, + .setup = lpss_uart_setup, + .properties = uart_properties, +}; + +static const struct lpss_device_desc lpt_sdio_dev_desc = { + .flags = LPSS_LTR, + .prv_offset = 0x1000, + .prv_size_override = 0x1018, +}; + +static const struct lpss_device_desc byt_pwm_dev_desc = { + .flags = LPSS_SAVE_CTX, + .prv_offset = 0x800, + .setup = byt_pwm_setup, +}; + +static const struct lpss_device_desc bsw_pwm_dev_desc = { + .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY, + .prv_offset = 0x800, + .setup = bsw_pwm_setup, + .resume_from_noirq = true, +}; + +static const struct lpss_device_desc bsw_pwm2_dev_desc = { + .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY, + .prv_offset = 0x800, + .resume_from_noirq = true, +}; + +static const struct lpss_device_desc byt_uart_dev_desc = { + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, + .clk_con_id = "baudclk", + .prv_offset = 0x800, + .setup = lpss_uart_setup, + .properties = uart_properties, +}; + +static const struct lpss_device_desc bsw_uart_dev_desc = { + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX + | LPSS_NO_D3_DELAY, + .clk_con_id = "baudclk", + .prv_offset = 0x800, + .setup = lpss_uart_setup, + .properties = uart_properties, +}; + +static const struct property_entry byt_spi_properties[] = { + PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BYT_SSP), + { } +}; + +static const struct lpss_device_desc byt_spi_dev_desc = { + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, + .prv_offset = 0x400, + .properties = byt_spi_properties, +}; + +static const struct lpss_device_desc byt_sdio_dev_desc = { + .flags = LPSS_CLK, +}; + +static const struct lpss_device_desc byt_i2c_dev_desc = { + .flags = LPSS_CLK | LPSS_SAVE_CTX, + .prv_offset = 0x800, + .setup = byt_i2c_setup, + .resume_from_noirq = true, +}; + +static const struct lpss_device_desc bsw_i2c_dev_desc = { + .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, + .prv_offset = 0x800, + .setup = byt_i2c_setup, + .resume_from_noirq = true, +}; + +static const struct property_entry bsw_spi_properties[] = { + 
PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP), + PROPERTY_ENTRY_U32("num-cs", 2), + { } +}; + +static const struct lpss_device_desc bsw_spi_dev_desc = { + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX + | LPSS_NO_D3_DELAY, + .prv_offset = 0x400, + .setup = lpss_deassert_reset, + .properties = bsw_spi_properties, +}; + +static const struct x86_cpu_id lpss_cpu_ids[] = { + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL), + {} +}; + +#else + +#define LPSS_ADDR(desc) (0UL) + +#endif /* CONFIG_X86_INTEL_LPSS */ + +static const struct acpi_device_id acpi_lpss_device_ids[] = { + /* Generic LPSS devices */ + { "INTL9C60", LPSS_ADDR(lpss_dma_desc) }, + + /* Lynxpoint LPSS devices */ + { "INT33C0", LPSS_ADDR(lpt_spi_dev_desc) }, + { "INT33C1", LPSS_ADDR(lpt_spi_dev_desc) }, + { "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) }, + { "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) }, + { "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) }, + { "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) }, + { "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) }, + + /* BayTrail LPSS devices */ + { "80860F09", LPSS_ADDR(byt_pwm_dev_desc) }, + { "80860F0A", LPSS_ADDR(byt_uart_dev_desc) }, + { "80860F0E", LPSS_ADDR(byt_spi_dev_desc) }, + { "80860F14", LPSS_ADDR(byt_sdio_dev_desc) }, + { "80860F41", LPSS_ADDR(byt_i2c_dev_desc) }, + + /* Braswell LPSS devices */ + { "80862286", LPSS_ADDR(lpss_dma_desc) }, + { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) }, + { "80862289", LPSS_ADDR(bsw_pwm2_dev_desc) }, + { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) }, + { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) }, + { "808622C0", LPSS_ADDR(lpss_dma_desc) }, + { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) }, + + /* Broadwell LPSS devices */ + { "INT3430", LPSS_ADDR(lpt_spi_dev_desc) }, + { "INT3431", LPSS_ADDR(lpt_spi_dev_desc) }, + { "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) }, + { "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) }, + { "INT3434", LPSS_ADDR(lpt_uart_dev_desc) }, + { "INT3435", LPSS_ADDR(lpt_uart_dev_desc) }, + { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) }, + + /* Wildcat Point LPSS devices */ + { "INT3438", LPSS_ADDR(lpt_spi_dev_desc) }, + + { } +}; + +#ifdef CONFIG_X86_INTEL_LPSS + +/* LPSS main clock device. 
*/ +static struct platform_device *lpss_clk_dev; + +static inline void lpt_register_clock_device(void) +{ + lpss_clk_dev = platform_device_register_simple("clk-lpss-atom", + PLATFORM_DEVID_NONE, + NULL, 0); +} + +static int register_device_clock(struct acpi_device *adev, + struct lpss_private_data *pdata) +{ + const struct lpss_device_desc *dev_desc = pdata->dev_desc; + const char *devname = dev_name(&adev->dev); + struct clk *clk; + struct lpss_clk_data *clk_data; + const char *parent, *clk_name; + void __iomem *prv_base; + + if (!lpss_clk_dev) + lpt_register_clock_device(); + + if (IS_ERR(lpss_clk_dev)) + return PTR_ERR(lpss_clk_dev); + + clk_data = platform_get_drvdata(lpss_clk_dev); + if (!clk_data) + return -ENODEV; + clk = clk_data->clk; + + if (!pdata->mmio_base + || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE) + return -ENODATA; + + parent = clk_data->name; + prv_base = pdata->mmio_base + dev_desc->prv_offset; + + if (pdata->fixed_clk_rate) { + clk = clk_register_fixed_rate(NULL, devname, parent, 0, + pdata->fixed_clk_rate); + goto out; + } + + if (dev_desc->flags & LPSS_CLK_GATE) { + clk = clk_register_gate(NULL, devname, parent, 0, + prv_base, 0, 0, NULL); + parent = devname; + } + + if (dev_desc->flags & LPSS_CLK_DIVIDER) { + /* Prevent division by zero */ + if (!readl(prv_base)) + writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base); + + clk_name = kasprintf(GFP_KERNEL, "%s-div", devname); + if (!clk_name) + return -ENOMEM; + clk = clk_register_fractional_divider(NULL, clk_name, parent, + 0, prv_base, 1, 15, 16, 15, + CLK_FRAC_DIVIDER_POWER_OF_TWO_PS, + NULL); + parent = clk_name; + + clk_name = kasprintf(GFP_KERNEL, "%s-update", devname); + if (!clk_name) { + kfree(parent); + return -ENOMEM; + } + clk = clk_register_gate(NULL, clk_name, parent, + CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, + prv_base, 31, 0, NULL); + kfree(parent); + kfree(clk_name); + } +out: + if (IS_ERR(clk)) + return PTR_ERR(clk); + + pdata->clk = clk; + clk_register_clkdev(clk, dev_desc->clk_con_id, devname); + return 0; +} + +struct lpss_device_links { + const char *supplier_hid; + const char *supplier_uid; + const char *consumer_hid; + const char *consumer_uid; + u32 flags; + const struct dmi_system_id *dep_missing_ids; +}; + +/* Please keep this list sorted alphabetically by vendor and model */ +static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"), + }, + }, + {} +}; + +/* + * The _DEP method is used to identify dependencies but instead of creating + * device links for every handle in _DEP, only links in the following list are + * created. That is necessary because, in the general case, _DEP can refer to + * devices that might not have drivers, or that are on different buses, or where + * the supplier is not enumerated until after the consumer is probed. 
+ */ +static const struct lpss_device_links lpss_device_links[] = { + /* CHT External sdcard slot controller depends on PMIC I2C ctrl */ + {"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME}, + /* CHT iGPU depends on PMIC I2C controller */ + {"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME}, + /* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */ + {"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME, + i2c1_dep_missing_dmi_ids}, + /* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */ + {"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME}, + /* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */ + {"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME}, +}; + +static bool acpi_lpss_is_supplier(struct acpi_device *adev, + const struct lpss_device_links *link) +{ + return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid); +} + +static bool acpi_lpss_is_consumer(struct acpi_device *adev, + const struct lpss_device_links *link) +{ + return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid); +} + +struct hid_uid { + const char *hid; + const char *uid; +}; + +static int match_hid_uid(struct device *dev, const void *data) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + const struct hid_uid *id = data; + + if (!adev) + return 0; + + return acpi_dev_hid_uid_match(adev, id->hid, id->uid); +} + +static struct device *acpi_lpss_find_device(const char *hid, const char *uid) +{ + struct device *dev; + + struct hid_uid data = { + .hid = hid, + .uid = uid, + }; + + dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid); + if (dev) + return dev; + + return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid); +} + +static void acpi_lpss_link_consumer(struct device *dev1, + const struct lpss_device_links *link) +{ + struct device *dev2; + + dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid); + if (!dev2) + return; + + if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids)) + || acpi_device_dep(ACPI_HANDLE(dev2), ACPI_HANDLE(dev1))) + device_link_add(dev2, dev1, link->flags); + + put_device(dev2); +} + +static void acpi_lpss_link_supplier(struct device *dev1, + const struct lpss_device_links *link) +{ + struct device *dev2; + + dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid); + if (!dev2) + return; + + if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids)) + || acpi_device_dep(ACPI_HANDLE(dev1), ACPI_HANDLE(dev2))) + device_link_add(dev1, dev2, link->flags); + + put_device(dev2); +} + +static void acpi_lpss_create_device_links(struct acpi_device *adev, + struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) { + const struct lpss_device_links *link = &lpss_device_links[i]; + + if (acpi_lpss_is_supplier(adev, link)) + acpi_lpss_link_consumer(&pdev->dev, link); + + if (acpi_lpss_is_consumer(adev, link)) + acpi_lpss_link_supplier(&pdev->dev, link); + } +} + +static int acpi_lpss_create_device(struct acpi_device *adev, + const struct acpi_device_id *id) +{ + const struct lpss_device_desc *dev_desc; + struct lpss_private_data *pdata; + struct resource_entry *rentry; + struct list_head resource_list; + struct platform_device *pdev; + int ret; + + dev_desc = (const struct lpss_device_desc *)id->driver_data; + if (!dev_desc) + return -EINVAL; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + INIT_LIST_HEAD(&resource_list); + ret = 
acpi_dev_get_memory_resources(adev, &resource_list); + if (ret < 0) + goto err_out; + + rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node); + if (rentry) { + if (dev_desc->prv_size_override) + pdata->mmio_size = dev_desc->prv_size_override; + else + pdata->mmio_size = resource_size(rentry->res); + pdata->mmio_base = ioremap(rentry->res->start, pdata->mmio_size); + } + + acpi_dev_free_resource_list(&resource_list); + + if (!pdata->mmio_base) { + /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */ + adev->pnp.type.platform_id = 0; + goto out_free; + } + + pdata->adev = adev; + pdata->dev_desc = dev_desc; + + if (dev_desc->setup) + dev_desc->setup(pdata); + + if (dev_desc->flags & LPSS_CLK) { + ret = register_device_clock(adev, pdata); + if (ret) + goto out_free; + } + + /* + * This works around a known issue in ACPI tables where LPSS devices + * have _PS0 and _PS3 without _PSC (and no power resources), so + * acpi_bus_init_power() will assume that the BIOS has put them into D0. + */ + acpi_device_fix_up_power(adev); + + adev->driver_data = pdata; + pdev = acpi_create_platform_device(adev, dev_desc->properties); + if (IS_ERR_OR_NULL(pdev)) { + adev->driver_data = NULL; + ret = PTR_ERR(pdev); + goto err_out; + } + + acpi_lpss_create_device_links(adev, pdev); + return 1; + +out_free: + /* Skip the device, but continue the namespace scan */ + ret = 0; +err_out: + kfree(pdata); + return ret; +} + +static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg) +{ + return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg); +} + +static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata, + unsigned int reg) +{ + writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg); +} + +static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + struct lpss_private_data *pdata; + unsigned long flags; + int ret; + + if (WARN_ON(!adev)) + return -ENODEV; + + spin_lock_irqsave(&dev->power.lock, flags); + if (pm_runtime_suspended(dev)) { + ret = -EAGAIN; + goto out; + } + pdata = acpi_driver_data(adev); + if (WARN_ON(!pdata || !pdata->mmio_base)) { + ret = -ENODEV; + goto out; + } + *val = __lpss_reg_read(pdata, reg); + ret = 0; + + out: + spin_unlock_irqrestore(&dev->power.lock, flags); + return ret; +} + +static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u32 ltr_value = 0; + unsigned int reg; + int ret; + + reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR; + ret = lpss_reg_read(dev, reg, &ltr_value); + if (ret) + return ret; + + return sysfs_emit(buf, "%08x\n", ltr_value); +} + +static ssize_t lpss_ltr_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 ltr_mode = 0; + char *outstr; + int ret; + + ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode); + if (ret) + return ret; + + outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ?
"sw" : "auto"; + return sprintf(buf, "%s\n", outstr); +} + +static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL); +static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL); +static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL); + +static struct attribute *lpss_attrs[] = { + &dev_attr_auto_ltr.attr, + &dev_attr_sw_ltr.attr, + &dev_attr_ltr_mode.attr, + NULL, +}; + +static const struct attribute_group lpss_attr_group = { + .attrs = lpss_attrs, + .name = "lpss_ltr", +}; + +static void acpi_lpss_set_ltr(struct device *dev, s32 val) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + u32 ltr_mode, ltr_val; + + ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL); + if (val < 0) { + if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) { + ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW; + __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL); + } + return; + } + ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK; + if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) { + ltr_val |= LPSS_LTR_SNOOP_LAT_32US; + val = LPSS_LTR_MAX_VAL; + } else if (val > LPSS_LTR_MAX_VAL) { + ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ; + val >>= LPSS_LTR_SNOOP_LAT_SHIFT; + } else { + ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ; + } + ltr_val |= val; + __lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR); + if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) { + ltr_mode |= LPSS_GENERAL_LTR_MODE_SW; + __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL); + } +} + +#ifdef CONFIG_PM +/** + * acpi_lpss_save_ctx() - Save the private registers of LPSS device + * @dev: LPSS device + * @pdata: pointer to the private data of the LPSS device + * + * Most LPSS devices have private registers which may loose their context when + * the device is powered down. acpi_lpss_save_ctx() saves those registers into + * prv_reg_ctx array. + */ +static void acpi_lpss_save_ctx(struct device *dev, + struct lpss_private_data *pdata) +{ + unsigned int i; + + for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { + unsigned long offset = i * sizeof(u32); + + pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset); + dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n", + pdata->prv_reg_ctx[i], offset); + } +} + +/** + * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device + * @dev: LPSS device + * @pdata: pointer to the private data of the LPSS device + * + * Restores the registers that were previously stored with acpi_lpss_save_ctx(). + */ +static void acpi_lpss_restore_ctx(struct device *dev, + struct lpss_private_data *pdata) +{ + unsigned int i; + + for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { + unsigned long offset = i * sizeof(u32); + + __lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset); + dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n", + pdata->prv_reg_ctx[i], offset); + } +} + +static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata) +{ + /* + * The following delay is needed or the subsequent write operations may + * fail. The LPSS devices are actually PCI devices and the PCI spec + * expects 10ms delay before the device can be accessed after D3 to D0 + * transition. However some platforms like BSW does not need this delay. 
+ */ + unsigned int delay = 10; /* default 10ms delay */ + + if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY) + delay = 0; + + msleep(delay); +} + +static int acpi_lpss_activate(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + int ret; + + ret = acpi_dev_resume(dev); + if (ret) + return ret; + + acpi_lpss_d3_to_d0_delay(pdata); + + /* + * This is called only on ->probe() stage where a device is either in + * known state defined by BIOS or most likely powered off. Due to this + * we have to deassert reset line to be sure that ->probe() will + * recognize the device. + */ + if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE)) + lpss_deassert_reset(pdata); + + if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE) + acpi_lpss_save_ctx(dev, pdata); + + return 0; +} + +static void acpi_lpss_dismiss(struct device *dev) +{ + acpi_dev_suspend(dev, false); +} + +/* IOSF SB for LPSS island */ +#define LPSS_IOSF_UNIT_LPIOEP 0xA0 +#define LPSS_IOSF_UNIT_LPIO1 0xAB +#define LPSS_IOSF_UNIT_LPIO2 0xAC + +#define LPSS_IOSF_PMCSR 0x84 +#define LPSS_PMCSR_D0 0 +#define LPSS_PMCSR_D3hot 3 +#define LPSS_PMCSR_Dx_MASK GENMASK(1, 0) + +#define LPSS_IOSF_GPIODEF0 0x154 +#define LPSS_GPIODEF0_DMA1_D3 BIT(2) +#define LPSS_GPIODEF0_DMA2_D3 BIT(3) +#define LPSS_GPIODEF0_DMA_D3_MASK GENMASK(3, 2) +#define LPSS_GPIODEF0_DMA_LLP BIT(13) + +static DEFINE_MUTEX(lpss_iosf_mutex); +static bool lpss_iosf_d3_entered = true; + +static void lpss_iosf_enter_d3_state(void) +{ + u32 value1 = 0; + u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP; + u32 value2 = LPSS_PMCSR_D3hot; + u32 mask2 = LPSS_PMCSR_Dx_MASK; + /* + * PMC provides an information about actual status of the LPSS devices. + * Here we read the values related to LPSS power island, i.e. LPSS + * devices, excluding both LPSS DMA controllers, along with SCC domain. + */ + u32 func_dis, d3_sts_0, pmc_status; + int ret; + + ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis); + if (ret) + return; + + mutex_lock(&lpss_iosf_mutex); + + ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0); + if (ret) + goto exit; + + /* + * Get the status of entire LPSS power island per device basis. + * Shutdown both LPSS DMA controllers if and only if all other devices + * are already in D3hot. 
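The comment above gates the DMA controllers' D3 entry on every other LPSS device already being off. A small standalone sketch of that bit test (the helper is illustrative; register semantics as described in this file):

#include <linux/types.h>

/*
 * A masked device may be ignored if it is function-disabled or already
 * reported in D3 by the PMC; only when nothing in the mask remains active
 * may both DMA controllers be put into D3hot as well.
 */
static bool example_lpss_island_idle(u32 func_dis, u32 d3_sts_0, u32 d3_mask)
{
        u32 still_active = ~(d3_sts_0 | func_dis) & d3_mask;

        return still_active == 0;
}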
+ */ + pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask; + if (pmc_status) + goto exit; + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, + LPSS_IOSF_PMCSR, value2, mask2); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE, + LPSS_IOSF_PMCSR, value2, mask2); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, + LPSS_IOSF_GPIODEF0, value1, mask1); + + lpss_iosf_d3_entered = true; + +exit: + mutex_unlock(&lpss_iosf_mutex); +} + +static void lpss_iosf_exit_d3_state(void) +{ + u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 | + LPSS_GPIODEF0_DMA_LLP; + u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP; + u32 value2 = LPSS_PMCSR_D0; + u32 mask2 = LPSS_PMCSR_Dx_MASK; + + mutex_lock(&lpss_iosf_mutex); + + if (!lpss_iosf_d3_entered) + goto exit; + + lpss_iosf_d3_entered = false; + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, + LPSS_IOSF_GPIODEF0, value1, mask1); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE, + LPSS_IOSF_PMCSR, value2, mask2); + + iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, + LPSS_IOSF_PMCSR, value2, mask2); + +exit: + mutex_unlock(&lpss_iosf_mutex); +} + +static int acpi_lpss_suspend(struct device *dev, bool wakeup) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + int ret; + + if (pdata->dev_desc->flags & LPSS_SAVE_CTX) + acpi_lpss_save_ctx(dev, pdata); + + ret = acpi_dev_suspend(dev, wakeup); + + /* + * This call must be last in the sequence, otherwise PMC will return + * wrong status for devices being about to be powered off. See + * lpss_iosf_enter_d3_state() for further information. + */ + if (acpi_target_system_state() == ACPI_STATE_S0 && + lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) + lpss_iosf_enter_d3_state(); + + return ret; +} + +static int acpi_lpss_resume(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + int ret; + + /* + * This call is kept first to be in symmetry with + * acpi_lpss_runtime_suspend() one. + */ + if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) + lpss_iosf_exit_d3_state(); + + ret = acpi_dev_resume(dev); + if (ret) + return ret; + + acpi_lpss_d3_to_d0_delay(pdata); + + if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE)) + acpi_lpss_restore_ctx(dev, pdata); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int acpi_lpss_do_suspend_late(struct device *dev) +{ + int ret; + + if (dev_pm_skip_suspend(dev)) + return 0; + + ret = pm_generic_suspend_late(dev); + return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev)); +} + +static int acpi_lpss_suspend_late(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + + if (pdata->dev_desc->resume_from_noirq) + return 0; + + return acpi_lpss_do_suspend_late(dev); +} + +static int acpi_lpss_suspend_noirq(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + int ret; + + if (pdata->dev_desc->resume_from_noirq) { + /* + * The driver's ->suspend_late callback will be invoked by + * acpi_lpss_do_suspend_late(), with the assumption that the + * driver really wanted to run that code in ->suspend_noirq, but + * it could not run after acpi_dev_suspend() and the driver + * expected the latter to be called in the "late" phase. 
+ */ + ret = acpi_lpss_do_suspend_late(dev); + if (ret) + return ret; + } + + return acpi_subsys_suspend_noirq(dev); +} + +static int acpi_lpss_do_resume_early(struct device *dev) +{ + int ret = acpi_lpss_resume(dev); + + return ret ? ret : pm_generic_resume_early(dev); +} + +static int acpi_lpss_resume_early(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + + if (pdata->dev_desc->resume_from_noirq) + return 0; + + if (dev_pm_skip_resume(dev)) + return 0; + + return acpi_lpss_do_resume_early(dev); +} + +static int acpi_lpss_resume_noirq(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + int ret; + + /* Follow acpi_subsys_resume_noirq(). */ + if (dev_pm_skip_resume(dev)) + return 0; + + ret = pm_generic_resume_noirq(dev); + if (ret) + return ret; + + if (!pdata->dev_desc->resume_from_noirq) + return 0; + + /* + * The driver's ->resume_early callback will be invoked by + * acpi_lpss_do_resume_early(), with the assumption that the driver + * really wanted to run that code in ->resume_noirq, but it could not + * run before acpi_dev_resume() and the driver expected the latter to be + * called in the "early" phase. + */ + return acpi_lpss_do_resume_early(dev); +} + +static int acpi_lpss_do_restore_early(struct device *dev) +{ + int ret = acpi_lpss_resume(dev); + + return ret ? ret : pm_generic_restore_early(dev); +} + +static int acpi_lpss_restore_early(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + + if (pdata->dev_desc->resume_from_noirq) + return 0; + + return acpi_lpss_do_restore_early(dev); +} + +static int acpi_lpss_restore_noirq(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + int ret; + + ret = pm_generic_restore_noirq(dev); + if (ret) + return ret; + + if (!pdata->dev_desc->resume_from_noirq) + return 0; + + /* This is analogous to what happens in acpi_lpss_resume_noirq(). */ + return acpi_lpss_do_restore_early(dev); +} + +static int acpi_lpss_do_poweroff_late(struct device *dev) +{ + int ret = pm_generic_poweroff_late(dev); + + return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev)); +} + +static int acpi_lpss_poweroff_late(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + + if (dev_pm_skip_suspend(dev)) + return 0; + + if (pdata->dev_desc->resume_from_noirq) + return 0; + + return acpi_lpss_do_poweroff_late(dev); +} + +static int acpi_lpss_poweroff_noirq(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + + if (dev_pm_skip_suspend(dev)) + return 0; + + if (pdata->dev_desc->resume_from_noirq) { + /* This is analogous to the acpi_lpss_suspend_noirq() case. */ + int ret = acpi_lpss_do_poweroff_late(dev); + + if (ret) + return ret; + } + + return pm_generic_poweroff_noirq(dev); +} +#endif /* CONFIG_PM_SLEEP */ + +static int acpi_lpss_runtime_suspend(struct device *dev) +{ + int ret = pm_generic_runtime_suspend(dev); + + return ret ? ret : acpi_lpss_suspend(dev, true); +} + +static int acpi_lpss_runtime_resume(struct device *dev) +{ + int ret = acpi_lpss_resume(dev); + + return ret ? 
ret : pm_generic_runtime_resume(dev); +} +#endif /* CONFIG_PM */ + +static struct dev_pm_domain acpi_lpss_pm_domain = { +#ifdef CONFIG_PM + .activate = acpi_lpss_activate, + .dismiss = acpi_lpss_dismiss, +#endif + .ops = { +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP + .prepare = acpi_subsys_prepare, + .complete = acpi_subsys_complete, + .suspend = acpi_subsys_suspend, + .suspend_late = acpi_lpss_suspend_late, + .suspend_noirq = acpi_lpss_suspend_noirq, + .resume_noirq = acpi_lpss_resume_noirq, + .resume_early = acpi_lpss_resume_early, + .freeze = acpi_subsys_freeze, + .poweroff = acpi_subsys_poweroff, + .poweroff_late = acpi_lpss_poweroff_late, + .poweroff_noirq = acpi_lpss_poweroff_noirq, + .restore_noirq = acpi_lpss_restore_noirq, + .restore_early = acpi_lpss_restore_early, +#endif + .runtime_suspend = acpi_lpss_runtime_suspend, + .runtime_resume = acpi_lpss_runtime_resume, +#endif + }, +}; + +static int acpi_lpss_platform_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct platform_device *pdev = to_platform_device(data); + struct lpss_private_data *pdata; + struct acpi_device *adev; + const struct acpi_device_id *id; + + id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev); + if (!id || !id->driver_data) + return 0; + + adev = ACPI_COMPANION(&pdev->dev); + if (!adev) + return 0; + + pdata = acpi_driver_data(adev); + if (!pdata) + return 0; + + if (pdata->mmio_base && + pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) { + dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n"); + return 0; + } + + switch (action) { + case BUS_NOTIFY_BIND_DRIVER: + dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain); + break; + case BUS_NOTIFY_DRIVER_NOT_BOUND: + case BUS_NOTIFY_UNBOUND_DRIVER: + dev_pm_domain_set(&pdev->dev, NULL); + break; + case BUS_NOTIFY_ADD_DEVICE: + dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain); + if (pdata->dev_desc->flags & LPSS_LTR) + return sysfs_create_group(&pdev->dev.kobj, + &lpss_attr_group); + break; + case BUS_NOTIFY_DEL_DEVICE: + if (pdata->dev_desc->flags & LPSS_LTR) + sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); + dev_pm_domain_set(&pdev->dev, NULL); + break; + default: + break; + } + + return 0; +} + +static struct notifier_block acpi_lpss_nb = { + .notifier_call = acpi_lpss_platform_notify, +}; + +static void acpi_lpss_bind(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + + if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR)) + return; + + if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) + dev->power.set_latency_tolerance = acpi_lpss_set_ltr; + else + dev_err(dev, "MMIO size insufficient to access LTR\n"); +} + +static void acpi_lpss_unbind(struct device *dev) +{ + dev->power.set_latency_tolerance = NULL; +} + +static struct acpi_scan_handler lpss_handler = { + .ids = acpi_lpss_device_ids, + .attach = acpi_lpss_create_device, + .bind = acpi_lpss_bind, + .unbind = acpi_lpss_unbind, +}; + +void __init acpi_lpss_init(void) +{ + const struct x86_cpu_id *id; + int ret; + + ret = lpss_atom_clk_init(); + if (ret) + return; + + id = x86_match_cpu(lpss_cpu_ids); + if (id) + lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON; + + bus_register_notifier(&platform_bus_type, &acpi_lpss_nb); + acpi_scan_add_handler(&lpss_handler); +} + +#else + +static struct acpi_scan_handler lpss_handler = { + .ids = acpi_lpss_device_ids, +}; + +void __init acpi_lpss_init(void) +{ + acpi_scan_add_handler(&lpss_handler); +} + +#endif /* 
CONFIG_X86_INTEL_LPSS */ diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 448e0d14fd..2fe0934dcd 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -100,6 +100,15 @@ static const struct override_status_id override_status_ids[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"), }), + /* + * The Dell XPS 15 9550 has a SMO8110 accelerometer / + * HDD freefall sensor which is wrongly marked as not present. + */ + PRESENT_ENTRY_HID("SMO8810", "1", SKYLAKE, { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9550"), + }), + /* * The GPD win BIOS dated 20170221 has disabled the accelerometer, the * drivers sometimes cause crashes under Windows and this is how the diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index a24c152bfa..aba3aa95b2 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -488,28 +488,31 @@ static int __init amba_stub_drv_init(void) * waiting on amba_match(). So, register a stub driver to make sure * amba_match() is called even if no amba driver has been registered. */ - return amba_driver_register(&amba_proxy_drv); + return __amba_driver_register(&amba_proxy_drv, NULL); } late_initcall_sync(amba_stub_drv_init); /** - * amba_driver_register - register an AMBA device driver + * __amba_driver_register - register an AMBA device driver * @drv: amba device driver structure + * @owner: owning module/driver * * Register an AMBA device driver with the Linux device model * core. If devices pre-exist, the drivers probe function will * be called. */ -int amba_driver_register(struct amba_driver *drv) +int __amba_driver_register(struct amba_driver *drv, + struct module *owner) { if (!drv->probe) return -EINVAL; + drv->drv.owner = owner; drv->drv.bus = &amba_bustype; return driver_register(&drv->drv); } -EXPORT_SYMBOL(amba_driver_register); +EXPORT_SYMBOL(__amba_driver_register); /** * amba_driver_unregister - remove an AMBA device driver diff --git a/drivers/android/binder.c b/drivers/android/binder.c index b21a7b246a..2d0a24a565 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -570,9 +570,7 @@ static bool binder_has_work(struct binder_thread *thread, bool do_proc_work) static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread) { return !thread->transaction_stack && - binder_worklist_empty_ilocked(&thread->todo) && - (thread->looper & (BINDER_LOOPER_STATE_ENTERED | - BINDER_LOOPER_STATE_REGISTERED)); + binder_worklist_empty_ilocked(&thread->todo); } static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc, diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 928ec93c6b..b595494ab9 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -556,7 +556,7 @@ comment "PATA SFF controllers with BMDMA" config PATA_ALI tristate "ALi PATA support" - depends on PCI + depends on PCI && HAS_IOPORT select PATA_TIMINGS help This option enables support for the ALi ATA interfaces @@ -566,7 +566,7 @@ config PATA_ALI config PATA_AMD tristate "AMD/NVidia PATA support" - depends on PCI + depends on PCI && HAS_IOPORT select PATA_TIMINGS help This option enables support for the AMD and NVidia PATA @@ -584,7 +584,7 @@ config PATA_ARASAN_CF config PATA_ARTOP tristate "ARTOP 6210/6260 PATA support" - depends on PCI + depends on PCI && HAS_IOPORT help This option enables support for ARTOP PATA controllers. 
@@ -611,7 +611,7 @@ config PATA_ATP867X config PATA_CMD64X tristate "CMD64x PATA support" - depends on PCI + depends on PCI && HAS_IOPORT select PATA_TIMINGS help This option enables support for the CMD64x series chips @@ -658,7 +658,7 @@ config PATA_CS5536 config PATA_CYPRESS tristate "Cypress CY82C693 PATA support (Very Experimental)" - depends on PCI + depends on PCI && HAS_IOPORT select PATA_TIMINGS help This option enables support for the Cypress/Contaq CY82C693 @@ -706,7 +706,7 @@ config PATA_HPT366 config PATA_HPT37X tristate "HPT 370/370A/371/372/374/302 PATA support" - depends on PCI + depends on PCI && HAS_IOPORT help This option enables support for the majority of the later HPT PATA controllers via the new ATA layer. @@ -715,7 +715,7 @@ config PATA_HPT37X config PATA_HPT3X2N tristate "HPT 371N/372N/302N PATA support" - depends on PCI + depends on PCI && HAS_IOPORT help This option enables support for the N variant HPT PATA controllers via the new ATA layer. @@ -818,7 +818,7 @@ config PATA_MPC52xx config PATA_NETCELL tristate "NETCELL Revolution RAID support" - depends on PCI + depends on PCI && HAS_IOPORT help This option enables support for the Netcell Revolution RAID PATA controller. @@ -854,7 +854,7 @@ config PATA_OLDPIIX config PATA_OPTIDMA tristate "OPTI FireStar PATA support (Very Experimental)" - depends on PCI + depends on PCI && HAS_IOPORT help This option enables DMA/PIO support for the later OPTi controllers found on some old motherboards and in some @@ -864,7 +864,7 @@ config PATA_OPTIDMA config PATA_PDC2027X tristate "Promise PATA 2027x support" - depends on PCI + depends on PCI && HAS_IOPORT help This option enables support for Promise PATA pdc20268 to pdc20277 host adapters. @@ -872,7 +872,7 @@ config PATA_PDC2027X config PATA_PDC_OLD tristate "Older Promise PATA controller support" - depends on PCI + depends on PCI && HAS_IOPORT help This option enables support for the Promise 20246, 20262, 20263, 20265 and 20267 adapters. @@ -900,7 +900,7 @@ config PATA_RDC config PATA_SC1200 tristate "SC1200 PATA support" - depends on PCI && (X86_32 || COMPILE_TEST) + depends on PCI && (X86_32 || COMPILE_TEST) && HAS_IOPORT help This option enables support for the NatSemi/AMD SC1200 SoC companion chip used with the Geode processor family. @@ -918,7 +918,7 @@ config PATA_SCH config PATA_SERVERWORKS tristate "SERVERWORKS OSB4/CSB5/CSB6/HT1000 PATA support" - depends on PCI + depends on PCI && HAS_IOPORT help This option enables support for the Serverworks OSB4/CSB5/CSB6 and HT1000 PATA controllers, via the new ATA layer. 
@@ -1182,7 +1182,7 @@ config ATA_GENERIC config PATA_LEGACY tristate "Legacy ISA PATA support (Experimental)" - depends on (ISA || PCI) + depends on (ISA || PCI) && HAS_IOPORT select PATA_TIMINGS help This option enables support for ISA/VLB/PCI bus legacy PATA diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 344c87210d..8f40f75ba0 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -397,7 +397,7 @@ extern const struct attribute_group *ahci_sdev_groups[]; .sdev_groups = ahci_sdev_groups, \ .change_queue_depth = ata_scsi_change_queue_depth, \ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ - .slave_configure = ata_scsi_slave_config + .device_configure = ata_scsi_device_configure extern struct ata_port_operations ahci_ops; extern struct ata_port_operations ahci_platform_ops; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index d937e6e5cc..74b59b78d2 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1480,19 +1480,19 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc) } /** - * ata_exec_internal_sg - execute libata internal command + * ata_exec_internal - execute libata internal command * @dev: Device to which the command is sent * @tf: Taskfile registers for the command and the result * @cdb: CDB for packet command * @dma_dir: Data transfer direction of the command - * @sgl: sg list for the data buffer of the command - * @n_elem: Number of sg entries + * @buf: Data buffer of the command + * @buflen: Length of data buffer * @timeout: Timeout in msecs (0 for default) * - * Executes libata internal command with timeout. @tf contains - * command on entry and result on return. Timeout and error - * conditions are reported via return value. No recovery action - * is taken after a command times out. It's caller's duty to + * Executes libata internal command with timeout. @tf contains + * the command on entry and the result on return. Timeout and error + * conditions are reported via the return value. No recovery action + * is taken after a command times out. It is the caller's duty to * clean up after timeout. 
* * LOCKING: @@ -1501,34 +1501,38 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc) * RETURNS: * Zero on success, AC_ERR_* mask on failure */ -static unsigned ata_exec_internal_sg(struct ata_device *dev, - struct ata_taskfile *tf, const u8 *cdb, - int dma_dir, struct scatterlist *sgl, - unsigned int n_elem, unsigned int timeout) +unsigned int ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf, + const u8 *cdb, enum dma_data_direction dma_dir, + void *buf, unsigned int buflen, + unsigned int timeout) { struct ata_link *link = dev->link; struct ata_port *ap = link->ap; u8 command = tf->command; - int auto_timeout = 0; struct ata_queued_cmd *qc; + struct scatterlist sgl; unsigned int preempted_tag; u32 preempted_sactive; u64 preempted_qc_active; int preempted_nr_active_links; + bool auto_timeout = false; DECLARE_COMPLETION_ONSTACK(wait); unsigned long flags; unsigned int err_mask; int rc; + if (WARN_ON(dma_dir != DMA_NONE && !buf)) + return AC_ERR_INVALID; + spin_lock_irqsave(ap->lock, flags); - /* no internal command while frozen */ + /* No internal command while frozen */ if (ata_port_is_frozen(ap)) { spin_unlock_irqrestore(ap->lock, flags); return AC_ERR_SYSTEM; } - /* initialize internal qc */ + /* Initialize internal qc */ qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL); qc->tag = ATA_TAG_INTERNAL; @@ -1547,12 +1551,12 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev, ap->qc_active = 0; ap->nr_active_links = 0; - /* prepare & issue qc */ + /* Prepare and issue qc */ qc->tf = *tf; if (cdb) memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); - /* some SATA bridges need us to indicate data xfer direction */ + /* Some SATA bridges need us to indicate data xfer direction */ if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) && dma_dir == DMA_FROM_DEVICE) qc->tf.feature |= ATAPI_DMADIR; @@ -1560,13 +1564,8 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev, qc->flags |= ATA_QCFLAG_RESULT_TF; qc->dma_dir = dma_dir; if (dma_dir != DMA_NONE) { - unsigned int i, buflen = 0; - struct scatterlist *sg; - - for_each_sg(sgl, sg, n_elem, i) - buflen += sg->length; - - ata_sg_init(qc, sgl, n_elem); + sg_init_one(&sgl, buf, buflen); + ata_sg_init(qc, &sgl, 1); qc->nbytes = buflen; } @@ -1578,11 +1577,11 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev, spin_unlock_irqrestore(ap->lock, flags); if (!timeout) { - if (ata_probe_timeout) + if (ata_probe_timeout) { timeout = ata_probe_timeout * 1000; - else { + } else { timeout = ata_internal_cmd_timeout(dev, command); - auto_timeout = 1; + auto_timeout = true; } } @@ -1595,30 +1594,25 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev, ata_sff_flush_pio_task(ap); if (!rc) { - spin_lock_irqsave(ap->lock, flags); - - /* We're racing with irq here. If we lose, the - * following test prevents us from completing the qc - * twice. If we win, the port is frozen and will be - * cleaned up by ->post_internal_cmd(). + /* + * We are racing with irq here. If we lose, the following test + * prevents us from completing the qc twice. If we win, the port + * is frozen and will be cleaned up by ->post_internal_cmd(). 
*/ + spin_lock_irqsave(ap->lock, flags); if (qc->flags & ATA_QCFLAG_ACTIVE) { qc->err_mask |= AC_ERR_TIMEOUT; - ata_port_freeze(ap); - ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n", timeout, command); } - spin_unlock_irqrestore(ap->lock, flags); } - /* do post_internal_cmd */ if (ap->ops->post_internal_cmd) ap->ops->post_internal_cmd(qc); - /* perform minimal error analysis */ + /* Perform minimal error analysis */ if (qc->flags & ATA_QCFLAG_EH) { if (qc->result_tf.status & (ATA_ERR | ATA_DF)) qc->err_mask |= AC_ERR_DEV; @@ -1632,7 +1626,7 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev, qc->result_tf.status |= ATA_SENSE; } - /* finish up */ + /* Finish up */ spin_lock_irqsave(ap->lock, flags); *tf = qc->result_tf; @@ -1652,44 +1646,6 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev, return err_mask; } -/** - * ata_exec_internal - execute libata internal command - * @dev: Device to which the command is sent - * @tf: Taskfile registers for the command and the result - * @cdb: CDB for packet command - * @dma_dir: Data transfer direction of the command - * @buf: Data buffer of the command - * @buflen: Length of data buffer - * @timeout: Timeout in msecs (0 for default) - * - * Wrapper around ata_exec_internal_sg() which takes simple - * buffer instead of sg list. - * - * LOCKING: - * None. Should be called with kernel context, might sleep. - * - * RETURNS: - * Zero on success, AC_ERR_* mask on failure - */ -unsigned ata_exec_internal(struct ata_device *dev, - struct ata_taskfile *tf, const u8 *cdb, - int dma_dir, void *buf, unsigned int buflen, - unsigned int timeout) -{ - struct scatterlist *psg = NULL, sg; - unsigned int n_elem = 0; - - if (dma_dir != DMA_NONE) { - WARN_ON(!buf); - sg_init_one(&sg, buf, buflen); - psg = &sg; - n_elem++; - } - - return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem, - timeout); -} - /** * ata_pio_need_iordy - check if iordy needed * @adev: ATA device @@ -4199,12 +4155,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM | ATA_HORKAGE_NOLPM }, - /* Apacer models with LPM issues */ - { "Apacer AS340*", NULL, ATA_HORKAGE_NOLPM }, - /* AMD Radeon devices with broken LPM support */ { "R3SL240G", NULL, ATA_HORKAGE_NOLPM }, + /* Apacer models with LPM issues */ + { "Apacer AS340*", NULL, ATA_HORKAGE_NOLPM }, + /* These specific Samsung models/firmware-revs do not handle LPM well */ { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM }, { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM }, diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index 0fb1934875..9e047bf912 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -848,80 +848,143 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR, ata_scsi_lpm_show, ata_scsi_lpm_store); EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy); -static ssize_t ata_ncq_prio_supported_show(struct device *device, - struct device_attribute *attr, - char *buf) +/** + * ata_ncq_prio_supported - Check if device supports NCQ Priority + * @ap: ATA port of the target device + * @sdev: SCSI device + * @supported: Address of a boolean to store the result + * + * Helper to check if device supports NCQ Priority feature. + * + * Context: Any context. Takes and releases @ap->lock. + * + * Return: + * * %0 - OK. 
Status is stored into @supported + * * %-ENODEV - Failed to find the ATA device + */ +int ata_ncq_prio_supported(struct ata_port *ap, struct scsi_device *sdev, + bool *supported) { - struct scsi_device *sdev = to_scsi_device(device); - struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *dev; - bool ncq_prio_supported; + unsigned long flags; int rc = 0; - spin_lock_irq(ap->lock); + spin_lock_irqsave(ap->lock, flags); dev = ata_scsi_find_dev(ap, sdev); if (!dev) rc = -ENODEV; else - ncq_prio_supported = dev->flags & ATA_DFLAG_NCQ_PRIO; - spin_unlock_irq(ap->lock); + *supported = dev->flags & ATA_DFLAG_NCQ_PRIO; + spin_unlock_irqrestore(ap->lock, flags); + + return rc; +} +EXPORT_SYMBOL_GPL(ata_ncq_prio_supported); + +static ssize_t ata_ncq_prio_supported_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(device); + struct ata_port *ap = ata_shost_to_port(sdev->host); + bool supported; + int rc; + + rc = ata_ncq_prio_supported(ap, sdev, &supported); + if (rc) + return rc; - return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_supported); + return sysfs_emit(buf, "%d\n", supported); } DEVICE_ATTR(ncq_prio_supported, S_IRUGO, ata_ncq_prio_supported_show, NULL); EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_supported); -static ssize_t ata_ncq_prio_enable_show(struct device *device, - struct device_attribute *attr, - char *buf) +/** + * ata_ncq_prio_enabled - Check if NCQ Priority is enabled + * @ap: ATA port of the target device + * @sdev: SCSI device + * @enabled: Address of a boolean to store the result + * + * Helper to check if NCQ Priority feature is enabled. + * + * Context: Any context. Takes and releases @ap->lock. + * + * Return: + * * %0 - OK. Status is stored into @enabled + * * %-ENODEV - Failed to find the ATA device + */ +int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev, + bool *enabled) { - struct scsi_device *sdev = to_scsi_device(device); - struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *dev; - bool ncq_prio_enable; + unsigned long flags; int rc = 0; - spin_lock_irq(ap->lock); + spin_lock_irqsave(ap->lock, flags); dev = ata_scsi_find_dev(ap, sdev); if (!dev) rc = -ENODEV; else - ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED; - spin_unlock_irq(ap->lock); + *enabled = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED; + spin_unlock_irqrestore(ap->lock, flags); - return rc ? 
rc : sysfs_emit(buf, "%u\n", ncq_prio_enable); + return rc; } +EXPORT_SYMBOL_GPL(ata_ncq_prio_enabled); -static ssize_t ata_ncq_prio_enable_store(struct device *device, - struct device_attribute *attr, - const char *buf, size_t len) +static ssize_t ata_ncq_prio_enable_show(struct device *device, + struct device_attribute *attr, + char *buf) { struct scsi_device *sdev = to_scsi_device(device); - struct ata_port *ap; - struct ata_device *dev; - long int input; - int rc = 0; + struct ata_port *ap = ata_shost_to_port(sdev->host); + bool enabled; + int rc; - rc = kstrtol(buf, 10, &input); + rc = ata_ncq_prio_enabled(ap, sdev, &enabled); if (rc) return rc; - if ((input < 0) || (input > 1)) - return -EINVAL; - ap = ata_shost_to_port(sdev->host); - dev = ata_scsi_find_dev(ap, sdev); - if (unlikely(!dev)) - return -ENODEV; + return sysfs_emit(buf, "%d\n", enabled); +} + +/** + * ata_ncq_prio_enable - Enable/disable NCQ Priority + * @ap: ATA port of the target device + * @sdev: SCSI device + * @enable: true - enable NCQ Priority, false - disable NCQ Priority + * + * Helper to enable/disable NCQ Priority feature. + * + * Context: Any context. Takes and releases @ap->lock. + * + * Return: + * * %0 - OK. Status is stored into @enabled + * * %-ENODEV - Failed to find the ATA device + * * %-EINVAL - NCQ Priority is not supported or CDL is enabled + */ +int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev, + bool enable) +{ + struct ata_device *dev; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(ap->lock, flags); - spin_lock_irq(ap->lock); + dev = ata_scsi_find_dev(ap, sdev); + if (!dev) { + rc = -ENODEV; + goto unlock; + } if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) { rc = -EINVAL; goto unlock; } - if (input) { + if (enable) { if (dev->flags & ATA_DFLAG_CDL_ENABLED) { ata_dev_err(dev, "CDL must be disabled to enable NCQ priority\n"); @@ -934,9 +997,30 @@ static ssize_t ata_ncq_prio_enable_store(struct device *device, } unlock: - spin_unlock_irq(ap->lock); + spin_unlock_irqrestore(ap->lock, flags); + + return rc; +} +EXPORT_SYMBOL_GPL(ata_ncq_prio_enable); + +static ssize_t ata_ncq_prio_enable_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct scsi_device *sdev = to_scsi_device(device); + struct ata_port *ap = ata_shost_to_port(sdev->host); + bool enable; + int rc; + + rc = kstrtobool(buf, &enable); + if (rc) + return rc; + + rc = ata_ncq_prio_enable(ap, sdev, enable); + if (rc) + return rc; - return rc ? rc : len; + return len; } DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR, @@ -1170,21 +1254,24 @@ void ata_sas_tport_delete(struct ata_port *ap) EXPORT_SYMBOL_GPL(ata_sas_tport_delete); /** - * ata_sas_slave_configure - Default slave_config routine for libata devices + * ata_sas_device_configure - Default device_configure routine for libata + * devices * @sdev: SCSI device to configure + * @lim: queue limits * @ap: ATA port to which SCSI device is attached * * RETURNS: * Zero. 
*/ -int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap) +int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim, + struct ata_port *ap) { ata_scsi_sdev_config(sdev); - return ata_scsi_dev_config(sdev, ap->link.device); + return ata_scsi_dev_config(sdev, lim, ap->link.device); } -EXPORT_SYMBOL_GPL(ata_sas_slave_configure); +EXPORT_SYMBOL_GPL(ata_sas_device_configure); /** * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 9c3daa7d19..076fbeadce 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -230,6 +230,80 @@ void ata_scsi_set_sense_information(struct ata_device *dev, SCSI_SENSE_BUFFERSIZE, information); } +/** + * ata_scsi_set_passthru_sense_fields - Set ATA fields in sense buffer + * @qc: ATA PASS-THROUGH command. + * + * Populates "ATA Status Return sense data descriptor" / "Fixed format + * sense data" with ATA taskfile fields. + * + * LOCKING: + * None. + */ +static void ata_scsi_set_passthru_sense_fields(struct ata_queued_cmd *qc) +{ + struct scsi_cmnd *cmd = qc->scsicmd; + struct ata_taskfile *tf = &qc->result_tf; + unsigned char *sb = cmd->sense_buffer; + + if ((sb[0] & 0x7f) >= 0x72) { + unsigned char *desc; + u8 len; + + /* descriptor format */ + len = sb[7]; + desc = (char *)scsi_sense_desc_find(sb, len + 8, 9); + if (!desc) { + if (SCSI_SENSE_BUFFERSIZE < len + 14) + return; + sb[7] = len + 14; + desc = sb + 8 + len; + } + desc[0] = 9; + desc[1] = 12; + /* + * Copy registers into sense buffer. + */ + desc[2] = 0x00; + desc[3] = tf->error; + desc[5] = tf->nsect; + desc[7] = tf->lbal; + desc[9] = tf->lbam; + desc[11] = tf->lbah; + desc[12] = tf->device; + desc[13] = tf->status; + + /* + * Fill in Extend bit, and the high order bytes + * if applicable. + */ + if (tf->flags & ATA_TFLAG_LBA48) { + desc[2] |= 0x01; + desc[4] = tf->hob_nsect; + desc[6] = tf->hob_lbal; + desc[8] = tf->hob_lbam; + desc[10] = tf->hob_lbah; + } + } else { + /* Fixed sense format */ + sb[0] |= 0x80; + sb[3] = tf->error; + sb[4] = tf->status; + sb[5] = tf->device; + sb[6] = tf->nsect; + if (tf->flags & ATA_TFLAG_LBA48) { + sb[8] |= 0x80; + if (tf->hob_nsect) + sb[8] |= 0x40; + if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah) + sb[8] |= 0x20; + } + sb[9] = tf->lbal; + sb[10] = tf->lbam; + sb[11] = tf->lbah; + } +} + static void ata_scsi_set_invalid_field(struct ata_device *dev, struct scsi_cmnd *cmd, u16 field, u8 bit) { @@ -837,10 +911,8 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, * ata_gen_passthru_sense - Generate check condition sense block. * @qc: Command that completed. * - * This function is specific to the ATA descriptor format sense - * block specified for the ATA pass through commands. Regardless - * of whether the command errored or not, return a sense - * block. Copy all controller registers into the sense + * This function is specific to the ATA pass through commands. + * Regardless of whether the command errored or not, return a sense * block. 
If there was no error, we get the request from an ATA * passthrough command, so we use the following sense data: * sk = RECOVERED ERROR @@ -855,7 +927,6 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) struct scsi_cmnd *cmd = qc->scsicmd; struct ata_taskfile *tf = &qc->result_tf; unsigned char *sb = cmd->sense_buffer; - unsigned char *desc = sb + 8; u8 sense_key, asc, ascq; memset(sb, 0, SCSI_SENSE_BUFFERSIZE); @@ -870,67 +941,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) &sense_key, &asc, &ascq); ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); } else { - /* - * ATA PASS-THROUGH INFORMATION AVAILABLE - * Always in descriptor format sense. - */ - scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D); - } - - if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) { - u8 len; - - /* descriptor format */ - len = sb[7]; - desc = (char *)scsi_sense_desc_find(sb, len + 8, 9); - if (!desc) { - if (SCSI_SENSE_BUFFERSIZE < len + 14) - return; - sb[7] = len + 14; - desc = sb + 8 + len; - } - desc[0] = 9; - desc[1] = 12; - /* - * Copy registers into sense buffer. - */ - desc[2] = 0x00; - desc[3] = tf->error; - desc[5] = tf->nsect; - desc[7] = tf->lbal; - desc[9] = tf->lbam; - desc[11] = tf->lbah; - desc[12] = tf->device; - desc[13] = tf->status; - - /* - * Fill in Extend bit, and the high order bytes - * if applicable. - */ - if (tf->flags & ATA_TFLAG_LBA48) { - desc[2] |= 0x01; - desc[4] = tf->hob_nsect; - desc[6] = tf->hob_lbal; - desc[8] = tf->hob_lbam; - desc[10] = tf->hob_lbah; - } - } else { - /* Fixed sense format */ - desc[0] = tf->error; - desc[1] = tf->status; - desc[2] = tf->device; - desc[3] = tf->nsect; - desc[7] = 0; - if (tf->flags & ATA_TFLAG_LBA48) { - desc[8] |= 0x80; - if (tf->hob_nsect) - desc[8] |= 0x40; - if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah) - desc[8] |= 0x20; - } - desc[9] = tf->lbal; - desc[10] = tf->lbam; - desc[11] = tf->lbah; + /* ATA PASS-THROUGH INFORMATION AVAILABLE */ + ata_scsi_set_sense(qc->dev, cmd, RECOVERED_ERROR, 0, 0x1D); } } @@ -1021,7 +1033,8 @@ bool ata_scsi_dma_need_drain(struct request *rq) } EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain); -int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) +int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim, + struct ata_device *dev) { struct request_queue *q = sdev->request_queue; int depth = 1; @@ -1031,7 +1044,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) /* configure max sectors */ dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors); - blk_queue_max_hw_sectors(q, dev->max_sectors); + lim->max_hw_sectors = dev->max_sectors; if (dev->class == ATA_DEV_ATAPI) { sdev->sector_size = ATA_SECT_SIZE; @@ -1040,7 +1053,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1); /* make room for appending the drain */ - blk_queue_max_segments(q, queue_max_segments(q) - 1); + lim->max_segments--; sdev->dma_drain_len = ATAPI_MAX_DRAIN; sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO); @@ -1077,7 +1090,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) "sector_size=%u > PAGE_SIZE, PIO may malfunction\n", sdev->sector_size); - blk_queue_update_dma_alignment(q, sdev->sector_size - 1); + lim->dma_alignment = sdev->sector_size - 1; if (dev->flags & ATA_DFLAG_AN) set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); @@ -1131,8 +1144,9 @@ int ata_scsi_slave_alloc(struct scsi_device *sdev) 
EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc); /** - * ata_scsi_slave_config - Set SCSI device attributes + * ata_scsi_device_configure - Set SCSI device attributes * @sdev: SCSI device to examine + * @lim: queue limits * * This is called before we actually start reading * and writing to the device, to configure certain @@ -1142,17 +1156,18 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc); * Defined by SCSI layer. We don't really care. */ -int ata_scsi_slave_config(struct scsi_device *sdev) +int ata_scsi_device_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); if (dev) - return ata_scsi_dev_config(sdev, dev); + return ata_scsi_dev_config(sdev, lim, dev); return 0; } -EXPORT_SYMBOL_GPL(ata_scsi_slave_config); +EXPORT_SYMBOL_GPL(ata_scsi_device_configure); /** * ata_scsi_slave_destroy - SCSI device is about to be destroyed @@ -1629,26 +1644,32 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) { struct scsi_cmnd *cmd = qc->scsicmd; u8 *cdb = cmd->cmnd; - int need_sense = (qc->err_mask != 0) && - !(qc->flags & ATA_QCFLAG_SENSE_VALID); + bool have_sense = qc->flags & ATA_QCFLAG_SENSE_VALID; + bool is_ata_passthru = cdb[0] == ATA_16 || cdb[0] == ATA_12; + bool is_ck_cond_request = cdb[2] & 0x20; + bool is_error = qc->err_mask != 0; /* For ATA pass thru (SAT) commands, generate a sense block if * user mandated it or if there's an error. Note that if we - * generate because the user forced us to [CK_COND =1], a check + * generate because the user forced us to [CK_COND=1], a check * condition is generated and the ATA register values are returned * whether the command completed successfully or not. If there - * was no error, we use the following sense data: + * was no error, and CK_COND=1, we use the following sense data: * sk = RECOVERED ERROR * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE */ - if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && - ((cdb[2] & 0x20) || need_sense)) - ata_gen_passthru_sense(qc); - else if (need_sense) + if (is_ata_passthru && (is_ck_cond_request || is_error || have_sense)) { + if (!have_sense) + ata_gen_passthru_sense(qc); + ata_scsi_set_passthru_sense_fields(qc); + if (is_ck_cond_request) + set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION); + } else if (is_error && !have_sense) { ata_gen_ata_sense(qc); - else + } else { /* Keep the SCSI ML and status byte, clear host byte. */ cmd->result &= 0x0000ffff; + } ata_qc_done(qc); } @@ -2587,14 +2608,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) /* handle completion from EH */ if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) { - if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) { - /* FIXME: not quite right; we don't want the - * translation of taskfile registers into a - * sense descriptors, since that's only - * correct for ATA, not ATAPI - */ + if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) ata_gen_passthru_sense(qc); - } /* SCSI EH automatically locks door if sdev->locked is * set. 
Sometimes door lock request continues to diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 95a19c4ef2..250f7dae05 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -3032,6 +3032,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_port_start32); */ int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) { +#ifdef CONFIG_HAS_IOPORT unsigned long bmdma = pci_resource_start(pdev, 4); u8 simplex; @@ -3044,6 +3045,9 @@ int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) if (simplex & 0x80) return -EOPNOTSUPP; return 0; +#else + return -ENOENT; +#endif /* CONFIG_HAS_IOPORT */ } EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 5c685bb193..38ce13b554 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -50,10 +50,10 @@ extern int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block, unsigned int tf_flags, int dld, int class); extern u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev); -extern unsigned ata_exec_internal(struct ata_device *dev, - struct ata_taskfile *tf, const u8 *cdb, - int dma_dir, void *buf, unsigned int buflen, - unsigned int timeout); +unsigned int ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf, + const u8 *cdb, enum dma_data_direction dma_dir, + void *buf, unsigned int buflen, + unsigned int timeout); extern int ata_wait_ready(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)); extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, @@ -131,7 +131,8 @@ extern void ata_scsi_dev_rescan(struct work_struct *work); extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, unsigned int id, u64 lun); void ata_scsi_sdev_config(struct scsi_device *sdev); -int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev); +int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim, + struct ata_device *dev); int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev); /* libata-eh.c */ diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index 38795508c2..027cf67101 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c @@ -151,12 +151,6 @@ static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id) if (!host) return -ENOMEM; - /* Perform set up for DMA */ - if (pci_enable_device_io(pdev)) { - dev_err(&pdev->dev, "unable to configure BAR2.\n"); - return -ENODEV; - } - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { dev_err(&pdev->dev, "unable to configure DMA mask.\n"); return -ENODEV; diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 88b2e9817f..3cb455a32d 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -796,7 +796,8 @@ static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume) /* Hook the standard slave config to fixup some HW related alignment * restrictions */ -static int pata_macio_slave_config(struct scsi_device *sdev) +static int pata_macio_device_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct ata_port *ap = ata_shost_to_port(sdev->host); struct pata_macio_priv *priv = ap->private_data; @@ -805,7 +806,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev) int rc; /* First call original */ - rc = ata_scsi_slave_config(sdev); + rc = ata_scsi_device_configure(sdev, lim); if (rc) return rc; @@ -814,7 +815,7 @@ static int pata_macio_slave_config(struct scsi_device 
*sdev) /* OHare has issues with non cache aligned DMA on some chipsets */ if (priv->kind == controller_ohare) { - blk_queue_update_dma_alignment(sdev->request_queue, 31); + lim->dma_alignment = 31; blk_queue_update_dma_pad(sdev->request_queue, 31); /* Tell the world about it */ @@ -829,7 +830,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev) /* Shasta and K2 seem to have "issues" with reads ... */ if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) { /* Allright these are bad, apply restrictions */ - blk_queue_update_dma_alignment(sdev->request_queue, 15); + lim->dma_alignment = 15; blk_queue_update_dma_pad(sdev->request_queue, 15); /* We enable MWI and hack cache line size directly here, this @@ -914,11 +915,14 @@ static const struct scsi_host_template pata_macio_sht = { .sg_tablesize = MAX_DCMDS, /* We may not need that strict one */ .dma_boundary = ATA_DMA_BOUNDARY, - /* Not sure what the real max is but we know it's less than 64K, let's - * use 64K minus 256 + /* + * The SCSI core requires the segment size to cover at least a page, so + * for 64K page size kernels this must be at least 64K. However the + * hardware can't handle 64K, so pata_macio_qc_prep() will split large + * requests. */ - .max_segment_size = MAX_DBDMA_SEG, - .slave_configure = pata_macio_slave_config, + .max_segment_size = SZ_64K, + .device_configure = pata_macio_device_configure, .sdev_groups = ata_common_sdev_groups, .can_queue = ATA_DEF_QUEUE, .tag_alloc_policy = BLK_TAG_ALLOC_RR, diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 9bec0aee92..05c905827d 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -673,7 +673,7 @@ static const struct scsi_host_template mv6_sht = { .sdev_groups = ata_ncq_sdev_groups, .change_queue_depth = ata_scsi_change_queue_depth, .tag_alloc_policy = BLK_TAG_ALLOC_RR, - .slave_configure = ata_scsi_slave_config + .device_configure = ata_scsi_device_configure }; static struct ata_port_operations mv5_ops = { diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 0a0cee755b..36d99043ef 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -296,7 +296,8 @@ static void nv_nf2_freeze(struct ata_port *ap); static void nv_nf2_thaw(struct ata_port *ap); static void nv_ck804_freeze(struct ata_port *ap); static void nv_ck804_thaw(struct ata_port *ap); -static int nv_adma_slave_config(struct scsi_device *sdev); +static int nv_adma_device_configure(struct scsi_device *sdev, + struct queue_limits *lim); static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc); static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc); @@ -318,7 +319,8 @@ static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf); static void nv_mcp55_thaw(struct ata_port *ap); static void nv_mcp55_freeze(struct ata_port *ap); static void nv_swncq_error_handler(struct ata_port *ap); -static int nv_swncq_slave_config(struct scsi_device *sdev); +static int nv_swncq_device_configure(struct scsi_device *sdev, + struct queue_limits *lim); static int nv_swncq_port_start(struct ata_port *ap); static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc); static void nv_swncq_fill_sg(struct ata_queued_cmd *qc); @@ -380,7 +382,7 @@ static const struct scsi_host_template nv_adma_sht = { .can_queue = NV_ADMA_MAX_CPBS, .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN, .dma_boundary = NV_ADMA_DMA_BOUNDARY, - .slave_configure = nv_adma_slave_config, + 
.device_configure = nv_adma_device_configure, .sdev_groups = ata_ncq_sdev_groups, .change_queue_depth = ata_scsi_change_queue_depth, .tag_alloc_policy = BLK_TAG_ALLOC_RR, @@ -391,7 +393,7 @@ static const struct scsi_host_template nv_swncq_sht = { .can_queue = ATA_MAX_QUEUE - 1, .sg_tablesize = LIBATA_MAX_PRD, .dma_boundary = ATA_DMA_BOUNDARY, - .slave_configure = nv_swncq_slave_config, + .device_configure = nv_swncq_device_configure, .sdev_groups = ata_ncq_sdev_groups, .change_queue_depth = ata_scsi_change_queue_depth, .tag_alloc_policy = BLK_TAG_ALLOC_RR, @@ -661,7 +663,8 @@ static void nv_adma_mode(struct ata_port *ap) pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE; } -static int nv_adma_slave_config(struct scsi_device *sdev) +static int nv_adma_device_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct ata_port *ap = ata_shost_to_port(sdev->host); struct nv_adma_port_priv *pp = ap->private_data; @@ -673,7 +676,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev) int adma_enable; u32 current_reg, new_reg, config_mask; - rc = ata_scsi_slave_config(sdev); + rc = ata_scsi_device_configure(sdev, lim); if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun) /* Not a proper libata device, ignore */ @@ -740,8 +743,8 @@ static int nv_adma_slave_config(struct scsi_device *sdev) rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask); } - blk_queue_segment_boundary(sdev->request_queue, segment_boundary); - blk_queue_max_segments(sdev->request_queue, sg_tablesize); + lim->seg_boundary_mask = segment_boundary; + lim->max_segments = sg_tablesize; ata_port_info(ap, "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n", (unsigned long long)*ap->host->dev->dma_mask, @@ -1868,7 +1871,8 @@ static void nv_swncq_host_init(struct ata_host *host) writel(~0x0, mmio + NV_INT_STATUS_MCP55); } -static int nv_swncq_slave_config(struct scsi_device *sdev) +static int nv_swncq_device_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct ata_port *ap = ata_shost_to_port(sdev->host); struct pci_dev *pdev = to_pci_dev(ap->host->dev); @@ -1878,7 +1882,7 @@ static int nv_swncq_slave_config(struct scsi_device *sdev) u8 check_maxtor = 0; unsigned char model_num[ATA_ID_PROD_LEN + 1]; - rc = ata_scsi_slave_config(sdev); + rc = ata_scsi_device_configure(sdev, lim); if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun) /* Not a proper libata device, ignore */ return rc; diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 142e70bfc4..72c03cbdaf 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -381,7 +381,7 @@ static const struct scsi_host_template sil24_sht = { .tag_alloc_policy = BLK_TAG_ALLOC_FIFO, .sdev_groups = ata_ncq_sdev_groups, .change_queue_depth = ata_scsi_change_queue_depth, - .slave_configure = ata_scsi_slave_config + .device_configure = ata_scsi_device_configure }; static struct ata_port_operations sil24_ops = { diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index 9fb1575f8d..cb00f8244e 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -94,9 +94,6 @@ static const struct atmdev_ops fore200e_ops; -static LIST_HEAD(fore200e_boards); - - MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen"); MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION); diff --git a/drivers/atm/fore200e.h b/drivers/atm/fore200e.h index caf0ea6a32..5d95fe9fd8 100644 --- a/drivers/atm/fore200e.h +++ b/drivers/atm/fore200e.h @@ -830,7 +830,6 @@ typedef struct 
fore200e_vc_map { /* per-device data */ typedef struct fore200e { - struct list_head entry; /* next device */ const struct fore200e_bus* bus; /* bus-dependent code and data */ union fore200e_regs regs; /* bus-dependent registers */ struct atm_dev* atm_dev; /* ATM device */ diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index 96acfb2b58..8a7034b41d 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -284,27 +284,14 @@ static int ht16k33_initialize(struct ht16k33_priv *priv) static int ht16k33_bl_update_status(struct backlight_device *bl) { - int brightness = bl->props.brightness; + const int brightness = backlight_get_brightness(bl); struct ht16k33_priv *priv = bl_get_data(bl); - if (bl->props.power != FB_BLANK_UNBLANK || - bl->props.fb_blank != FB_BLANK_UNBLANK || - bl->props.state & BL_CORE_FBBLANK) - brightness = 0; - return ht16k33_brightness_set(priv, brightness); } -static int ht16k33_bl_check_fb(struct backlight_device *bl, struct fb_info *fi) -{ - struct ht16k33_priv *priv = bl_get_data(bl); - - return (fi == NULL) || (fi->par == priv); -} - static const struct backlight_ops ht16k33_bl_ops = { .update_status = ht16k33_bl_update_status, - .check_fb = ht16k33_bl_check_fb, }; /* @@ -496,6 +483,7 @@ static int ht16k33_led_probe(struct device *dev, struct led_classdev *led, led->max_brightness = MAX_BRIGHTNESS; err = devm_led_classdev_register_ext(dev, led, &init_data); + fwnode_handle_put(init_data.fwnode); if (err) dev_err(dev, "Failed to register LED\n"); diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 024b78a0cf..c66d070207 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -22,7 +22,7 @@ #include #define CREATE_TRACE_POINTS -#include +#include static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data); static struct cpumask scale_freq_counters_mask; @@ -160,26 +160,26 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) per_cpu(cpu_scale, cpu) = capacity; } -DEFINE_PER_CPU(unsigned long, thermal_pressure); +DEFINE_PER_CPU(unsigned long, hw_pressure); /** - * topology_update_thermal_pressure() - Update thermal pressure for CPUs + * topology_update_hw_pressure() - Update HW pressure for CPUs * @cpus : The related CPUs for which capacity has been reduced * @capped_freq : The maximum allowed frequency that CPUs can run at * - * Update the value of thermal pressure for all @cpus in the mask. The + * Update the value of HW pressure for all @cpus in the mask. The * cpumask should include all (online+offline) affected CPUs, to avoid * operating on stale data when hot-plug is used for some CPUs. The * @capped_freq reflects the currently allowed max CPUs frequency due to - * thermal capping. It might be also a boost frequency value, which is bigger + * HW capping. It might be also a boost frequency value, which is bigger * than the internal 'capacity_freq_ref' max frequency. In such case the * pressure value should simply be removed, since this is an indication that - * there is no thermal throttling. The @capped_freq must be provided in kHz. + * there is no HW throttling. The @capped_freq must be provided in kHz. 
*/ -void topology_update_thermal_pressure(const struct cpumask *cpus, +void topology_update_hw_pressure(const struct cpumask *cpus, unsigned long capped_freq) { - unsigned long max_capacity, capacity, th_pressure; + unsigned long max_capacity, capacity, pressure; u32 max_freq; int cpu; @@ -189,21 +189,21 @@ void topology_update_thermal_pressure(const struct cpumask *cpus, /* * Handle properly the boost frequencies, which should simply clean - * the thermal pressure value. + * the HW pressure value. */ if (max_freq <= capped_freq) capacity = max_capacity; else capacity = mult_frac(max_capacity, capped_freq, max_freq); - th_pressure = max_capacity - capacity; + pressure = max_capacity - capacity; - trace_thermal_pressure_update(cpu, th_pressure); + trace_hw_pressure_update(cpu, pressure); for_each_cpu(cpu, cpus) - WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure); + WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure); } -EXPORT_SYMBOL_GPL(topology_update_thermal_pressure); +EXPORT_SYMBOL_GPL(topology_update_hw_pressure); static ssize_t cpu_capacity_show(struct device *dev, struct device_attribute *attr, diff --git a/drivers/base/core.c b/drivers/base/core.c index 8e3bd230b1..2b4c0624b7 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2346,8 +2346,6 @@ static void fw_devlink_link_device(struct device *dev) /* Device links support end. */ -int (*platform_notify)(struct device *dev) = NULL; -int (*platform_notify_remove)(struct device *dev) = NULL; static struct kobject *dev_kobj; /* /sys/dev/char */ @@ -2395,16 +2393,10 @@ static void device_platform_notify(struct device *dev) acpi_device_notify(dev); software_node_notify(dev); - - if (platform_notify) - platform_notify(dev); } static void device_platform_notify_remove(struct device *dev) { - if (platform_notify_remove) - platform_notify_remove(dev); - software_node_notify_remove(dev); acpi_device_notify_remove(dev); @@ -2546,6 +2538,15 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, } EXPORT_SYMBOL_GPL(device_show_bool); +ssize_t device_show_string(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + + return sysfs_emit(buf, "%s\n", (char *)ea->var); +} +EXPORT_SYMBOL_GPL(device_show_string); + /** * device_release - free device structure. * @kobj: device's kobject. @@ -2847,15 +2848,6 @@ static void devm_attr_group_remove(struct device *dev, void *res) sysfs_remove_group(&dev->kobj, group); } -static void devm_attr_groups_remove(struct device *dev, void *res) -{ - union device_attr_group_devres *devres = res; - const struct attribute_group **groups = devres->groups; - - dev_dbg(dev, "%s: removing groups %p\n", __func__, groups); - sysfs_remove_groups(&dev->kobj, groups); -} - /** * devm_device_add_group - given a device, create a managed attribute group * @dev: The device to create the group for @@ -2888,42 +2880,6 @@ int devm_device_add_group(struct device *dev, const struct attribute_group *grp) } EXPORT_SYMBOL_GPL(devm_device_add_group); -/** - * devm_device_add_groups - create a bunch of managed attribute groups - * @dev: The device to create the group for - * @groups: The attribute groups to create, NULL terminated - * - * This function creates a bunch of managed attribute groups. If an error - * occurs when creating a group, all previously created groups will be - * removed, unwinding everything back to the original state when this - * function was called. 
It will explicitly warn and error if any of the - attribute files being created already exist. - * - * Returns 0 on success or error code from sysfs_create_group on failure. - */ -int devm_device_add_groups(struct device *dev, - const struct attribute_group **groups) -{ - union device_attr_group_devres *devres; - int error; - - devres = devres_alloc(devm_attr_groups_remove, - sizeof(*devres), GFP_KERNEL); - if (!devres) - return -ENOMEM; - - error = sysfs_create_groups(&dev->kobj, groups); - if (error) { - devres_free(devres); - return error; - } - - devres->groups = groups; - devres_add(dev, devres); - return 0; -} -EXPORT_SYMBOL_GPL(devm_device_add_groups); - static int device_add_attrs(struct device *dev) { const struct class *class = dev->class; diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 56fba44ba3..c61ecb0c2a 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -306,7 +306,7 @@ static ssize_t crash_hotplug_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sysfs_emit(buf, "%d\n", crash_hotplug_cpu_support()); + return sysfs_emit(buf, "%d\n", crash_check_hotplug_support()); } static DEVICE_ATTR_ADMIN_RO(crash_hotplug); #endif diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c index 7e2d1f0d90..82aeb09b3d 100644 --- a/drivers/base/devcoredump.c +++ b/drivers/base/devcoredump.c @@ -304,6 +304,29 @@ static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset, offset); } +/** + * dev_coredump_put - remove device coredump + * @dev: the struct device for the crashed device + * + * dev_coredump_put() removes the coredump, if one exists, for a given device from + * the file system and frees its associated data; otherwise, it does nothing. + * + * It is useful for modules that do not want to keep the coredump + * available after they are unloaded. + */ +void dev_coredump_put(struct device *dev) +{ + struct device *existing; + + existing = class_find_device(&devcd_class, NULL, dev, + devcd_match_failing); + if (existing) { + devcd_free(existing, NULL); + put_device(existing); + } +} +EXPORT_SYMBOL_GPL(dev_coredump_put); + /** * dev_coredumpm - create device coredump with read/free methods * @dev: the struct device for the crashed device diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 3df0025d12..8d709dbd4e 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -896,9 +896,12 @@ void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp) /* * Otherwise: allocate new, larger chunk. We need to allocate before * taking the lock as most probably the caller uses GFP_KERNEL. + * alloc_dr() will call check_dr_size() to reserve extra memory + * for struct devres automatically, so the user-requested size @new_size + * is passed to it directly, just as devm_kmalloc() does. */ new_dr = alloc_dr(devm_kmalloc_release, - total_new_size, gfp, dev_to_node(dev)); + new_size, gfp, dev_to_node(dev)); if (!new_dr) return NULL; @@ -1222,7 +1225,11 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu); */ void devm_free_percpu(struct device *dev, void __percpu *pdata) { - WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match, + /* + * Use devres_release() to prevent memory leaks, as + * devm_free_pages() does.
+ */ + WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match, (__force void *)pdata)); } EXPORT_SYMBOL_GPL(devm_free_percpu); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index c0436f46cf..67858eeb92 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -535,7 +535,7 @@ static DEVICE_ATTR_RW(auto_online_blocks); static ssize_t crash_hotplug_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sysfs_emit(buf, "%d\n", crash_hotplug_memory_support()); + return sysfs_emit(buf, "%d\n", crash_check_hotplug_support()); } static DEVICE_ATTR_RO(crash_hotplug); #endif diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 5679f966f6..4a67e83300 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -208,7 +208,7 @@ static ktime_t initcall_debug_start(struct device *dev, void *cb) if (!pm_print_times_enabled) return 0; - dev_info(dev, "calling %pS @ %i, parent: %s\n", cb, + dev_info(dev, "calling %ps @ %i, parent: %s\n", cb, task_pid_nr(current), dev->parent ? dev_name(dev->parent) : "none"); return ktime_get(); @@ -223,7 +223,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime, return; rettime = ktime_get(); - dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error, + dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error, (unsigned long long)ktime_us_delta(rettime, calltime)); } @@ -1927,7 +1927,7 @@ EXPORT_SYMBOL_GPL(dpm_suspend_start); void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret) { if (ret) - dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret); + dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret); } EXPORT_SYMBOL_GPL(__suspend_report_result); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index a917219fee..752b417e81 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -451,16 +451,15 @@ static struct wakeup_source *device_wakeup_detach(struct device *dev) * Detach the @dev's wakeup source object from it, unregister this wakeup source * object and destroy it. */ -int device_wakeup_disable(struct device *dev) +void device_wakeup_disable(struct device *dev) { struct wakeup_source *ws; if (!dev || !dev->power.can_wakeup) - return -EINVAL; + return; ws = device_wakeup_detach(dev); wakeup_source_unregister(ws); - return 0; } EXPORT_SYMBOL_GPL(device_wakeup_disable); @@ -502,7 +501,11 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable); */ int device_set_wakeup_enable(struct device *dev, bool enable) { - return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev); + if (enable) + return device_wakeup_enable(dev); + + device_wakeup_disable(dev); + return 0; } EXPORT_SYMBOL_GPL(device_set_wakeup_enable); diff --git a/drivers/base/property.c b/drivers/base/property.c index 7324a704a9..837d77e3af 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -868,20 +868,6 @@ struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode) } EXPORT_SYMBOL_GPL(fwnode_handle_get); -/** - * fwnode_handle_put - Drop reference to a device node - * @fwnode: Pointer to the device node to drop the reference to. - * - * This has to be used when terminating device_for_each_child_node() iteration - * with break or return to prevent stale device node references from being left - * behind. 
- */ -void fwnode_handle_put(struct fwnode_handle *fwnode) -{ - fwnode_call_void_op(fwnode, put); -} -EXPORT_SYMBOL_GPL(fwnode_handle_put); - /** * fwnode_device_is_available - check if a device is available for use * @fwnode: Pointer to the fwnode of the device. @@ -905,7 +891,7 @@ EXPORT_SYMBOL_GPL(fwnode_device_is_available); /** * device_get_child_node_count - return the number of child nodes for device - * @dev: Device to cound the child nodes for + * @dev: Device to count the child nodes for * * Return: the number of child nodes for a given device. */ diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index bcdb25bec7..83acccdc10 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -326,20 +326,22 @@ struct regmap_ram_data { * Create a test register map with data stored in RAM, not intended * for practical use. */ -struct regmap *__regmap_init_ram(const struct regmap_config *config, +struct regmap *__regmap_init_ram(struct device *dev, + const struct regmap_config *config, struct regmap_ram_data *data, struct lock_class_key *lock_key, const char *lock_name); -#define regmap_init_ram(config, data) \ - __regmap_lockdep_wrapper(__regmap_init_ram, #config, config, data) +#define regmap_init_ram(dev, config, data) \ + __regmap_lockdep_wrapper(__regmap_init_ram, #dev, dev, config, data) -struct regmap *__regmap_init_raw_ram(const struct regmap_config *config, +struct regmap *__regmap_init_raw_ram(struct device *dev, + const struct regmap_config *config, struct regmap_ram_data *data, struct lock_class_key *lock_key, const char *lock_name); -#define regmap_init_raw_ram(config, data) \ - __regmap_lockdep_wrapper(__regmap_init_raw_ram, #config, config, data) +#define regmap_init_raw_ram(dev, config, data) \ + __regmap_lockdep_wrapper(__regmap_init_raw_ram, #dev, dev, config, data) #endif diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c index 55999a50cc..e424334048 100644 --- a/drivers/base/regmap/regcache-maple.c +++ b/drivers/base/regmap/regcache-maple.c @@ -294,7 +294,7 @@ static int regcache_maple_exit(struct regmap *map) { struct maple_tree *mt = map->cache; MA_STATE(mas, mt, 0, UINT_MAX); - unsigned int *entry;; + unsigned int *entry; /* if we've already been called then just return */ if (!mt) diff --git a/drivers/base/regmap/regmap-i3c.c b/drivers/base/regmap/regmap-i3c.c index 0328b0b342..b5300b7c47 100644 --- a/drivers/base/regmap/regmap-i3c.c +++ b/drivers/base/regmap/regmap-i3c.c @@ -56,5 +56,5 @@ struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c, EXPORT_SYMBOL_GPL(__devm_regmap_init_i3c); MODULE_AUTHOR("Vitor Soares "); -MODULE_DESCRIPTION("Regmap I3C Module"); +MODULE_DESCRIPTION("regmap I3C Module"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c index bb2ab6129f..be32cd4e84 100644 --- a/drivers/base/regmap/regmap-kunit.c +++ b/drivers/base/regmap/regmap-kunit.c @@ -4,11 +4,26 @@ // // Copyright 2023 Arm Ltd +#include +#include #include #include "internal.h" #define BLOCK_TEST_SIZE 12 +KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *); + +struct regmap_test_priv { + struct device *dev; +}; + +struct regmap_test_param { + enum regcache_type cache; + enum regmap_endian val_endian; + + unsigned int from_reg; +}; + static void get_changed_bytes(void *orig, void *new, size_t size) { char *o = orig; @@ -27,57 +42,128 @@ static void get_changed_bytes(void *orig, void *new, size_t size) } 
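/*
 * Editor's note: the regmap-kunit.c changes above stop calling regmap_exit()
 * by hand at the end of every test and instead register a deferred cleanup
 * action (KUNIT_DEFINE_ACTION_WRAPPER plus kunit_add_action). The sketch
 * below only illustrates that KUnit pattern; it is not part of this patch,
 * and the example_* names are hypothetical.
 */
#include <kunit/test.h>
#include <kunit/resource.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Generate a void (*)(void *) wrapper so kfree() can be used as a deferred action. */
KUNIT_DEFINE_ACTION_WRAPPER(example_kfree_action, kfree, const void *);

static void example_deferred_cleanup_test(struct kunit *test)
{
	void *buf = kmalloc(16, GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, buf);

	/* buf is now freed automatically when the test case ends, on any path. */
	KUNIT_ASSERT_EQ(test, 0,
			kunit_add_action_or_reset(test, example_kfree_action, buf));

	/* ... exercise code that may return early or fail assertions ... */
}

static struct kunit_case example_cleanup_cases[] = {
	KUNIT_CASE(example_deferred_cleanup_test),
	{}
};

static struct kunit_suite example_cleanup_suite = {
	.name = "example-deferred-cleanup",
	.test_cases = example_cleanup_cases,
};
kunit_test_suite(example_cleanup_suite);

MODULE_LICENSE("GPL");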
static const struct regmap_config test_regmap_config = { - .max_register = BLOCK_TEST_SIZE, .reg_stride = 1, .val_bits = sizeof(unsigned int) * 8, }; -struct regcache_types { - enum regcache_type type; - const char *name; -}; +static const char *regcache_type_name(enum regcache_type type) +{ + switch (type) { + case REGCACHE_NONE: + return "none"; + case REGCACHE_FLAT: + return "flat"; + case REGCACHE_RBTREE: + return "rbtree"; + case REGCACHE_MAPLE: + return "maple"; + default: + return NULL; + } +} + +static const char *regmap_endian_name(enum regmap_endian endian) +{ + switch (endian) { + case REGMAP_ENDIAN_BIG: + return "big"; + case REGMAP_ENDIAN_LITTLE: + return "little"; + case REGMAP_ENDIAN_DEFAULT: + return "default"; + case REGMAP_ENDIAN_NATIVE: + return "native"; + default: + return NULL; + } +} -static void case_to_desc(const struct regcache_types *t, char *desc) +static void param_to_desc(const struct regmap_test_param *param, char *desc) { - strcpy(desc, t->name); + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x", + regcache_type_name(param->cache), + regmap_endian_name(param->val_endian), + param->from_reg); } -static const struct regcache_types regcache_types_list[] = { - { REGCACHE_NONE, "none" }, - { REGCACHE_FLAT, "flat" }, - { REGCACHE_RBTREE, "rbtree" }, - { REGCACHE_MAPLE, "maple" }, +static const struct regmap_test_param regcache_types_list[] = { + { .cache = REGCACHE_NONE }, + { .cache = REGCACHE_FLAT }, + { .cache = REGCACHE_RBTREE }, + { .cache = REGCACHE_MAPLE }, }; -KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc); +KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc); -static const struct regcache_types real_cache_types_list[] = { - { REGCACHE_FLAT, "flat" }, - { REGCACHE_RBTREE, "rbtree" }, - { REGCACHE_MAPLE, "maple" }, +static const struct regmap_test_param real_cache_types_only_list[] = { + { .cache = REGCACHE_FLAT }, + { .cache = REGCACHE_RBTREE }, + { .cache = REGCACHE_MAPLE }, }; -KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc); +KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc); + +static const struct regmap_test_param real_cache_types_list[] = { + { .cache = REGCACHE_FLAT, .from_reg = 0 }, + { .cache = REGCACHE_FLAT, .from_reg = 0x2001 }, + { .cache = REGCACHE_FLAT, .from_reg = 0x2002 }, + { .cache = REGCACHE_FLAT, .from_reg = 0x2003 }, + { .cache = REGCACHE_FLAT, .from_reg = 0x2004 }, + { .cache = REGCACHE_RBTREE, .from_reg = 0 }, + { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 }, + { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 }, + { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 }, + { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 }, + { .cache = REGCACHE_MAPLE, .from_reg = 0 }, + { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 }, + { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 }, + { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 }, + { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 }, +}; -static const struct regcache_types sparse_cache_types_list[] = { - { REGCACHE_RBTREE, "rbtree" }, - { REGCACHE_MAPLE, "maple" }, +KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc); + +static const struct regmap_test_param sparse_cache_types_list[] = { + { .cache = REGCACHE_RBTREE, .from_reg = 0 }, + { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 }, + { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 }, + { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 }, + { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 }, + { .cache = REGCACHE_MAPLE, .from_reg = 0 }, + { 
.cache = REGCACHE_MAPLE, .from_reg = 0x2001 }, + { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 }, + { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 }, + { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 }, }; -KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc); +KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc); -static struct regmap *gen_regmap(struct regmap_config *config, +static struct regmap *gen_regmap(struct kunit *test, + struct regmap_config *config, struct regmap_ram_data **data) { + const struct regmap_test_param *param = test->param_value; + struct regmap_test_priv *priv = test->priv; unsigned int *buf; struct regmap *ret; - size_t size = (config->max_register + 1) * sizeof(unsigned int); + size_t size; int i; struct reg_default *defaults; + config->cache_type = param->cache; config->disable_locking = config->cache_type == REGCACHE_RBTREE || config->cache_type == REGCACHE_MAPLE; + if (config->max_register == 0) { + config->max_register = param->from_reg; + if (config->num_reg_defaults) + config->max_register += (config->num_reg_defaults - 1) * + config->reg_stride; + else + config->max_register += (BLOCK_TEST_SIZE * config->reg_stride); + } + + size = (config->max_register + 1) * sizeof(unsigned int); buf = kmalloc(size, GFP_KERNEL); if (!buf) return ERR_PTR(-ENOMEM); @@ -98,37 +184,40 @@ static struct regmap *gen_regmap(struct regmap_config *config, config->reg_defaults = defaults; for (i = 0; i < config->num_reg_defaults; i++) { - defaults[i].reg = i * config->reg_stride; - defaults[i].def = buf[i * config->reg_stride]; + defaults[i].reg = param->from_reg + (i * config->reg_stride); + defaults[i].def = buf[param->from_reg + (i * config->reg_stride)]; } } - ret = regmap_init_ram(config, *data); + ret = regmap_init_ram(priv->dev, config, *data); if (IS_ERR(ret)) { kfree(buf); kfree(*data); + } else { + kunit_add_action(test, regmap_exit_action, ret); } return ret; } -static bool reg_5_false(struct device *context, unsigned int reg) +static bool reg_5_false(struct device *dev, unsigned int reg) { - return reg != 5; + struct kunit *test = dev_get_drvdata(dev); + const struct regmap_test_param *param = test->param_value; + + return reg != (param->from_reg + 5); } static void basic_read_write(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; unsigned int val, rval; config = test_regmap_config; - config.cache_type = t->type; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -141,14 +230,11 @@ static void basic_read_write(struct kunit *test) KUNIT_EXPECT_EQ(test, val, rval); /* If using a cache the cache satisfied the read */ - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]); - - regmap_exit(map); + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]); } static void bulk_write(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -156,9 +242,8 @@ static void bulk_write(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -178,14 +263,11 @@ static void bulk_write(struct kunit *test) /* If 
using a cache the cache satisfied the read */ for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); - - regmap_exit(map); + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); } static void bulk_read(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -193,9 +275,8 @@ static void bulk_read(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -211,14 +292,140 @@ static void bulk_read(struct kunit *test) /* If using a cache the cache satisfied the read */ for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); +} + +static void read_bypassed(struct kunit *test) +{ + const struct regmap_test_param *param = test->param_value; + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + unsigned int val[BLOCK_TEST_SIZE], rval; + int i; + + config = test_regmap_config; + + map = gen_regmap(test, &config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + KUNIT_EXPECT_FALSE(test, map->cache_bypass); + + get_random_bytes(&val, sizeof(val)); + + /* Write some test values */ + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val))); + + regcache_cache_only(map, true); + + /* + * While in cache-only regmap_read_bypassed() should return the register + * value and leave the map in cache-only. + */ + for (i = 0; i < ARRAY_SIZE(val); i++) { + /* Put inverted bits in rval to prove we really read the value */ + rval = ~val[i]; + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval)); + KUNIT_EXPECT_EQ(test, val[i], rval); + + rval = ~val[i]; + KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval)); + KUNIT_EXPECT_EQ(test, val[i], rval); + KUNIT_EXPECT_TRUE(test, map->cache_only); + KUNIT_EXPECT_FALSE(test, map->cache_bypass); + } - regmap_exit(map); + /* + * Change the underlying register values to prove it is returning + * real values not cached values. 
+ */ + for (i = 0; i < ARRAY_SIZE(val); i++) { + val[i] = ~val[i]; + data->vals[param->from_reg + i] = val[i]; + } + + for (i = 0; i < ARRAY_SIZE(val); i++) { + rval = ~val[i]; + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval)); + KUNIT_EXPECT_NE(test, val[i], rval); + + rval = ~val[i]; + KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval)); + KUNIT_EXPECT_EQ(test, val[i], rval); + KUNIT_EXPECT_TRUE(test, map->cache_only); + KUNIT_EXPECT_FALSE(test, map->cache_bypass); + } +} + +static void read_bypassed_volatile(struct kunit *test) +{ + const struct regmap_test_param *param = test->param_value; + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + unsigned int val[BLOCK_TEST_SIZE], rval; + int i; + + config = test_regmap_config; + /* All registers except #5 volatile */ + config.volatile_reg = reg_5_false; + + map = gen_regmap(test, &config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + KUNIT_EXPECT_FALSE(test, map->cache_bypass); + + get_random_bytes(&val, sizeof(val)); + + /* Write some test values */ + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val))); + + regcache_cache_only(map, true); + + /* + * While in cache-only regmap_read_bypassed() should return the register + * value and leave the map in cache-only. + */ + for (i = 0; i < ARRAY_SIZE(val); i++) { + /* Register #5 is non-volatile so should read from cache */ + KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY, + regmap_read(map, param->from_reg + i, &rval)); + + /* Put inverted bits in rval to prove we really read the value */ + rval = ~val[i]; + KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval)); + KUNIT_EXPECT_EQ(test, val[i], rval); + KUNIT_EXPECT_TRUE(test, map->cache_only); + KUNIT_EXPECT_FALSE(test, map->cache_bypass); + } + + /* + * Change the underlying register values to prove it is returning + * real values not cached values. + */ + for (i = 0; i < ARRAY_SIZE(val); i++) { + val[i] = ~val[i]; + data->vals[param->from_reg + i] = val[i]; + } + + for (i = 0; i < ARRAY_SIZE(val); i++) { + if (i == 5) + continue; + + rval = ~val[i]; + KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval)); + KUNIT_EXPECT_EQ(test, val[i], rval); + KUNIT_EXPECT_TRUE(test, map->cache_only); + KUNIT_EXPECT_FALSE(test, map->cache_bypass); + } } static void write_readonly(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -226,11 +433,10 @@ static void write_readonly(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; config.num_reg_defaults = BLOCK_TEST_SIZE; config.writeable_reg = reg_5_false; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -247,13 +453,10 @@ static void write_readonly(struct kunit *test) /* Did that match what we see on the device? 
*/ for (i = 0; i < BLOCK_TEST_SIZE; i++) KUNIT_EXPECT_EQ(test, i != 5, data->written[i]); - - regmap_exit(map); } static void read_writeonly(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -261,10 +464,9 @@ static void read_writeonly(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; config.readable_reg = reg_5_false; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -277,7 +479,7 @@ static void read_writeonly(struct kunit *test) * fail if we aren't using the flat cache. */ for (i = 0; i < BLOCK_TEST_SIZE; i++) { - if (t->type != REGCACHE_FLAT) { + if (config.cache_type != REGCACHE_FLAT) { KUNIT_EXPECT_EQ(test, i != 5, regmap_read(map, i, &val) == 0); } else { @@ -287,13 +489,10 @@ static void read_writeonly(struct kunit *test) /* Did we trigger a hardware access? */ KUNIT_EXPECT_FALSE(test, data->read[5]); - - regmap_exit(map); } static void reg_defaults(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -301,10 +500,9 @@ static void reg_defaults(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; config.num_reg_defaults = BLOCK_TEST_SIZE; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -316,12 +514,11 @@ static void reg_defaults(struct kunit *test) /* The data should have been read from cache if there was one */ for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); } static void reg_defaults_read_dev(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -329,17 +526,16 @@ static void reg_defaults_read_dev(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; config.num_reg_defaults_raw = BLOCK_TEST_SIZE; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; /* We should have read the cache defaults back from the map */ for (i = 0; i < BLOCK_TEST_SIZE; i++) { - KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]); + KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]); data->read[i] = false; } @@ -350,12 +546,11 @@ static void reg_defaults_read_dev(struct kunit *test) /* The data should have been read from cache if there was one */ for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); } static void register_patch(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -365,10 +560,9 @@ static void register_patch(struct kunit *test) /* We need defaults so readback works */ config = test_regmap_config; - config.cache_type = t->type; config.num_reg_defaults = BLOCK_TEST_SIZE; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, 
&data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -401,13 +595,10 @@ static void register_patch(struct kunit *test) break; } } - - regmap_exit(map); } static void stride(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -415,16 +606,22 @@ static void stride(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; config.reg_stride = 2; config.num_reg_defaults = BLOCK_TEST_SIZE / 2; - map = gen_regmap(&config, &data); + /* + * Allow one extra register so that the read/written arrays + * are sized big enough to include an entry for the odd + * address past the final reg_default register. + */ + config.max_register = BLOCK_TEST_SIZE; + + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; - /* Only even registers can be accessed, try both read and write */ + /* Only even addresses can be accessed, try both read and write */ for (i = 0; i < BLOCK_TEST_SIZE; i++) { data->read[i] = false; data->written[i] = false; @@ -437,15 +634,13 @@ static void stride(struct kunit *test) } else { KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval)); KUNIT_EXPECT_EQ(test, data->vals[i], rval); - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval)); KUNIT_EXPECT_TRUE(test, data->written[i]); } } - - regmap_exit(map); } static struct regmap_range_cfg test_range = { @@ -481,7 +676,6 @@ static bool test_range_all_volatile(struct device *dev, unsigned int reg) static void basic_ranges(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -489,13 +683,12 @@ static void basic_ranges(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; config.volatile_reg = test_range_all_volatile; config.ranges = &test_range; config.num_ranges = 1; config.max_register = test_range.range_max; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -546,14 +739,11 @@ static void basic_ranges(struct kunit *test) KUNIT_EXPECT_FALSE(test, data->read[i]); KUNIT_EXPECT_FALSE(test, data->written[i]); } - - regmap_exit(map); } /* Try to stress dynamic creation of cache data structures */ static void stress_insert(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -562,10 +752,9 @@ static void stress_insert(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; config.max_register = 300; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -599,24 +788,21 @@ static void stress_insert(struct kunit *test) for (i = 0; i < config.max_register; i ++) { KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval)); KUNIT_EXPECT_EQ(test, rval, vals[i]); - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); } - - regmap_exit(map); } static void cache_bypass(struct kunit *test) { - struct regcache_types *t = (struct regcache_types 
*)test->param_value; + const struct regmap_test_param *param = test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; unsigned int val, rval; config = test_regmap_config; - config.cache_type = t->type; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -624,28 +810,26 @@ static void cache_bypass(struct kunit *test) get_random_bytes(&val, sizeof(val)); /* Ensure the cache has a value in it */ - KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val)); + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val)); /* Bypass then write a different value */ regcache_cache_bypass(map, true); - KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1)); + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1)); /* Read the bypassed value */ - KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval)); + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval)); KUNIT_EXPECT_EQ(test, val + 1, rval); - KUNIT_EXPECT_EQ(test, data->vals[0], rval); + KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval); /* Disable bypass, the cache should still return the original value */ regcache_cache_bypass(map, false); - KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval)); + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval)); KUNIT_EXPECT_EQ(test, val, rval); - - regmap_exit(map); } -static void cache_sync(struct kunit *test) +static void cache_sync_marked_dirty(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; + const struct regmap_test_param *param = test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -653,9 +837,8 @@ static void cache_sync(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -663,10 +846,10 @@ static void cache_sync(struct kunit *test) get_random_bytes(&val, sizeof(val)); /* Put some data into the cache */ - KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val, + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, BLOCK_TEST_SIZE)); for (i = 0; i < BLOCK_TEST_SIZE; i++) - data->written[i] = false; + data->written[param->from_reg + i] = false; /* Trash the data on the device itself then resync */ regcache_mark_dirty(map); @@ -674,16 +857,63 @@ static void cache_sync(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); /* Did we just write the correct data out? 
*/ - KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val)); + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val)); for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_EXPECT_EQ(test, true, data->written[i]); + KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]); +} - regmap_exit(map); +static void cache_sync_after_cache_only(struct kunit *test) +{ + const struct regmap_test_param *param = test->param_value; + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + unsigned int val[BLOCK_TEST_SIZE]; + unsigned int val_mask; + int i; + + config = test_regmap_config; + + map = gen_regmap(test, &config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + val_mask = GENMASK(config.val_bits - 1, 0); + get_random_bytes(&val, sizeof(val)); + + /* Put some data into the cache */ + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, + BLOCK_TEST_SIZE)); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + data->written[param->from_reg + i] = false; + + /* Set cache-only and change the values */ + regcache_cache_only(map, true); + for (i = 0; i < ARRAY_SIZE(val); ++i) + val[i] = ~val[i] & val_mask; + + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, + BLOCK_TEST_SIZE)); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]); + + KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val)); + + /* Exit cache-only and sync the cache without marking hardware registers dirty */ + regcache_cache_only(map, false); + + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); + + /* Did we just write the correct data out? */ + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val)); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]); } -static void cache_sync_defaults(struct kunit *test) +static void cache_sync_defaults_marked_dirty(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; + const struct regmap_test_param *param = test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -691,10 +921,9 @@ static void cache_sync_defaults(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; config.num_reg_defaults = BLOCK_TEST_SIZE; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -702,24 +931,85 @@ static void cache_sync_defaults(struct kunit *test) get_random_bytes(&val, sizeof(val)); /* Change the value of one register */ - KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val)); + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val)); /* Resync */ regcache_mark_dirty(map); for (i = 0; i < BLOCK_TEST_SIZE; i++) - data->written[i] = false; + data->written[param->from_reg + i] = false; KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); /* Did we just sync the one register we touched? 
*/ for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_EXPECT_EQ(test, i == 2, data->written[i]); + KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]); + + /* Rewrite registers back to their defaults */ + for (i = 0; i < config.num_reg_defaults; ++i) + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg, + config.reg_defaults[i].def)); + + /* + * Resync after regcache_mark_dirty() should not write out registers + * that are at default value + */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + data->written[param->from_reg + i] = false; + regcache_mark_dirty(map); + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]); +} + +static void cache_sync_default_after_cache_only(struct kunit *test) +{ + const struct regmap_test_param *param = test->param_value; + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + unsigned int orig_val; + int i; + + config = test_regmap_config; + config.num_reg_defaults = BLOCK_TEST_SIZE; + + map = gen_regmap(test, &config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val)); + + /* Enter cache-only and change the value of one register */ + regcache_cache_only(map, true); + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1)); + + /* Exit cache-only and resync, should write out the changed register */ + regcache_cache_only(map, false); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + data->written[param->from_reg + i] = false; + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); - regmap_exit(map); + /* Was the register written out? */ + KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]); + KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1); + + /* Enter cache-only and write register back to its default value */ + regcache_cache_only(map, true); + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val)); + + /* Resync should write out the new value */ + regcache_cache_only(map, false); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + data->written[param->from_reg + i] = false; + + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); + KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]); + KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val); } static void cache_sync_readonly(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; + const struct regmap_test_param *param = test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -727,40 +1017,37 @@ static void cache_sync_readonly(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; config.writeable_reg = reg_5_false; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; /* Read all registers to fill the cache */ for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val)); + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val)); /* Change the value of all registers, readonly should fail */ get_random_bytes(&val, sizeof(val)); regcache_cache_only(map, true); for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0); + KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0); 
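/*
 * Editor's note: the cache_sync_*() and cache-only tests above exercise the
 * sequence a driver typically uses around a power cycle. A minimal sketch of
 * that driver-side pattern is shown here for orientation; it is not part of
 * this patch and the foo_* names are hypothetical.
 */
#include <linux/regmap.h>

struct foo_chip {
	struct regmap *map;	/* hypothetical regmap created at probe time */
};

static int foo_suspend(struct foo_chip *foo)
{
	/* Route all further writes into the cache; the device is losing power. */
	regcache_cache_only(foo->map, true);
	return 0;
}

static int foo_resume(struct foo_chip *foo)
{
	/* Hardware registers were reset, so flag the whole cache as dirty... */
	regcache_cache_only(foo->map, false);
	regcache_mark_dirty(foo->map);

	/* ...and write back every cached value that differs from its default. */
	return regcache_sync(foo->map);
}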
regcache_cache_only(map, false); /* Resync */ for (i = 0; i < BLOCK_TEST_SIZE; i++) - data->written[i] = false; + data->written[param->from_reg + i] = false; KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); /* Did that match what we see on the device? */ for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_EXPECT_EQ(test, i != 5, data->written[i]); - - regmap_exit(map); + KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]); } static void cache_sync_patch(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; + const struct regmap_test_param *param = test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -770,23 +1057,22 @@ static void cache_sync_patch(struct kunit *test) /* We need defaults so readback works */ config = test_regmap_config; - config.cache_type = t->type; config.num_reg_defaults = BLOCK_TEST_SIZE; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; /* Stash the original values */ - KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, BLOCK_TEST_SIZE)); /* Patch a couple of values */ - patch[0].reg = 2; + patch[0].reg = param->from_reg + 2; patch[0].def = rval[2] + 1; patch[0].delay_us = 0; - patch[1].reg = 5; + patch[1].reg = param->from_reg + 5; patch[1].def = rval[5] + 1; patch[1].delay_us = 0; KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch, @@ -795,33 +1081,31 @@ static void cache_sync_patch(struct kunit *test) /* Sync the cache */ regcache_mark_dirty(map); for (i = 0; i < BLOCK_TEST_SIZE; i++) - data->written[i] = false; + data->written[param->from_reg + i] = false; KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); /* The patch should be on the device but not in the cache */ for (i = 0; i < BLOCK_TEST_SIZE; i++) { - KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val)); + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val)); KUNIT_EXPECT_EQ(test, val, rval[i]); switch (i) { case 2: case 5: - KUNIT_EXPECT_EQ(test, true, data->written[i]); - KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1); + KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]); + KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1); break; default: - KUNIT_EXPECT_EQ(test, false, data->written[i]); - KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]); + KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]); + KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]); break; } } - - regmap_exit(map); } static void cache_drop(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; + const struct regmap_test_param *param = test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -829,41 +1113,267 @@ static void cache_drop(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; config.num_reg_defaults = BLOCK_TEST_SIZE; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; /* Ensure the data is read from the cache */ for (i = 0; i < BLOCK_TEST_SIZE; i++) - data->read[i] = false; - KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, + data->read[param->from_reg + i] = false; + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, BLOCK_TEST_SIZE)); for (i = 0; i < BLOCK_TEST_SIZE; 
i++) { - KUNIT_EXPECT_FALSE(test, data->read[i]); - data->read[i] = false; + KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]); + data->read[param->from_reg + i] = false; } - KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval)); + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); /* Drop some registers */ - KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5)); + KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3, + param->from_reg + 5)); /* Reread and check only the dropped registers hit the device. */ - KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, BLOCK_TEST_SIZE)); for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5); - KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval)); + KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5); + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); +} + +static void cache_drop_with_non_contiguous_ranges(struct kunit *test) +{ + const struct regmap_test_param *param = test->param_value; + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + unsigned int val[4][BLOCK_TEST_SIZE]; + unsigned int reg; + const int num_ranges = ARRAY_SIZE(val) * 2; + int rangeidx, i; + + static_assert(ARRAY_SIZE(val) == 4); + + config = test_regmap_config; + config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE); + + map = gen_regmap(test, &config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + for (i = 0; i < config.max_register + 1; i++) + data->written[i] = false; + + /* Create non-contiguous cache blocks by writing every other range */ + get_random_bytes(&val, sizeof(val)); + for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) { + reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg, + &val[rangeidx / 2], + BLOCK_TEST_SIZE)); + KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], + &val[rangeidx / 2], sizeof(val[rangeidx / 2])); + } + + /* Check that odd ranges weren't written */ + for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) { + reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_FALSE(test, data->written[reg + i]); + } + + /* Drop range 2 */ + reg = param->from_reg + (2 * BLOCK_TEST_SIZE); + KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1)); + + /* Drop part of range 4 */ + reg = param->from_reg + (4 * BLOCK_TEST_SIZE); + KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5)); + + /* Mark dirty and reset mock registers to 0 */ + regcache_mark_dirty(map); + for (i = 0; i < config.max_register + 1; i++) { + data->vals[i] = 0; + data->written[i] = false; + } + + /* The registers that were dropped from range 4 should now remain at 0 */ + val[4 / 2][3] = 0; + val[4 / 2][4] = 0; + val[4 / 2][5] = 0; + + /* Sync and check that the expected register ranges were written */ + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); + + /* Check that odd ranges weren't written */ + for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) { + reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_FALSE(test, data->written[reg + i]); + } + + /* Check that even ranges (except 2 and 4) were written */ + for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) { + if 
((rangeidx == 2) || (rangeidx == 4)) + continue; + + reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_TRUE(test, data->written[reg + i]); + + KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], + &val[rangeidx / 2], sizeof(val[rangeidx / 2])); + } + + /* Check that range 2 wasn't written */ + reg = param->from_reg + (2 * BLOCK_TEST_SIZE); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_FALSE(test, data->written[reg + i]); + + /* Check that range 4 was partially written */ + reg = param->from_reg + (4 * BLOCK_TEST_SIZE); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5); - regmap_exit(map); + KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2])); + + /* Nothing before param->from_reg should have been written */ + for (i = 0; i < param->from_reg; i++) + KUNIT_EXPECT_FALSE(test, data->written[i]); +} + +static void cache_drop_all_and_sync_marked_dirty(struct kunit *test) +{ + const struct regmap_test_param *param = test->param_value; + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + unsigned int rval[BLOCK_TEST_SIZE]; + int i; + + config = test_regmap_config; + config.num_reg_defaults = BLOCK_TEST_SIZE; + + map = gen_regmap(test, &config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + /* Ensure the data is read from the cache */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + data->read[param->from_reg + i] = false; + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, + BLOCK_TEST_SIZE)); + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); + + /* Change all values in cache from defaults */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1)); + + /* Drop all registers */ + KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register)); + + /* Mark dirty and cache sync should not write anything. */ + regcache_mark_dirty(map); + for (i = 0; i < BLOCK_TEST_SIZE; i++) + data->written[param->from_reg + i] = false; + + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); + for (i = 0; i <= config.max_register; i++) + KUNIT_EXPECT_FALSE(test, data->written[i]); +} + +static void cache_drop_all_and_sync_no_defaults(struct kunit *test) +{ + const struct regmap_test_param *param = test->param_value; + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + unsigned int rval[BLOCK_TEST_SIZE]; + int i; + + config = test_regmap_config; + + map = gen_regmap(test, &config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + /* Ensure the data is read from the cache */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + data->read[param->from_reg + i] = false; + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, + BLOCK_TEST_SIZE)); + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); + + /* Change all values in cache */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1)); + + /* Drop all registers */ + KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register)); + + /* + * Sync cache without marking it dirty. All registers were dropped + * so the cache should not have any entries to write out. 
+ */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + data->written[param->from_reg + i] = false; + + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); + for (i = 0; i <= config.max_register; i++) + KUNIT_EXPECT_FALSE(test, data->written[i]); +} + +static void cache_drop_all_and_sync_has_defaults(struct kunit *test) +{ + const struct regmap_test_param *param = test->param_value; + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + unsigned int rval[BLOCK_TEST_SIZE]; + int i; + + config = test_regmap_config; + config.num_reg_defaults = BLOCK_TEST_SIZE; + + map = gen_regmap(test, &config, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + /* Ensure the data is read from the cache */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + data->read[param->from_reg + i] = false; + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, + BLOCK_TEST_SIZE)); + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); + + /* Change all values in cache from defaults */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1)); + + /* Drop all registers */ + KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register)); + + /* + * Sync cache without marking it dirty. All registers were dropped + * so the cache should not have any entries to write out. + */ + for (i = 0; i < BLOCK_TEST_SIZE; i++) + data->written[param->from_reg + i] = false; + + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); + for (i = 0; i <= config.max_register; i++) + KUNIT_EXPECT_FALSE(test, data->written[i]); } static void cache_present(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; + const struct regmap_test_param *param = test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -871,39 +1381,35 @@ static void cache_present(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; for (i = 0; i < BLOCK_TEST_SIZE; i++) - data->read[i] = false; + data->read[param->from_reg + i] = false; /* No defaults so no registers cached. 
*/ for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i)); + KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i)); /* We didn't trigger any reads */ for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_ASSERT_FALSE(test, data->read[i]); + KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]); /* Fill the cache */ for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val)); + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val)); /* Now everything should be cached */ for (i = 0; i < BLOCK_TEST_SIZE; i++) - KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i)); - - regmap_exit(map); + KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i)); } /* Check that caching the window register works with sync */ static void cache_range_window_reg(struct kunit *test) { - struct regcache_types *t = (struct regcache_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -911,13 +1417,12 @@ static void cache_range_window_reg(struct kunit *test) int i; config = test_regmap_config; - config.cache_type = t->type; config.volatile_reg = test_range_window_volatile; config.ranges = &test_range; config.num_ranges = 1; config.max_register = test_range.range_max; - map = gen_regmap(&config, &data); + map = gen_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -953,41 +1458,29 @@ static void cache_range_window_reg(struct kunit *test) KUNIT_ASSERT_EQ(test, val, 2); } -struct raw_test_types { - const char *name; - - enum regcache_type cache_type; - enum regmap_endian val_endian; -}; - -static void raw_to_desc(const struct raw_test_types *t, char *desc) -{ - strcpy(desc, t->name); -} - -static const struct raw_test_types raw_types_list[] = { - { "none-little", REGCACHE_NONE, REGMAP_ENDIAN_LITTLE }, - { "none-big", REGCACHE_NONE, REGMAP_ENDIAN_BIG }, - { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE }, - { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG }, - { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE }, - { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG }, - { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE }, - { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG }, +static const struct regmap_test_param raw_types_list[] = { + { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_LITTLE }, + { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG }, + { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE }, + { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG }, + { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE }, + { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG }, + { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE }, + { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG }, }; -KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc); +KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc); -static const struct raw_test_types raw_cache_types_list[] = { - { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE }, - { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG }, - { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE }, - { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG }, - { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE }, - { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG }, +static const struct regmap_test_param raw_cache_types_list[] = { + { .cache = REGCACHE_FLAT, .val_endian = 
REGMAP_ENDIAN_LITTLE }, + { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG }, + { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE }, + { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG }, + { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE }, + { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG }, }; -KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc); +KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc); static const struct regmap_config raw_regmap_config = { .max_register = BLOCK_TEST_SIZE, @@ -997,18 +1490,20 @@ static const struct regmap_config raw_regmap_config = { .val_bits = 16, }; -static struct regmap *gen_raw_regmap(struct regmap_config *config, - struct raw_test_types *test_type, +static struct regmap *gen_raw_regmap(struct kunit *test, + struct regmap_config *config, struct regmap_ram_data **data) { + struct regmap_test_priv *priv = test->priv; + const struct regmap_test_param *param = test->param_value; u16 *buf; struct regmap *ret; size_t size = (config->max_register + 1) * config->reg_bits / 8; int i; struct reg_default *defaults; - config->cache_type = test_type->cache_type; - config->val_format_endian = test_type->val_endian; + config->cache_type = param->cache; + config->val_format_endian = param->val_endian; config->disable_locking = config->cache_type == REGCACHE_RBTREE || config->cache_type == REGCACHE_MAPLE; @@ -1033,7 +1528,7 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config, for (i = 0; i < config->num_reg_defaults; i++) { defaults[i].reg = i; - switch (test_type->val_endian) { + switch (param->val_endian) { case REGMAP_ENDIAN_LITTLE: defaults[i].def = le16_to_cpu(buf[i]); break; @@ -1052,10 +1547,12 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config, if (config->cache_type == REGCACHE_NONE) config->num_reg_defaults = 0; - ret = regmap_init_raw_ram(config, *data); + ret = regmap_init_raw_ram(priv->dev, config, *data); if (IS_ERR(ret)) { kfree(buf); kfree(*data); + } else { + kunit_add_action(test, regmap_exit_action, ret); } return ret; @@ -1063,7 +1560,6 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config, static void raw_read_defaults_single(struct kunit *test) { - struct raw_test_types *t = (struct raw_test_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -1072,7 +1568,7 @@ static void raw_read_defaults_single(struct kunit *test) config = raw_regmap_config; - map = gen_raw_regmap(&config, t, &data); + map = gen_raw_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -1082,13 +1578,10 @@ static void raw_read_defaults_single(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval)); KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval); } - - regmap_exit(map); } static void raw_read_defaults(struct kunit *test) { - struct raw_test_types *t = (struct raw_test_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -1099,35 +1592,31 @@ static void raw_read_defaults(struct kunit *test) config = raw_regmap_config; - map = gen_raw_regmap(&config, t, &data); + map = gen_raw_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; val_len = sizeof(*rval) * (config.max_register + 1); - rval = kmalloc(val_len, GFP_KERNEL); + rval = kunit_kmalloc(test, val_len, GFP_KERNEL); KUNIT_ASSERT_TRUE(test, rval != NULL); if 
(!rval) return; - + /* Check that we can read the defaults via the API */ KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len)); for (i = 0; i < config.max_register + 1; i++) { def = config.reg_defaults[i].def; if (config.val_format_endian == REGMAP_ENDIAN_BIG) { - KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i])); + KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i])); } else { - KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i])); + KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i])); } } - - kfree(rval); - regmap_exit(map); } static void raw_write_read_single(struct kunit *test) { - struct raw_test_types *t = (struct raw_test_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -1136,7 +1625,7 @@ static void raw_write_read_single(struct kunit *test) config = raw_regmap_config; - map = gen_raw_regmap(&config, t, &data); + map = gen_raw_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -1147,13 +1636,10 @@ static void raw_write_read_single(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val)); KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval)); KUNIT_EXPECT_EQ(test, val, rval); - - regmap_exit(map); } static void raw_write(struct kunit *test) { - struct raw_test_types *t = (struct raw_test_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -1164,7 +1650,7 @@ static void raw_write(struct kunit *test) config = raw_regmap_config; - map = gen_raw_regmap(&config, t, &data); + map = gen_raw_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -1185,10 +1671,10 @@ static void raw_write(struct kunit *test) case 3: if (config.val_format_endian == REGMAP_ENDIAN_BIG) { KUNIT_EXPECT_EQ(test, rval, - be16_to_cpu(val[i % 2])); + be16_to_cpu((__force __be16)val[i % 2])); } else { KUNIT_EXPECT_EQ(test, rval, - le16_to_cpu(val[i % 2])); + le16_to_cpu((__force __le16)val[i % 2])); } break; default: @@ -1199,8 +1685,6 @@ static void raw_write(struct kunit *test) /* The values should appear in the "hardware" */ KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val)); - - regmap_exit(map); } static bool reg_zero(struct device *dev, unsigned int reg) @@ -1215,7 +1699,6 @@ static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg) static void raw_noinc_write(struct kunit *test) { - struct raw_test_types *t = (struct raw_test_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -1228,7 +1711,7 @@ static void raw_noinc_write(struct kunit *test) config.writeable_noinc_reg = reg_zero; config.readable_noinc_reg = reg_zero; - map = gen_raw_regmap(&config, t, &data); + map = gen_raw_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -1259,13 +1742,10 @@ static void raw_noinc_write(struct kunit *test) /* Make sure we didn't touch the register after the noinc register */ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val)); KUNIT_ASSERT_EQ(test, val_test, val); - - regmap_exit(map); } static void raw_sync(struct kunit *test) { - struct raw_test_types *t = (struct raw_test_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -1276,7 +1756,7 @@ static void raw_sync(struct kunit *test) config = raw_regmap_config; - map = gen_raw_regmap(&config, t, &data); + map = gen_raw_regmap(test, &config, &data); 
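/*
 * Editor's illustrative sketch, not part of the patch: the pattern the
 * regmap-kunit hunks in this patch converge on.  Parameters are read from
 * test->param_value, buffers come from kunit_kmalloc(), and cleanup is
 * registered with kunit_add_action(), so test bodies no longer need explicit
 * kfree()/regmap_exit() calls.  All "example_" names are hypothetical.
 */
#include <kunit/test.h>
#include <linux/slab.h>

struct example_param {
	unsigned int stride;
};

static const struct example_param example_params_list[] = {
	{ .stride = 1 },
	{ .stride = 2 },
};

static void example_param_to_desc(const struct example_param *p, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "stride-%u", p->stride);
}

KUNIT_ARRAY_PARAM(example, example_params_list, example_param_to_desc);

static void example_free_action(void *ctx)
{
	kfree(ctx);
}

static void example_test(struct kunit *test)
{
	const struct example_param *param = test->param_value;
	void *scratch = kzalloc(32, GFP_KERNEL);
	u16 *buf;

	/* Test-managed allocation, freed automatically when the test ends. */
	buf = kunit_kmalloc(test, 16 * sizeof(*buf), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, buf);

	/* Tie a manually allocated resource to the test lifetime. */
	kunit_add_action(test, example_free_action, scratch);

	KUNIT_EXPECT_GT(test, param->stride, 0U);
}

static struct kunit_case example_cases[] = {
	KUNIT_CASE_PARAM(example_test, example_gen_params),
	{}
};

static struct kunit_suite example_suite = {
	.name = "example",
	.test_cases = example_cases,
};
kunit_test_suite(example_suite);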
KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -1300,10 +1780,10 @@ static void raw_sync(struct kunit *test) case 3: if (config.val_format_endian == REGMAP_ENDIAN_BIG) { KUNIT_EXPECT_EQ(test, rval, - be16_to_cpu(val[i - 2])); + be16_to_cpu((__force __be16)val[i - 2])); } else { KUNIT_EXPECT_EQ(test, rval, - le16_to_cpu(val[i - 2])); + le16_to_cpu((__force __le16)val[i - 2])); } break; case 4: @@ -1323,7 +1803,7 @@ static void raw_sync(struct kunit *test) val[2] = cpu_to_be16(val[2]); else val[2] = cpu_to_le16(val[2]); - + /* The values should not appear in the "hardware" */ KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val)); @@ -1337,13 +1817,10 @@ static void raw_sync(struct kunit *test) /* The values should now appear in the "hardware" */ KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val)); - - regmap_exit(map); } static void raw_ranges(struct kunit *test) { - struct raw_test_types *t = (struct raw_test_types *)test->param_value; struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; @@ -1356,7 +1833,7 @@ static void raw_ranges(struct kunit *test) config.num_ranges = 1; config.max_register = test_range.range_max; - map = gen_raw_regmap(&config, t, &data); + map = gen_raw_regmap(test, &config, &data); KUNIT_ASSERT_FALSE(test, IS_ERR(map)); if (IS_ERR(map)) return; @@ -1402,12 +1879,12 @@ static void raw_ranges(struct kunit *test) KUNIT_EXPECT_FALSE(test, data->read[i]); KUNIT_EXPECT_FALSE(test, data->written[i]); } - - regmap_exit(map); } static struct kunit_case regmap_test_cases[] = { KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params), + KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params), + KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params), KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params), KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params), KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params), @@ -1419,13 +1896,19 @@ static struct kunit_case regmap_test_cases[] = { KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params), KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params), KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params), - KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params), - KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params), + KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params), + KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params), + KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params), + KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params), KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params), KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params), KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params), + KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params), + KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params), + KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params), + KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params), KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params), - KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_gen_params), + KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params), KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params), KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params), @@ 
-1437,8 +1920,40 @@ static struct kunit_case regmap_test_cases[] = { {} }; +static int regmap_test_init(struct kunit *test) +{ + struct regmap_test_priv *priv; + struct device *dev; + + priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + test->priv = priv; + + dev = kunit_device_register(test, "regmap_test"); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + priv->dev = get_device(dev); + dev_set_drvdata(dev, test); + + return 0; +} + +static void regmap_test_exit(struct kunit *test) +{ + struct regmap_test_priv *priv = test->priv; + + /* Destroy the dummy struct device */ + if (priv && priv->dev) + put_device(priv->dev); +} + static struct kunit_suite regmap_test_suite = { .name = "regmap", + .init = regmap_test_init, + .exit = regmap_test_exit, .test_cases = regmap_test_cases, }; kunit_test_suite(regmap_test_suite); diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c index 6aa6a24094..9573bf3b52 100644 --- a/drivers/base/regmap/regmap-mdio.c +++ b/drivers/base/regmap/regmap-mdio.c @@ -117,5 +117,5 @@ struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev, EXPORT_SYMBOL_GPL(__devm_regmap_init_mdio); MODULE_AUTHOR("Sander Vanheule "); -MODULE_DESCRIPTION("Regmap MDIO Module"); +MODULE_DESCRIPTION("regmap MDIO Module"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c index 192d6b131d..5b4cbf982a 100644 --- a/drivers/base/regmap/regmap-ram.c +++ b/drivers/base/regmap/regmap-ram.c @@ -53,7 +53,8 @@ static const struct regmap_bus regmap_ram = { .free_context = regmap_ram_free_context, }; -struct regmap *__regmap_init_ram(const struct regmap_config *config, +struct regmap *__regmap_init_ram(struct device *dev, + const struct regmap_config *config, struct regmap_ram_data *data, struct lock_class_key *lock_key, const char *lock_name) @@ -75,7 +76,7 @@ struct regmap *__regmap_init_ram(const struct regmap_config *config, if (!data->written) return ERR_PTR(-ENOMEM); - map = __regmap_init(NULL, ®map_ram, data, config, + map = __regmap_init(dev, ®map_ram, data, config, lock_key, lock_name); return map; diff --git a/drivers/base/regmap/regmap-raw-ram.c b/drivers/base/regmap/regmap-raw-ram.c index 93ae07b503..69eabfb89e 100644 --- a/drivers/base/regmap/regmap-raw-ram.c +++ b/drivers/base/regmap/regmap-raw-ram.c @@ -107,7 +107,8 @@ static const struct regmap_bus regmap_raw_ram = { .free_context = regmap_raw_ram_free_context, }; -struct regmap *__regmap_init_raw_ram(const struct regmap_config *config, +struct regmap *__regmap_init_raw_ram(struct device *dev, + const struct regmap_config *config, struct regmap_ram_data *data, struct lock_class_key *lock_key, const char *lock_name) @@ -134,7 +135,7 @@ struct regmap *__regmap_init_raw_ram(const struct regmap_config *config, data->reg_endian = config->reg_format_endian; - map = __regmap_init(NULL, ®map_raw_ram, data, config, + map = __regmap_init(dev, ®map_raw_ram, data, config, lock_key, lock_name); return map; diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c index 388c3a087b..c99eada837 100644 --- a/drivers/base/regmap/regmap-sdw-mbq.c +++ b/drivers/base/regmap/regmap-sdw-mbq.c @@ -97,5 +97,5 @@ struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw, } EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq); -MODULE_DESCRIPTION("Regmap SoundWire MBQ Module"); +MODULE_DESCRIPTION("regmap SoundWire MBQ Module"); MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap-sdw.c 
b/drivers/base/regmap/regmap-sdw.c index 159c0b740b..ea631ac7c7 100644 --- a/drivers/base/regmap/regmap-sdw.c +++ b/drivers/base/regmap/regmap-sdw.c @@ -98,5 +98,5 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw, } EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw); -MODULE_DESCRIPTION("Regmap SoundWire Module"); +MODULE_DESCRIPTION("regmap SoundWire Module"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c index 37ab23a9d0..094cf2a2ca 100644 --- a/drivers/base/regmap/regmap-spi.c +++ b/drivers/base/regmap/regmap-spi.c @@ -165,4 +165,5 @@ struct regmap *__devm_regmap_init_spi(struct spi_device *spi, } EXPORT_SYMBOL_GPL(__devm_regmap_init_spi); +MODULE_DESCRIPTION("regmap SPI Module"); MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/trace.h b/drivers/base/regmap/trace.h index 704e106e5d..bcc5a8b226 100644 --- a/drivers/base/regmap/trace.h +++ b/drivers/base/regmap/trace.h @@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(regmap_reg, ), TP_fast_assign( - __assign_str(name, regmap_name(map)); + __assign_str(name); __entry->reg = reg; __entry->val = val; ), @@ -74,7 +74,7 @@ DECLARE_EVENT_CLASS(regmap_bulk, ), TP_fast_assign( - __assign_str(name, regmap_name(map)); + __assign_str(name); __entry->reg = reg; __entry->val_len = val_len; memcpy(__get_dynamic_array(buf), val, val_len); @@ -113,7 +113,7 @@ DECLARE_EVENT_CLASS(regmap_block, ), TP_fast_assign( - __assign_str(name, regmap_name(map)); + __assign_str(name); __entry->reg = reg; __entry->count = count; ), @@ -163,9 +163,9 @@ TRACE_EVENT(regcache_sync, ), TP_fast_assign( - __assign_str(name, regmap_name(map)); - __assign_str(status, status); - __assign_str(type, type); + __assign_str(name); + __assign_str(status); + __assign_str(type); ), TP_printk("%s type=%s status=%s", __get_str(name), @@ -184,7 +184,7 @@ DECLARE_EVENT_CLASS(regmap_bool, ), TP_fast_assign( - __assign_str(name, regmap_name(map)); + __assign_str(name); __entry->flag = flag; ), @@ -216,7 +216,7 @@ DECLARE_EVENT_CLASS(regmap_async, ), TP_fast_assign( - __assign_str(name, regmap_name(map)); + __assign_str(name); ), TP_printk("%s", __get_str(name)) @@ -264,7 +264,7 @@ TRACE_EVENT(regcache_drop_region, ), TP_fast_assign( - __assign_str(name, regmap_name(map)); + __assign_str(name); __entry->from = from; __entry->to = to; ), diff --git a/drivers/base/trace.h b/drivers/base/trace.h index 3192e18f87..e52b6eae06 100644 --- a/drivers/base/trace.h +++ b/drivers/base/trace.h @@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(devres, __field(size_t, size) ), TP_fast_assign( - __assign_str(devname, dev_name(dev)); + __assign_str(devname); __entry->op = op; __entry->node = node; __entry->name = name; diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c index 90d5bdc12e..8ae0b918e7 100644 --- a/drivers/bcma/host_soc.c +++ b/drivers/bcma/host_soc.c @@ -240,15 +240,13 @@ err_unmap_mmio: return err; } -static int bcma_host_soc_remove(struct platform_device *pdev) +static void bcma_host_soc_remove(struct platform_device *pdev) { struct bcma_bus *bus = platform_get_drvdata(pdev); bcma_bus_unregister(bus); iounmap(bus->mmio); platform_set_drvdata(pdev, NULL); - - return 0; } static const struct of_device_id bcma_host_soc_of_match[] = { @@ -263,7 +261,7 @@ static struct platform_driver bcma_host_soc_driver = { .of_match_table = bcma_host_soc_of_match, }, .probe = bcma_host_soc_probe, - .remove = bcma_host_soc_remove, + .remove_new = bcma_host_soc_remove, }; int __init bcma_host_soc_register_driver(void) diff --git a/drivers/block/brd.c 
b/drivers/block/brd.c index e322cef659..558d8e6705 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -29,10 +29,7 @@ /* * Each block ramdisk device has a xarray brd_pages of pages that stores - * the pages containing the block device's contents. A brd page's ->index is - * its offset in PAGE_SIZE units. This is similar to, but in no way connected - * with, the kernel's pagecache or buffer cache (which sit above our block - * device). + * the pages containing the block device's contents. */ struct brd_device { int brd_number; @@ -51,15 +48,7 @@ struct brd_device { */ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) { - pgoff_t idx; - struct page *page; - - idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */ - page = xa_load(&brd->brd_pages, idx); - - BUG_ON(page && page->index != idx); - - return page; + return xa_load(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT); } /* @@ -67,8 +56,8 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) */ static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp) { - pgoff_t idx; - struct page *page, *cur; + pgoff_t idx = sector >> PAGE_SECTORS_SHIFT; + struct page *page; int ret = 0; page = brd_lookup_page(brd, sector); @@ -80,23 +69,16 @@ static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp) return -ENOMEM; xa_lock(&brd->brd_pages); - - idx = sector >> PAGE_SECTORS_SHIFT; - page->index = idx; - - cur = __xa_cmpxchg(&brd->brd_pages, idx, NULL, page, gfp); - - if (unlikely(cur)) { - __free_page(page); - ret = xa_err(cur); - if (!ret && (cur->index != idx)) - ret = -EIO; - } else { + ret = __xa_insert(&brd->brd_pages, idx, page, gfp); + if (!ret) brd->brd_nr_pages++; - } - xa_unlock(&brd->brd_pages); + if (ret < 0) { + __free_page(page); + if (ret == -EBUSY) + ret = 0; + } return ret; } @@ -240,6 +222,23 @@ out: return err; } +static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size) +{ + sector_t aligned_sector = (sector + PAGE_SECTORS) & ~PAGE_SECTORS; + struct page *page; + + size -= (aligned_sector - sector) * SECTOR_SIZE; + xa_lock(&brd->brd_pages); + while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) { + page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT); + if (page) + __free_page(page); + aligned_sector += PAGE_SECTORS; + size -= PAGE_SIZE; + } + xa_unlock(&brd->brd_pages); +} + static void brd_submit_bio(struct bio *bio) { struct brd_device *brd = bio->bi_bdev->bd_disk->private_data; @@ -247,6 +246,12 @@ static void brd_submit_bio(struct bio *bio) struct bio_vec bvec; struct bvec_iter iter; + if (unlikely(op_is_discard(bio->bi_opf))) { + brd_do_discard(brd, sector, bio->bi_iter.bi_size); + bio_endio(bio); + return; + } + bio_for_each_segment(bvec, bio, iter) { unsigned int len = bvec.bv_len; int err; @@ -327,6 +332,9 @@ static int brd_alloc(int i) * is harmless) */ .physical_block_size = PAGE_SIZE, + .max_hw_discard_sectors = UINT_MAX, + .max_discard_segments = 1, + .discard_granularity = PAGE_SIZE, }; list_for_each_entry(brd, &brd_devices, brd_list) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 95a468eaa7..1153721bc7 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -468,9 +468,9 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); if (rw == ITER_SOURCE) - ret = call_write_iter(file, &cmd->iocb, &iter); + ret = file->f_op->write_iter(&cmd->iocb, &iter); else - ret = 
call_read_iter(file, &cmd->iocb, &iter); + ret = file->f_op->read_iter(&cmd->iocb, &iter); lo_rw_aio_do_completion(cmd); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 1ddd3e5497..b87aa80a46 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -222,7 +222,7 @@ static ssize_t pid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); - struct nbd_device *nbd = (struct nbd_device *)disk->private_data; + struct nbd_device *nbd = disk->private_data; return sprintf(buf, "%d\n", nbd->pid); } @@ -236,7 +236,7 @@ static ssize_t backend_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); - struct nbd_device *nbd = (struct nbd_device *)disk->private_data; + struct nbd_device *nbd = disk->private_data; return sprintf(buf, "%s\n", nbd->backend ?: ""); } @@ -589,10 +589,11 @@ static inline int was_interrupted(int result) } /* - * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns - * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed. + * Returns BLK_STS_RESOURCE if the caller should retry after a delay. + * Returns BLK_STS_IOERR if sending failed. */ -static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) +static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, + int index) { struct request *req = blk_mq_rq_from_pdu(cmd); struct nbd_config *config = nbd->config; @@ -601,7 +602,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)}; struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; struct iov_iter from; - unsigned long size = blk_rq_bytes(req); struct bio *bio; u64 handle; u32 type; @@ -615,13 +615,13 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) type = req_to_nbd_cmd_type(req); if (type == U32_MAX) - return -EIO; + return BLK_STS_IOERR; if (rq_data_dir(req) == WRITE && (config->flags & NBD_FLAG_READ_ONLY)) { dev_err_ratelimited(disk_to_dev(nbd->disk), "Write on read-only\n"); - return -EIO; + return BLK_STS_IOERR; } if (req->cmd_flags & REQ_FUA) @@ -650,7 +650,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) request.type = htonl(type | nbd_cmd_flags); if (type != NBD_CMD_FLUSH) { request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); - request.len = htonl(size); + request.len = htonl(blk_rq_bytes(req)); } handle = nbd_cmd_handle(cmd); request.cookie = cpu_to_be64(handle); @@ -675,11 +675,11 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) nsock->sent = sent; } set_bit(NBD_CMD_REQUEUED, &cmd->flags); - return (__force int)BLK_STS_RESOURCE; + return BLK_STS_RESOURCE; } dev_err_ratelimited(disk_to_dev(nbd->disk), "Send control failed (result %d)\n", result); - return -EAGAIN; + goto requeue; } send_pages: if (type != NBD_CMD_WRITE) @@ -716,12 +716,12 @@ send_pages: nsock->pending = req; nsock->sent = sent; set_bit(NBD_CMD_REQUEUED, &cmd->flags); - return (__force int)BLK_STS_RESOURCE; + return BLK_STS_RESOURCE; } dev_err(disk_to_dev(nbd->disk), "Send data failed (result %d)\n", result); - return -EAGAIN; + goto requeue; } /* * The completion might already have come in, @@ -738,7 +738,16 @@ out: trace_nbd_payload_sent(req, handle); nsock->pending = NULL; nsock->sent = 0; - return 0; + __set_bit(NBD_CMD_INFLIGHT, &cmd->flags); + return BLK_STS_OK; + +requeue: + 
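/*
 * Editor's illustrative sketch, not part of the patch: the conversion the nbd
 * hunks apply here -- the send path returns blk_status_t directly instead of
 * mixing negative errno values with __force-cast status codes, translating
 * errno once at the boundary.  example_xmit()/example_send() are hypothetical.
 */
#include <linux/blk-mq.h>

static int example_xmit(struct request *req)
{
	return 0;	/* stand-in for the real socket send; returns 0 or -errno */
}

static blk_status_t example_send(struct request *req)
{
	int err = example_xmit(req);

	/*
	 * Translate errno to block-layer status once, at the boundary; in the
	 * nbd hunk the "requeue on another socket" case is handled internally
	 * before this point.
	 */
	return errno_to_blk_status(err);
}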
/* retry on a different socket */ + dev_err_ratelimited(disk_to_dev(nbd->disk), + "Request send failed, requeueing\n"); + nbd_mark_nsock_dead(nbd, nsock, 1); + nbd_requeue_cmd(cmd); + return BLK_STS_OK; } static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock, @@ -1019,7 +1028,7 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index) struct nbd_device *nbd = cmd->nbd; struct nbd_config *config; struct nbd_sock *nsock; - int ret; + blk_status_t ret; lockdep_assert_held(&cmd->lock); @@ -1073,28 +1082,11 @@ again: ret = BLK_STS_OK; goto out; } - /* - * Some failures are related to the link going down, so anything that - * returns EAGAIN can be retried on a different socket. - */ ret = nbd_send_cmd(nbd, cmd, index); - /* - * Access to this flag is protected by cmd->lock, thus it's safe to set - * the flag after nbd_send_cmd() succeed to send request to server. - */ - if (!ret) - __set_bit(NBD_CMD_INFLIGHT, &cmd->flags); - else if (ret == -EAGAIN) { - dev_err_ratelimited(disk_to_dev(nbd->disk), - "Request send failed, requeueing\n"); - nbd_mark_nsock_dead(nbd, nsock, 1); - nbd_requeue_cmd(cmd); - ret = BLK_STS_OK; - } out: mutex_unlock(&nsock->tx_lock); nbd_config_put(nbd); - return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret; + return ret; } static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 26e2c22a87..f940580193 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -225,6 +225,10 @@ static unsigned long g_cache_size; module_param_named(cache_size, g_cache_size, ulong, 0444); MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)"); +static bool g_fua = true; +module_param_named(fua, g_fua, bool, 0444); +MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. Default: true"); + static unsigned int g_mbps; module_param_named(mbps, g_mbps, uint, 0444); MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)"); @@ -253,6 +257,11 @@ static unsigned int g_zone_max_active; module_param_named(zone_max_active, g_zone_max_active, uint, 0444); MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)"); +static int g_zone_append_max_sectors = INT_MAX; +module_param_named(zone_append_max_sectors, g_zone_append_max_sectors, int, 0444); +MODULE_PARM_DESC(zone_append_max_sectors, + "Maximum size of a zone append command (in 512B sectors). 
Specify 0 for zone append emulation"); + static struct nullb_device *null_alloc_dev(void); static void null_free_dev(struct nullb_device *dev); static void null_del_dev(struct nullb *nullb); @@ -448,10 +457,12 @@ NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL); NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL); NULLB_DEVICE_ATTR(zone_max_open, uint, NULL); NULLB_DEVICE_ATTR(zone_max_active, uint, NULL); +NULLB_DEVICE_ATTR(zone_append_max_sectors, uint, NULL); NULLB_DEVICE_ATTR(virt_boundary, bool, NULL); NULLB_DEVICE_ATTR(no_sched, bool, NULL); NULLB_DEVICE_ATTR(shared_tags, bool, NULL); NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL); +NULLB_DEVICE_ATTR(fua, bool, NULL); static ssize_t nullb_device_power_show(struct config_item *item, char *page) { @@ -596,12 +607,14 @@ static struct configfs_attribute *nullb_device_attrs[] = { &nullb_device_attr_zone_nr_conv, &nullb_device_attr_zone_max_open, &nullb_device_attr_zone_max_active, + &nullb_device_attr_zone_append_max_sectors, &nullb_device_attr_zone_readonly, &nullb_device_attr_zone_offline, &nullb_device_attr_virt_boundary, &nullb_device_attr_no_sched, &nullb_device_attr_shared_tags, &nullb_device_attr_shared_tag_bitmap, + &nullb_device_attr_fua, NULL, }; @@ -680,14 +693,14 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item) static ssize_t memb_group_features_show(struct config_item *item, char *page) { return snprintf(page, PAGE_SIZE, - "badblocks,blocking,blocksize,cache_size," + "badblocks,blocking,blocksize,cache_size,fua," "completion_nsec,discard,home_node,hw_queue_depth," "irqmode,max_sectors,mbps,memory_backed,no_sched," "poll_queues,power,queue_mode,shared_tag_bitmap," "shared_tags,size,submit_queues,use_per_node_hctx," "virt_boundary,zoned,zone_capacity,zone_max_active," "zone_max_open,zone_nr_conv,zone_offline,zone_readonly," - "zone_size\n"); + "zone_size,zone_append_max_sectors\n"); } CONFIGFS_ATTR_RO(memb_group_, features); @@ -767,10 +780,13 @@ static struct nullb_device *null_alloc_dev(void) dev->zone_nr_conv = g_zone_nr_conv; dev->zone_max_open = g_zone_max_open; dev->zone_max_active = g_zone_max_active; + dev->zone_append_max_sectors = g_zone_append_max_sectors; dev->virt_boundary = g_virt_boundary; dev->no_sched = g_no_sched; dev->shared_tags = g_shared_tags; dev->shared_tag_bitmap = g_shared_tag_bitmap; + dev->fua = g_fua; + return dev; } @@ -1167,7 +1183,7 @@ blk_status_t null_handle_discard(struct nullb_device *dev, return BLK_STS_OK; } -static int null_handle_flush(struct nullb *nullb) +static blk_status_t null_handle_flush(struct nullb *nullb) { int err; @@ -1184,7 +1200,7 @@ static int null_handle_flush(struct nullb *nullb) WARN_ON(!radix_tree_empty(&nullb->dev->cache)); spin_unlock_irq(&nullb->lock); - return err; + return errno_to_blk_status(err); } static int null_transfer(struct nullb *nullb, struct page *page, @@ -1218,11 +1234,11 @@ static int null_transfer(struct nullb *nullb, struct page *page, return err; } -static int null_handle_rq(struct nullb_cmd *cmd) +static blk_status_t null_handle_rq(struct nullb_cmd *cmd) { struct request *rq = blk_mq_rq_from_pdu(cmd); struct nullb *nullb = cmd->nq->dev->nullb; - int err; + int err = 0; unsigned int len; sector_t sector = blk_rq_pos(rq); struct req_iterator iter; @@ -1234,15 +1250,13 @@ static int null_handle_rq(struct nullb_cmd *cmd) err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, op_is_write(req_op(rq)), sector, rq->cmd_flags & REQ_FUA); - if (err) { - spin_unlock_irq(&nullb->lock); - return err; - } + if (err) + break; sector 
+= len >> SECTOR_SHIFT; } spin_unlock_irq(&nullb->lock); - return 0; + return errno_to_blk_status(err); } static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd) @@ -1289,8 +1303,8 @@ static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, if (op == REQ_OP_DISCARD) return null_handle_discard(dev, sector, nr_sectors); - return errno_to_blk_status(null_handle_rq(cmd)); + return null_handle_rq(cmd); } static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd) @@ -1359,7 +1373,7 @@ static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector, blk_status_t sts; if (op == REQ_OP_FLUSH) { - cmd->error = errno_to_blk_status(null_handle_flush(nullb)); + cmd->error = null_handle_flush(nullb); goto out; } @@ -1928,7 +1942,7 @@ static int null_add_dev(struct nullb_device *dev) if (dev->cache_size > 0) { set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); - blk_queue_write_cache(nullb->q, true, true); + blk_queue_write_cache(nullb->q, true, dev->fua); } nullb->q->queuedata = nullb; diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h index 477b977468..3234e6c85e 100644 --- a/drivers/block/null_blk/null_blk.h +++ b/drivers/block/null_blk/null_blk.h @@ -82,6 +82,7 @@ struct nullb_device { unsigned int zone_nr_conv; /* number of conventional zones */ unsigned int zone_max_open; /* max number of open zones */ unsigned int zone_max_active; /* max number of active zones */ + unsigned int zone_append_max_sectors; /* Max sectors per zone append command */ unsigned int submit_queues; /* number of submission queues */ unsigned int prev_submit_queues; /* number of submission queues before change */ unsigned int poll_queues; /* number of IOPOLL submission queues */ @@ -104,6 +105,7 @@ struct nullb_device { bool no_sched; /* no IO scheduler for the device */ bool shared_tags; /* share tag set between devices for blk-mq */ bool shared_tag_bitmap; /* use hostwide shared tags */ + bool fua; /* Support FUA */ }; struct nullb { diff --git a/drivers/block/null_blk/trace.h b/drivers/block/null_blk/trace.h index ef2d05d5f0..82b8f6a5e5 100644 --- a/drivers/block/null_blk/trace.h +++ b/drivers/block/null_blk/trace.h @@ -36,7 +36,12 @@ TRACE_EVENT(nullb_zone_op, TP_ARGS(cmd, zone_no, zone_cond), TP_STRUCT__entry( __array(char, disk, DISK_NAME_LEN) - __field(enum req_op, op) + /* + * __field() uses is_signed_type(). is_signed_type() does not + * support bitwise types. Use __field_struct() instead because + * it does not use is_signed_type(). 
+ */ + __field_struct(enum req_op, op) __field(unsigned int, zone_no) __field(unsigned int, zone_cond) ), diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c index 74d0418dda..f118d304f3 100644 --- a/drivers/block/null_blk/zoned.c +++ b/drivers/block/null_blk/zoned.c @@ -9,6 +9,8 @@ #undef pr_fmt #define pr_fmt(fmt) "null_blk: " fmt +#define NULL_ZONE_INVALID_WP ((sector_t)-1) + static inline sector_t mb_to_sects(unsigned long mb) { return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT; @@ -19,18 +21,6 @@ static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect) return sect >> ilog2(dev->zone_size_sects); } -static inline void null_lock_zone_res(struct nullb_device *dev) -{ - if (dev->need_zone_res_mgmt) - spin_lock_irq(&dev->zone_res_lock); -} - -static inline void null_unlock_zone_res(struct nullb_device *dev) -{ - if (dev->need_zone_res_mgmt) - spin_unlock_irq(&dev->zone_res_lock); -} - static inline void null_init_zone_lock(struct nullb_device *dev, struct nullb_zone *zone) { @@ -114,6 +104,11 @@ int null_init_zoned_dev(struct nullb_device *dev, dev->zone_nr_conv); } + dev->zone_append_max_sectors = + min(ALIGN_DOWN(dev->zone_append_max_sectors, + dev->blocksize >> SECTOR_SHIFT), + zone_capacity_sects); + /* Max active zones has to be < nbr of seq zones in order to be enforceable */ if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) { dev->zone_max_active = 0; @@ -165,7 +160,7 @@ int null_init_zoned_dev(struct nullb_device *dev, lim->zoned = true; lim->chunk_sectors = dev->zone_size_sects; - lim->max_zone_append_sectors = dev->zone_size_sects; + lim->max_zone_append_sectors = dev->zone_append_max_sectors; lim->max_open_zones = dev->zone_max_open; lim->max_active_zones = dev->zone_max_active; return 0; @@ -174,11 +169,16 @@ int null_init_zoned_dev(struct nullb_device *dev, int null_register_zoned_dev(struct nullb *nullb) { struct request_queue *q = nullb->q; + struct gendisk *disk = nullb->disk; blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); - blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE); - nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0); - return blk_revalidate_disk_zones(nullb->disk, NULL); + disk->nr_zones = bdev_nr_zones(disk->part0); + + pr_info("%s: using %s zone append\n", + disk->disk_name, + queue_emulates_zone_append(q) ? 
"emulated" : "native"); + + return blk_revalidate_disk_zones(disk); } void null_free_zoned_dev(struct nullb_device *dev) @@ -252,35 +252,6 @@ size_t null_zone_valid_read_len(struct nullb *nullb, return (zone->wp - sector) << SECTOR_SHIFT; } -static blk_status_t __null_close_zone(struct nullb_device *dev, - struct nullb_zone *zone) -{ - switch (zone->cond) { - case BLK_ZONE_COND_CLOSED: - /* close operation on closed is not an error */ - return BLK_STS_OK; - case BLK_ZONE_COND_IMP_OPEN: - dev->nr_zones_imp_open--; - break; - case BLK_ZONE_COND_EXP_OPEN: - dev->nr_zones_exp_open--; - break; - case BLK_ZONE_COND_EMPTY: - case BLK_ZONE_COND_FULL: - default: - return BLK_STS_IOERR; - } - - if (zone->wp == zone->start) { - zone->cond = BLK_ZONE_COND_EMPTY; - } else { - zone->cond = BLK_ZONE_COND_CLOSED; - dev->nr_zones_closed++; - } - - return BLK_STS_OK; -} - static void null_close_imp_open_zone(struct nullb_device *dev) { struct nullb_zone *zone; @@ -297,7 +268,13 @@ static void null_close_imp_open_zone(struct nullb_device *dev) zno = dev->zone_nr_conv; if (zone->cond == BLK_ZONE_COND_IMP_OPEN) { - __null_close_zone(dev, zone); + dev->nr_zones_imp_open--; + if (zone->wp == zone->start) { + zone->cond = BLK_ZONE_COND_EMPTY; + } else { + zone->cond = BLK_ZONE_COND_CLOSED; + dev->nr_zones_closed++; + } dev->imp_close_zone_no = zno; return; } @@ -385,73 +362,73 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector, null_lock_zone(dev, zone); - if (zone->cond == BLK_ZONE_COND_FULL || - zone->cond == BLK_ZONE_COND_READONLY || - zone->cond == BLK_ZONE_COND_OFFLINE) { - /* Cannot write to the zone */ - ret = BLK_STS_IOERR; - goto unlock; - } - /* - * Regular writes must be at the write pointer position. - * Zone append writes are automatically issued at the write - * pointer and the position returned using the request or BIO - * sector. + * Regular writes must be at the write pointer position. Zone append + * writes are automatically issued at the write pointer and the position + * returned using the request sector. Note that we do not check the zone + * condition because for FULL, READONLY and OFFLINE zones, the sector + * check against the zone write pointer will always result in failing + * the command. 
*/ if (append) { + if (WARN_ON_ONCE(!dev->zone_append_max_sectors) || + zone->wp == NULL_ZONE_INVALID_WP) { + ret = BLK_STS_IOERR; + goto unlock_zone; + } sector = zone->wp; blk_mq_rq_from_pdu(cmd)->__sector = sector; - } else if (sector != zone->wp) { - ret = BLK_STS_IOERR; - goto unlock; } - if (zone->wp + nr_sectors > zone->start + zone->capacity) { + if (sector != zone->wp || + zone->wp + nr_sectors > zone->start + zone->capacity) { ret = BLK_STS_IOERR; - goto unlock; + goto unlock_zone; } if (zone->cond == BLK_ZONE_COND_CLOSED || zone->cond == BLK_ZONE_COND_EMPTY) { - null_lock_zone_res(dev); + if (dev->need_zone_res_mgmt) { + spin_lock(&dev->zone_res_lock); - ret = null_check_zone_resources(dev, zone); - if (ret != BLK_STS_OK) { - null_unlock_zone_res(dev); - goto unlock; - } - if (zone->cond == BLK_ZONE_COND_CLOSED) { - dev->nr_zones_closed--; - dev->nr_zones_imp_open++; - } else if (zone->cond == BLK_ZONE_COND_EMPTY) { - dev->nr_zones_imp_open++; - } + ret = null_check_zone_resources(dev, zone); + if (ret != BLK_STS_OK) { + spin_unlock(&dev->zone_res_lock); + goto unlock_zone; + } + if (zone->cond == BLK_ZONE_COND_CLOSED) { + dev->nr_zones_closed--; + dev->nr_zones_imp_open++; + } else if (zone->cond == BLK_ZONE_COND_EMPTY) { + dev->nr_zones_imp_open++; + } - if (zone->cond != BLK_ZONE_COND_EXP_OPEN) - zone->cond = BLK_ZONE_COND_IMP_OPEN; + spin_unlock(&dev->zone_res_lock); + } - null_unlock_zone_res(dev); + zone->cond = BLK_ZONE_COND_IMP_OPEN; } ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors); if (ret != BLK_STS_OK) - goto unlock; + goto unlock_zone; zone->wp += nr_sectors; if (zone->wp == zone->start + zone->capacity) { - null_lock_zone_res(dev); - if (zone->cond == BLK_ZONE_COND_EXP_OPEN) - dev->nr_zones_exp_open--; - else if (zone->cond == BLK_ZONE_COND_IMP_OPEN) - dev->nr_zones_imp_open--; + if (dev->need_zone_res_mgmt) { + spin_lock(&dev->zone_res_lock); + if (zone->cond == BLK_ZONE_COND_EXP_OPEN) + dev->nr_zones_exp_open--; + else if (zone->cond == BLK_ZONE_COND_IMP_OPEN) + dev->nr_zones_imp_open--; + spin_unlock(&dev->zone_res_lock); + } zone->cond = BLK_ZONE_COND_FULL; - null_unlock_zone_res(dev); } ret = BLK_STS_OK; -unlock: +unlock_zone: null_unlock_zone(dev, zone); return ret; @@ -465,54 +442,100 @@ static blk_status_t null_open_zone(struct nullb_device *dev, if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) return BLK_STS_IOERR; - null_lock_zone_res(dev); - switch (zone->cond) { case BLK_ZONE_COND_EXP_OPEN: - /* open operation on exp open is not an error */ - goto unlock; + /* Open operation on exp open is not an error */ + return BLK_STS_OK; case BLK_ZONE_COND_EMPTY: - ret = null_check_zone_resources(dev, zone); - if (ret != BLK_STS_OK) - goto unlock; - break; case BLK_ZONE_COND_IMP_OPEN: - dev->nr_zones_imp_open--; - break; case BLK_ZONE_COND_CLOSED: - ret = null_check_zone_resources(dev, zone); - if (ret != BLK_STS_OK) - goto unlock; - dev->nr_zones_closed--; break; case BLK_ZONE_COND_FULL: default: - ret = BLK_STS_IOERR; - goto unlock; + return BLK_STS_IOERR; } - zone->cond = BLK_ZONE_COND_EXP_OPEN; - dev->nr_zones_exp_open++; + if (dev->need_zone_res_mgmt) { + spin_lock(&dev->zone_res_lock); -unlock: - null_unlock_zone_res(dev); + switch (zone->cond) { + case BLK_ZONE_COND_EMPTY: + ret = null_check_zone_resources(dev, zone); + if (ret != BLK_STS_OK) { + spin_unlock(&dev->zone_res_lock); + return ret; + } + break; + case BLK_ZONE_COND_IMP_OPEN: + dev->nr_zones_imp_open--; + break; + case BLK_ZONE_COND_CLOSED: + ret = null_check_zone_resources(dev, zone); 
+ if (ret != BLK_STS_OK) { + spin_unlock(&dev->zone_res_lock); + return ret; + } + dev->nr_zones_closed--; + break; + default: + break; + } - return ret; + dev->nr_zones_exp_open++; + + spin_unlock(&dev->zone_res_lock); + } + + zone->cond = BLK_ZONE_COND_EXP_OPEN; + + return BLK_STS_OK; } static blk_status_t null_close_zone(struct nullb_device *dev, struct nullb_zone *zone) { - blk_status_t ret; - if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) return BLK_STS_IOERR; - null_lock_zone_res(dev); - ret = __null_close_zone(dev, zone); - null_unlock_zone_res(dev); + switch (zone->cond) { + case BLK_ZONE_COND_CLOSED: + /* close operation on closed is not an error */ + return BLK_STS_OK; + case BLK_ZONE_COND_IMP_OPEN: + case BLK_ZONE_COND_EXP_OPEN: + break; + case BLK_ZONE_COND_EMPTY: + case BLK_ZONE_COND_FULL: + default: + return BLK_STS_IOERR; + } + + if (dev->need_zone_res_mgmt) { + spin_lock(&dev->zone_res_lock); - return ret; + switch (zone->cond) { + case BLK_ZONE_COND_IMP_OPEN: + dev->nr_zones_imp_open--; + break; + case BLK_ZONE_COND_EXP_OPEN: + dev->nr_zones_exp_open--; + break; + default: + break; + } + + if (zone->wp > zone->start) + dev->nr_zones_closed++; + + spin_unlock(&dev->zone_res_lock); + } + + if (zone->wp == zone->start) + zone->cond = BLK_ZONE_COND_EMPTY; + else + zone->cond = BLK_ZONE_COND_CLOSED; + + return BLK_STS_OK; } static blk_status_t null_finish_zone(struct nullb_device *dev, @@ -523,41 +546,47 @@ static blk_status_t null_finish_zone(struct nullb_device *dev, if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) return BLK_STS_IOERR; - null_lock_zone_res(dev); + if (dev->need_zone_res_mgmt) { + spin_lock(&dev->zone_res_lock); - switch (zone->cond) { - case BLK_ZONE_COND_FULL: - /* finish operation on full is not an error */ - goto unlock; - case BLK_ZONE_COND_EMPTY: - ret = null_check_zone_resources(dev, zone); - if (ret != BLK_STS_OK) - goto unlock; - break; - case BLK_ZONE_COND_IMP_OPEN: - dev->nr_zones_imp_open--; - break; - case BLK_ZONE_COND_EXP_OPEN: - dev->nr_zones_exp_open--; - break; - case BLK_ZONE_COND_CLOSED: - ret = null_check_zone_resources(dev, zone); - if (ret != BLK_STS_OK) - goto unlock; - dev->nr_zones_closed--; - break; - default: - ret = BLK_STS_IOERR; - goto unlock; + switch (zone->cond) { + case BLK_ZONE_COND_FULL: + /* Finish operation on full is not an error */ + spin_unlock(&dev->zone_res_lock); + return BLK_STS_OK; + case BLK_ZONE_COND_EMPTY: + ret = null_check_zone_resources(dev, zone); + if (ret != BLK_STS_OK) { + spin_unlock(&dev->zone_res_lock); + return ret; + } + break; + case BLK_ZONE_COND_IMP_OPEN: + dev->nr_zones_imp_open--; + break; + case BLK_ZONE_COND_EXP_OPEN: + dev->nr_zones_exp_open--; + break; + case BLK_ZONE_COND_CLOSED: + ret = null_check_zone_resources(dev, zone); + if (ret != BLK_STS_OK) { + spin_unlock(&dev->zone_res_lock); + return ret; + } + dev->nr_zones_closed--; + break; + default: + spin_unlock(&dev->zone_res_lock); + return BLK_STS_IOERR; + } + + spin_unlock(&dev->zone_res_lock); } zone->cond = BLK_ZONE_COND_FULL; zone->wp = zone->start + zone->len; -unlock: - null_unlock_zone_res(dev); - - return ret; + return BLK_STS_OK; } static blk_status_t null_reset_zone(struct nullb_device *dev, @@ -566,34 +595,33 @@ static blk_status_t null_reset_zone(struct nullb_device *dev, if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) return BLK_STS_IOERR; - null_lock_zone_res(dev); + if (dev->need_zone_res_mgmt) { + spin_lock(&dev->zone_res_lock); - switch (zone->cond) { - case BLK_ZONE_COND_EMPTY: - /* reset operation on empty is not an error 
*/ - null_unlock_zone_res(dev); - return BLK_STS_OK; - case BLK_ZONE_COND_IMP_OPEN: - dev->nr_zones_imp_open--; - break; - case BLK_ZONE_COND_EXP_OPEN: - dev->nr_zones_exp_open--; - break; - case BLK_ZONE_COND_CLOSED: - dev->nr_zones_closed--; - break; - case BLK_ZONE_COND_FULL: - break; - default: - null_unlock_zone_res(dev); - return BLK_STS_IOERR; + switch (zone->cond) { + case BLK_ZONE_COND_IMP_OPEN: + dev->nr_zones_imp_open--; + break; + case BLK_ZONE_COND_EXP_OPEN: + dev->nr_zones_exp_open--; + break; + case BLK_ZONE_COND_CLOSED: + dev->nr_zones_closed--; + break; + case BLK_ZONE_COND_EMPTY: + case BLK_ZONE_COND_FULL: + break; + default: + spin_unlock(&dev->zone_res_lock); + return BLK_STS_IOERR; + } + + spin_unlock(&dev->zone_res_lock); } zone->cond = BLK_ZONE_COND_EMPTY; zone->wp = zone->start; - null_unlock_zone_res(dev); - if (dev->memory_backed) return null_handle_discard(dev, zone->start, zone->len); @@ -722,7 +750,7 @@ static void null_set_zone_cond(struct nullb_device *dev, zone->cond != BLK_ZONE_COND_OFFLINE) null_finish_zone(dev, zone); zone->cond = cond; - zone->wp = (sector_t)-1; + zone->wp = NULL_ZONE_INVALID_WP; } null_unlock_zone(dev, zone); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 21728e9ea5..8a2ce80700 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2215,6 +2215,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write) } dev_info(ddev, "%lukB available on disc\n", lba << 1); } + set_blocksize(bdev_file, CD_FRAMESIZE); return 0; @@ -2278,11 +2279,6 @@ static int pkt_open(struct gendisk *disk, blk_mode_t mode) ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE); if (ret) goto out_dec; - /* - * needed here as well, since ext2 (among others) may change - * the blocksize at mount time - */ - set_blocksize(disk->part0, CD_FRAMESIZE); } mutex_unlock(&ctl_mutex); mutex_unlock(&pktcdvd_mutex); @@ -2526,7 +2522,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) __module_get(THIS_MODULE); pd->bdev_file = bdev_file; - set_blocksize(file_bdev(bdev_file), CD_FRAMESIZE); atomic_set(&pd->cdrw.pending_bios, 0); pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 26ff5cd2bf..da22ce38c0 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -362,7 +362,7 @@ enum rbd_watch_state { enum rbd_lock_state { RBD_LOCK_STATE_UNLOCKED, RBD_LOCK_STATE_LOCKED, - RBD_LOCK_STATE_RELEASING, + RBD_LOCK_STATE_QUIESCING, }; /* WatchNotify::ClientId */ @@ -422,7 +422,7 @@ struct rbd_device { struct list_head running_list; struct completion acquire_wait; int acquire_err; - struct completion releasing_wait; + struct completion quiescing_wait; spinlock_t object_map_lock; u8 *object_map; @@ -525,7 +525,7 @@ static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev) lockdep_assert_held(&rbd_dev->lock_rwsem); return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED || - rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING; + rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING; } static bool rbd_is_lock_owner(struct rbd_device *rbd_dev) @@ -3457,13 +3457,14 @@ static void rbd_lock_del_request(struct rbd_img_request *img_req) lockdep_assert_held(&rbd_dev->lock_rwsem); spin_lock(&rbd_dev->lock_lists_lock); if (!list_empty(&img_req->lock_item)) { + rbd_assert(!list_empty(&rbd_dev->running_list)); list_del_init(&img_req->lock_item); - need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING && + need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING && 
list_empty(&rbd_dev->running_list)); } spin_unlock(&rbd_dev->lock_lists_lock); if (need_wakeup) - complete(&rbd_dev->releasing_wait); + complete(&rbd_dev->quiescing_wait); } static int rbd_img_exclusive_lock(struct rbd_img_request *img_req) @@ -3476,11 +3477,6 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req) if (rbd_lock_add_request(img_req)) return 1; - if (rbd_dev->opts->exclusive) { - WARN_ON(1); /* lock got released? */ - return -EROFS; - } - /* * Note the use of mod_delayed_work() in rbd_acquire_lock() * and cancel_delayed_work() in wake_lock_waiters(). @@ -4181,16 +4177,16 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev) /* * Ensure that all in-flight IO is flushed. */ - rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING; - rbd_assert(!completion_done(&rbd_dev->releasing_wait)); + rbd_dev->lock_state = RBD_LOCK_STATE_QUIESCING; + rbd_assert(!completion_done(&rbd_dev->quiescing_wait)); if (list_empty(&rbd_dev->running_list)) return true; up_write(&rbd_dev->lock_rwsem); - wait_for_completion(&rbd_dev->releasing_wait); + wait_for_completion(&rbd_dev->quiescing_wait); down_write(&rbd_dev->lock_rwsem); - if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING) + if (rbd_dev->lock_state != RBD_LOCK_STATE_QUIESCING) return false; rbd_assert(list_empty(&rbd_dev->running_list)); @@ -4601,6 +4597,10 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev) rbd_warn(rbd_dev, "failed to update lock cookie: %d", ret); + if (rbd_dev->opts->exclusive) + rbd_warn(rbd_dev, + "temporarily releasing lock on exclusive mapping"); + /* * Lock cookie cannot be updated on older OSDs, so do * a manual release and queue an acquire. @@ -5383,7 +5383,7 @@ static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec) INIT_LIST_HEAD(&rbd_dev->acquiring_list); INIT_LIST_HEAD(&rbd_dev->running_list); init_completion(&rbd_dev->acquire_wait); - init_completion(&rbd_dev->releasing_wait); + init_completion(&rbd_dev->quiescing_wait); spin_lock_init(&rbd_dev->object_map_lock); @@ -6589,11 +6589,6 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) if (ret) return ret; - /* - * The lock may have been released by now, unless automatic lock - * transitions are disabled. 
- */ - rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev)); return 0; } diff --git a/drivers/block/rnbd/rnbd-srv-trace.h b/drivers/block/rnbd/rnbd-srv-trace.h index 8dedf73bdd..89d0bcb171 100644 --- a/drivers/block/rnbd/rnbd-srv-trace.h +++ b/drivers/block/rnbd/rnbd-srv-trace.h @@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(rnbd_srv_link_class, TP_fast_assign( __entry->qdepth = srv->queue_depth; - __assign_str(sessname, srv->sessname); + __assign_str(sessname); ), TP_printk("sessname: %s qdepth: %d", @@ -85,7 +85,7 @@ TRACE_EVENT(process_rdma, ), TP_fast_assign( - __assign_str(sessname, srv->sessname); + __assign_str(sessname); __entry->dir = id->dir; __entry->ver = srv->ver; __entry->device_id = le32_to_cpu(msg->device_id); @@ -130,7 +130,7 @@ TRACE_EVENT(process_msg_sess_info, __entry->proto_ver = srv->ver; __entry->clt_ver = msg->ver; __entry->srv_ver = RNBD_PROTO_VER_MAJOR; - __assign_str(sessname, srv->sessname); + __assign_str(sessname); ), TP_printk("Session %s using proto-ver %d (clt-ver: %d, srv-ver: %d)", @@ -165,8 +165,8 @@ TRACE_EVENT(process_msg_open, TP_fast_assign( __entry->access_mode = msg->access_mode; - __assign_str(sessname, srv->sessname); - __assign_str(dev_name, msg->dev_name); + __assign_str(sessname); + __assign_str(dev_name); ), TP_printk("Open message received: session='%s' path='%s' access_mode=%s", @@ -189,7 +189,7 @@ TRACE_EVENT(process_msg_close, TP_fast_assign( __entry->device_id = le32_to_cpu(msg->device_id); - __assign_str(sessname, srv->sessname); + __assign_str(sessname); ), TP_printk("Close message received: session='%s' device id='%d'", diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 374e4efa87..3b58839321 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -48,6 +48,9 @@ #define UBLK_MINORS (1U << MINORBITS) +/* private ioctl command mirror */ +#define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC) + /* All UBLK_F_* have to be included into UBLK_F_ALL */ #define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \ | UBLK_F_URING_CMD_COMP_IN_TASK \ @@ -221,7 +224,7 @@ static int ublk_get_nr_zones(const struct ublk_device *ub) static int ublk_revalidate_disk_zones(struct ublk_device *ub) { - return blk_revalidate_disk_zones(ub->ub_disk, NULL); + return blk_revalidate_disk_zones(ub->ub_disk); } static int ublk_dev_param_zoned_validate(const struct ublk_device *ub) @@ -249,8 +252,7 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub) static void ublk_dev_param_zoned_apply(struct ublk_device *ub) { blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue); - blk_queue_required_elevator_features(ub->ub_disk->queue, - ELEVATOR_F_ZBD_SEQ_WRITE); + ub->ub_disk->nr_zones = ublk_get_nr_zones(ub); } @@ -2179,6 +2181,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) .virt_boundary_mask = p->virt_boundary_mask, .max_segments = USHRT_MAX, .max_segment_size = UINT_MAX, + .dma_alignment = 3, }; struct gendisk *disk; int ret = -EINVAL; @@ -2904,7 +2907,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, case UBLK_CMD_DEL_DEV: ret = ublk_ctrl_del_dev(&ub, true); break; - case UBLK_U_CMD_DEL_DEV_ASYNC: + case UBLK_CMD_DEL_DEV_ASYNC: ret = ublk_ctrl_del_dev(&ub, false); break; case UBLK_CMD_GET_QUEUE_AFFINITY: diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 42dea7601d..2351f411fa 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -1543,7 +1543,7 @@ static int virtblk_probe(struct virtio_device *vdev) */ 
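/*
 * Editor's illustrative sketch, not part of the patch: the API change applied
 * in the ublk, virtio_blk and null_blk hunks -- blk_revalidate_disk_zones()
 * drops its update_driver_data callback argument, so zoned drivers now pass
 * only the gendisk.  example_register_zoned() is a hypothetical caller built
 * from the same calls the surrounding hunks use.
 */
#include <linux/blkdev.h>

static int example_register_zoned(struct gendisk *disk)
{
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, disk->queue);
	disk->nr_zones = bdev_nr_zones(disk->part0);

	/* Was: blk_revalidate_disk_zones(disk, NULL); */
	return blk_revalidate_disk_zones(disk);
}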
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) { blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue); - err = blk_revalidate_disk_zones(vblk->disk, NULL); + err = blk_revalidate_disk_zones(vblk->disk); if (err) goto out_cleanup_disk; } @@ -1658,7 +1658,6 @@ static struct virtio_driver virtio_blk = { .feature_table_legacy = features_legacy, .feature_table_size_legacy = ARRAY_SIZE(features_legacy), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtblk_probe, .remove = virtblk_remove, diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index fd7c0ff213..67aa63dabc 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1063,8 +1063,7 @@ static char *encode_disk_name(char *ptr, unsigned int n) } static int xlvbd_alloc_gendisk(blkif_sector_t capacity, - struct blkfront_info *info, u16 sector_size, - unsigned int physical_sector_size) + struct blkfront_info *info) { struct queue_limits lim = {}; struct gendisk *gd; @@ -1159,8 +1158,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, info->rq = gd->queue; info->gd = gd; - info->sector_size = sector_size; - info->physical_sector_size = physical_sector_size; xlvbd_flush(info); @@ -2315,8 +2312,6 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) static void blkfront_connect(struct blkfront_info *info) { unsigned long long sectors; - unsigned long sector_size; - unsigned int physical_sector_size; int err, i; struct blkfront_ring_info *rinfo; @@ -2355,7 +2350,7 @@ static void blkfront_connect(struct blkfront_info *info) err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%llu", §ors, "info", "%u", &info->vdisk_info, - "sector-size", "%lu", §or_size, + "sector-size", "%lu", &info->sector_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, @@ -2369,9 +2364,9 @@ static void blkfront_connect(struct blkfront_info *info) * provide this. Assume physical sector size to be the same as * sector_size in that case. 
*/ - physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend, + info->physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend, "physical-sector-size", - sector_size); + info->sector_size); blkfront_gather_backend_features(info); for_each_rinfo(info, rinfo, i) { err = blkfront_setup_indirect(rinfo); @@ -2383,8 +2378,7 @@ static void blkfront_connect(struct blkfront_info *info) } } - err = xlvbd_alloc_gendisk(sectors, info, sector_size, - physical_sector_size); + err = xlvbd_alloc_gendisk(sectors, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index f0639df6cd..3acd7006ad 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -426,11 +426,10 @@ static void reset_bdev(struct zram *zram) if (!zram->backing_dev) return; - fput(zram->bdev_file); /* hope filp_close flush all of IO */ filp_close(zram->backing_dev, NULL); zram->backing_dev = NULL; - zram->bdev_file = NULL; + zram->bdev = NULL; zram->disk->fops = &zram_devops; kvfree(zram->bitmap); zram->bitmap = NULL; @@ -473,10 +472,8 @@ static ssize_t backing_dev_store(struct device *dev, size_t sz; struct file *backing_dev = NULL; struct inode *inode; - struct address_space *mapping; unsigned int bitmap_sz; unsigned long nr_pages, *bitmap = NULL; - struct file *bdev_file = NULL; int err; struct zram *zram = dev_to_zram(dev); @@ -497,15 +494,14 @@ static ssize_t backing_dev_store(struct device *dev, if (sz > 0 && file_name[sz - 1] == '\n') file_name[sz - 1] = 0x00; - backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0); + backing_dev = filp_open(file_name, O_RDWR | O_LARGEFILE | O_EXCL, 0); if (IS_ERR(backing_dev)) { err = PTR_ERR(backing_dev); backing_dev = NULL; goto out; } - mapping = backing_dev->f_mapping; - inode = mapping->host; + inode = backing_dev->f_mapping->host; /* Support only block device in this moment */ if (!S_ISBLK(inode->i_mode)) { @@ -513,14 +509,6 @@ static ssize_t backing_dev_store(struct device *dev, goto out; } - bdev_file = bdev_file_open_by_dev(inode->i_rdev, - BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL); - if (IS_ERR(bdev_file)) { - err = PTR_ERR(bdev_file); - bdev_file = NULL; - goto out; - } - nr_pages = i_size_read(inode) >> PAGE_SHIFT; bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long); bitmap = kvzalloc(bitmap_sz, GFP_KERNEL); @@ -531,7 +519,7 @@ static ssize_t backing_dev_store(struct device *dev, reset_bdev(zram); - zram->bdev_file = bdev_file; + zram->bdev = I_BDEV(inode); zram->backing_dev = backing_dev; zram->bitmap = bitmap; zram->nr_pages = nr_pages; @@ -544,9 +532,6 @@ static ssize_t backing_dev_store(struct device *dev, out: kvfree(bitmap); - if (bdev_file) - fput(bdev_file); - if (backing_dev) filp_close(backing_dev, NULL); @@ -587,7 +572,7 @@ static void read_from_bdev_async(struct zram *zram, struct page *page, { struct bio *bio; - bio = bio_alloc(file_bdev(zram->bdev_file), 1, parent->bi_opf, GFP_NOIO); + bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO); bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9); __bio_add_page(bio, page, PAGE_SIZE, 0); bio_chain(bio, parent); @@ -703,7 +688,7 @@ static ssize_t writeback_store(struct device *dev, continue; } - bio_init(&bio, file_bdev(zram->bdev_file), &bio_vec, 1, + bio_init(&bio, zram->bdev, &bio_vec, 1, REQ_OP_WRITE | REQ_SYNC); bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9); __bio_add_page(&bio, page, PAGE_SIZE, 0); @@ -785,7 +770,7 @@ static void zram_sync_read(struct 
work_struct *work) struct bio_vec bv; struct bio bio; - bio_init(&bio, file_bdev(zw->zram->bdev_file), &bv, 1, REQ_OP_READ); + bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ); bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9); __bio_add_page(&bio, zw->page, PAGE_SIZE, 0); zw->error = submit_bio_wait(&bio); @@ -1568,7 +1553,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, * Corresponding ZRAM slot should be locked. */ static int zram_recompress(struct zram *zram, u32 index, struct page *page, - u32 threshold, u32 prio, u32 prio_max) + u64 *num_recomp_pages, u32 threshold, u32 prio, + u32 prio_max) { struct zcomp_strm *zstrm = NULL; unsigned long handle_old; @@ -1645,6 +1631,15 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page, if (!zstrm) return 0; + /* + * Decrement the limit (if set) on pages we can recompress, even + * when current recompression was unsuccessful or did not compress + * the page below the threshold, because we still spent resources + * on it. + */ + if (*num_recomp_pages) + *num_recomp_pages -= 1; + if (class_index_new >= class_index_old) { /* * Secondary algorithms failed to re-compress the page @@ -1710,6 +1705,7 @@ static ssize_t recompress_store(struct device *dev, struct zram *zram = dev_to_zram(dev); unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; char *args, *param, *val, *algo = NULL; + u64 num_recomp_pages = ULLONG_MAX; u32 mode = 0, threshold = 0; unsigned long index; struct page *page; @@ -1732,6 +1728,17 @@ static ssize_t recompress_store(struct device *dev, continue; } + if (!strcmp(param, "max_pages")) { + /* + * Limit the number of entries (pages) we attempt to + * recompress. + */ + ret = kstrtoull(val, 10, &num_recomp_pages); + if (ret) + return ret; + continue; + } + if (!strcmp(param, "threshold")) { /* * We will re-compress only idle objects equal or @@ -1788,6 +1795,9 @@ static ssize_t recompress_store(struct device *dev, for (index = 0; index < nr_pages; index++) { int err = 0; + if (!num_recomp_pages) + break; + zram_slot_lock(zram, index); if (!zram_allocated(zram, index)) @@ -1807,8 +1817,8 @@ static ssize_t recompress_store(struct device *dev, zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) goto next; - err = zram_recompress(zram, index, page, threshold, - prio, prio_max); + err = zram_recompress(zram, index, page, &num_recomp_pages, + threshold, prio, prio_max); next: zram_slot_unlock(zram, index); if (err) { diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 37bf29f34d..35e3221446 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -132,7 +132,7 @@ struct zram { spinlock_t wb_limit_lock; bool wb_limit_enable; u64 bd_wb_limit; - struct file *bdev_file; + struct block_device *bdev; unsigned long *bitmap; unsigned long nr_pages; #endif diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index bc211c3242..0b5f218ac5 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -478,5 +478,16 @@ config BT_NXPUART Say Y here to compile support for NXP Bluetooth UART device into the kernel, or say M here to compile as a module (btnxpuart). +config BT_INTEL_PCIE + tristate "Intel HCI PCIe driver" + depends on PCI + select BT_INTEL + select FW_LOADER + help + Intel Bluetooth transport driver for PCIe. + This driver is required if you want to use Intel Bluetooth device + with PCIe interface. 
+ Say Y here to compiler support for Intel Bluetooth PCIe device into + the kernel or say M to compile it as module (btintel_pcie) endmenu diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index 7a5967e9ac..0730d6684d 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_BT_HCIBTUSB) += btusb.o obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o obj-$(CONFIG_BT_INTEL) += btintel.o +obj-$(CONFIG_BT_INTEL_PCIE) += btintel_pcie.o btintel.o obj-$(CONFIG_BT_ATH3K) += ath3k.o obj-$(CONFIG_BT_MRVL) += btmrvl.o obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 6ba7f5d1b8..7ecc67deec 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -26,21 +26,11 @@ #define ECDSA_OFFSET 644 #define ECDSA_HEADER_LEN 320 -#define BTINTEL_PPAG_NAME "PPAG" - enum { DSM_SET_WDISABLE2_DELAY = 1, DSM_SET_RESET_METHOD = 3, }; -/* structure to store the PPAG data read from ACPI table */ -struct btintel_ppag { - u32 domain; - u32 mode; - acpi_status status; - struct hci_dev *hdev; -}; - #define CMD_WRITE_BOOT_PARAMS 0xfc0e struct cmd_write_boot_params { __le32 boot_addr; @@ -245,7 +235,7 @@ static int btintel_set_diag_combined(struct hci_dev *hdev, bool enable) return ret; } -static void btintel_hw_error(struct hci_dev *hdev, u8 code) +void btintel_hw_error(struct hci_dev *hdev, u8 code) { struct sk_buff *skb; u8 type = 0x00; @@ -277,6 +267,7 @@ static void btintel_hw_error(struct hci_dev *hdev, u8 code) kfree_skb(skb); } +EXPORT_SYMBOL_GPL(btintel_hw_error); int btintel_version_info(struct hci_dev *hdev, struct intel_version *ver) { @@ -455,8 +446,8 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver) } EXPORT_SYMBOL_GPL(btintel_read_version); -static int btintel_version_info_tlv(struct hci_dev *hdev, - struct intel_version_tlv *version) +int btintel_version_info_tlv(struct hci_dev *hdev, + struct intel_version_tlv *version) { const char *variant; @@ -481,6 +472,7 @@ static int btintel_version_info_tlv(struct hci_dev *hdev, case 0x19: /* Slr-F */ case 0x1b: /* Mgr */ case 0x1c: /* Gale Peak (GaP) */ + case 0x1e: /* BlazarI (Bzr) */ break; default: bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)", @@ -489,7 +481,7 @@ static int btintel_version_info_tlv(struct hci_dev *hdev, } switch (version->img_type) { - case 0x01: + case BTINTEL_IMG_BOOTLOADER: variant = "Bootloader"; /* It is required that every single firmware fragment is acknowledged * with a command complete event. 
If the boot parameters indicate @@ -521,7 +513,10 @@ static int btintel_version_info_tlv(struct hci_dev *hdev, version->min_fw_build_nn, version->min_fw_build_cw, 2000 + version->min_fw_build_yy); break; - case 0x03: + case BTINTEL_IMG_IML: + variant = "Intermediate loader"; + break; + case BTINTEL_IMG_OP: variant = "Firmware"; break; default: @@ -535,15 +530,16 @@ static int btintel_version_info_tlv(struct hci_dev *hdev, bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant, 2000 + (version->timestamp >> 8), version->timestamp & 0xff, version->build_type, version->build_num); - if (version->img_type == 0x03) + if (version->img_type == BTINTEL_IMG_OP) bt_dev_info(hdev, "Firmware SHA1: 0x%8.8x", version->git_sha1); return 0; } +EXPORT_SYMBOL_GPL(btintel_version_info_tlv); -static int btintel_parse_version_tlv(struct hci_dev *hdev, - struct intel_version_tlv *version, - struct sk_buff *skb) +int btintel_parse_version_tlv(struct hci_dev *hdev, + struct intel_version_tlv *version, + struct sk_buff *skb) { /* Consume Command Complete Status field */ skb_pull(skb, 1); @@ -645,6 +641,7 @@ static int btintel_parse_version_tlv(struct hci_dev *hdev, return 0; } +EXPORT_SYMBOL_GPL(btintel_parse_version_tlv); static int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *version) @@ -1172,7 +1169,7 @@ static int btintel_download_fw_tlv(struct hci_dev *hdev, * If the firmware version has changed that means it needs to be reset * to bootloader when operational so the new firmware can be loaded. */ - if (ver->img_type == 0x03) + if (ver->img_type == BTINTEL_IMG_OP) return -EINVAL; /* iBT hardware variants 0x0b, 0x0c, 0x11, 0x12, 0x13, 0x14 support @@ -1317,65 +1314,6 @@ static int btintel_read_debug_features(struct hci_dev *hdev, return 0; } -static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data, - void **ret) -{ - acpi_status status; - size_t len; - struct btintel_ppag *ppag = data; - union acpi_object *p, *elements; - struct acpi_buffer string = {ACPI_ALLOCATE_BUFFER, NULL}; - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; - struct hci_dev *hdev = ppag->hdev; - - status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); - if (ACPI_FAILURE(status)) { - bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); - return status; - } - - len = strlen(string.pointer); - if (len < strlen(BTINTEL_PPAG_NAME)) { - kfree(string.pointer); - return AE_OK; - } - - if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) { - kfree(string.pointer); - return AE_OK; - } - kfree(string.pointer); - - status = acpi_evaluate_object(handle, NULL, NULL, &buffer); - if (ACPI_FAILURE(status)) { - ppag->status = status; - bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); - return status; - } - - p = buffer.pointer; - ppag = (struct btintel_ppag *)data; - - if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) { - kfree(buffer.pointer); - bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d", - p->type, p->package.count); - ppag->status = AE_ERROR; - return AE_ERROR; - } - - elements = p->package.elements; - - /* PPAG table is located at element[1] */ - p = &elements[1]; - - ppag->domain = (u32)p->package.elements[0].integer.value; - ppag->mode = (u32)p->package.elements[1].integer.value; - ppag->status = AE_OK; - kfree(buffer.pointer); - return AE_CTRL_TERMINATE; -} - static int btintel_set_debug_features(struct hci_dev *hdev, const struct intel_debug_features 
*features) { @@ -2194,10 +2132,26 @@ static void btintel_get_fw_name_tlv(const struct intel_version_tlv *ver, char *fw_name, size_t len, const char *suffix) { + const char *format; /* The firmware file name for new generation controllers will be * ibt-- */ - snprintf(fw_name, len, "intel/ibt-%04x-%04x.%s", + switch (ver->cnvi_top & 0xfff) { + /* Only Blazar product supports downloading of intermediate loader + * image + */ + case BTINTEL_CNVI_BLAZARI: + if (ver->img_type == BTINTEL_IMG_BOOTLOADER) + format = "intel/ibt-%04x-%04x-iml.%s"; + else + format = "intel/ibt-%04x-%04x.%s"; + break; + default: + format = "intel/ibt-%04x-%04x.%s"; + break; + } + + snprintf(fw_name, len, format, INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top), INTEL_CNVX_TOP_STEP(ver->cnvi_top)), INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top), @@ -2230,7 +2184,7 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev, * It is not possible to use the Secure Boot Parameters in this * case since that command is only available in bootloader mode. */ - if (ver->img_type == 0x03) { + if (ver->img_type == BTINTEL_IMG_OP) { btintel_clear_flag(hdev, INTEL_BOOTLOADER); btintel_check_bdaddr(hdev); } else { @@ -2404,10 +2358,13 @@ error: static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver) { - struct btintel_ppag ppag; struct sk_buff *skb; struct hci_ppag_enable_cmd ppag_cmd; acpi_handle handle; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *p, *elements; + u32 domain, mode; + acpi_status status; /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */ switch (ver->cnvr_top & 0xFFF) { @@ -2425,22 +2382,34 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver return; } - memset(&ppag, 0, sizeof(ppag)); - - ppag.hdev = hdev; - ppag.status = AE_NOT_FOUND; - acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL, - btintel_ppag_callback, &ppag, NULL); - - if (ACPI_FAILURE(ppag.status)) { - if (ppag.status == AE_NOT_FOUND) { + status = acpi_evaluate_object(handle, "PPAG", NULL, &buffer); + if (ACPI_FAILURE(status)) { + if (status == AE_NOT_FOUND) { bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found"); return; } + bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); + return; + } + + p = buffer.pointer; + if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) { + bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d", + p->type, p->package.count); + kfree(buffer.pointer); return; } - if (ppag.domain != 0x12) { + elements = p->package.elements; + + /* PPAG table is located at element[1] */ + p = &elements[1]; + + domain = (u32)p->package.elements[0].integer.value; + mode = (u32)p->package.elements[1].integer.value; + kfree(buffer.pointer); + + if (domain != 0x12) { bt_dev_dbg(hdev, "PPAG-BT: Bluetooth domain is disabled in ACPI firmware"); return; } @@ -2451,19 +2420,22 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver * BIT 1 : 0 Disabled in China * 1 Enabled in China */ - if ((ppag.mode & 0x01) != BIT(0) && (ppag.mode & 0x02) != BIT(1)) { - bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in CB/BIOS"); + mode &= 0x03; + + if (!mode) { + bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in BIOS"); return; } - ppag_cmd.ppag_enable_flags = cpu_to_le32(ppag.mode); + ppag_cmd.ppag_enable_flags = cpu_to_le32(mode); - skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT); + skb = 
__hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), + &ppag_cmd, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb)); return; } - bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", ppag.mode); + bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", mode); kfree_skb(skb); } @@ -2577,8 +2549,8 @@ static void btintel_set_dsm_reset_method(struct hci_dev *hdev, data->acpi_reset_method = btintel_acpi_reset_method; } -static int btintel_bootloader_setup_tlv(struct hci_dev *hdev, - struct intel_version_tlv *ver) +int btintel_bootloader_setup_tlv(struct hci_dev *hdev, + struct intel_version_tlv *ver) { u32 boot_param; char ddcname[64]; @@ -2600,13 +2572,30 @@ static int btintel_bootloader_setup_tlv(struct hci_dev *hdev, return err; /* check if controller is already having an operational firmware */ - if (ver->img_type == 0x03) + if (ver->img_type == BTINTEL_IMG_OP) goto finish; err = btintel_boot(hdev, boot_param); if (err) return err; + err = btintel_read_version_tlv(hdev, ver); + if (err) + return err; + + /* If image type returned is BTINTEL_IMG_IML, then controller supports + * intermediae loader image + */ + if (ver->img_type == BTINTEL_IMG_IML) { + err = btintel_prepare_fw_download_tlv(hdev, ver, &boot_param); + if (err) + return err; + + err = btintel_boot(hdev, boot_param); + if (err) + return err; + } + btintel_clear_flag(hdev, INTEL_BOOTLOADER); btintel_get_fw_name_tlv(ver, ddcname, sizeof(ddcname), "ddc"); @@ -2645,8 +2634,9 @@ finish: return 0; } +EXPORT_SYMBOL_GPL(btintel_bootloader_setup_tlv); -static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant) +void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant) { switch (hw_variant) { /* Legacy bootloader devices that supports MSFT Extension */ @@ -2662,6 +2652,7 @@ static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant) case 0x19: case 0x1b: case 0x1c: + case 0x1e: hci_set_msft_opcode(hdev, 0xFC1E); break; default: @@ -2669,6 +2660,7 @@ static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant) break; } } +EXPORT_SYMBOL_GPL(btintel_set_msft_opcode); static void btintel_print_fseq_info(struct hci_dev *hdev) { @@ -2920,6 +2912,11 @@ static int btintel_setup_combined(struct hci_dev *hdev) err = -EINVAL; } + hci_set_hw_info(hdev, + "INTEL platform=%u variant=%u revision=%u", + ver.hw_platform, ver.hw_variant, + ver.hw_revision); + goto exit_error; } @@ -2996,6 +2993,7 @@ static int btintel_setup_combined(struct hci_dev *hdev) case 0x19: case 0x1b: case 0x1c: + case 0x1e: /* Display version information of TLV type */ btintel_version_info_tlv(hdev, &ver_tlv); @@ -3024,13 +3022,17 @@ static int btintel_setup_combined(struct hci_dev *hdev) break; } + hci_set_hw_info(hdev, "INTEL platform=%u variant=%u", + INTEL_HW_PLATFORM(ver_tlv.cnvi_bt), + INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); + exit_error: kfree_skb(skb); return err; } -static int btintel_shutdown_combined(struct hci_dev *hdev) +int btintel_shutdown_combined(struct hci_dev *hdev) { struct sk_buff *skb; int ret; @@ -3064,6 +3066,7 @@ static int btintel_shutdown_combined(struct hci_dev *hdev) return 0; } +EXPORT_SYMBOL_GPL(btintel_shutdown_combined); int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name) { diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index d19fcdb9ff..b5fea735e2 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -51,6 +51,12 @@ struct intel_tlv { u8 val[]; } __packed; +#define BTINTEL_CNVI_BLAZARI 0x900 
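The constant added in this btintel.h hunk (together with the BTINTEL_IMG_* values that follow) drives the new firmware-image selection in btintel_get_fw_name_tlv(): BlazarI parts, identified by the low 12 bits of cnvi_top, fetch an intermediate-loader ("-iml") image while still in the bootloader stage, and the plain operational image otherwise. As a standalone sketch, not part of the patch and using a made-up CNVi value plus none of the real INTEL_CNVX_TOP_* packing macros, the decision reduces to:

#include <stdio.h>

#define BTINTEL_CNVI_BLAZARI	0x900
#define BTINTEL_IMG_BOOTLOADER	0x01
#define BTINTEL_IMG_OP		0x03

/* Pick the firmware-name format string the way the patched
 * btintel_get_fw_name_tlv() does: only BlazarI parts that are still in
 * the bootloader stage load an intermediate ("-iml") image first.
 */
static const char *fw_format(unsigned int cnvi_top, unsigned int img_type)
{
	if ((cnvi_top & 0xfff) == BTINTEL_CNVI_BLAZARI &&
	    img_type == BTINTEL_IMG_BOOTLOADER)
		return "intel/ibt-%04x-%04x-iml.%s";

	return "intel/ibt-%04x-%04x.%s";
}

int main(void)
{
	/* 0x10900 is a made-up CNVi value whose low 12 bits select BlazarI */
	printf("%s\n", fw_format(0x10900, BTINTEL_IMG_BOOTLOADER));
	printf("%s\n", fw_format(0x10900, BTINTEL_IMG_OP));
	return 0;
}

The suffix stays a parameter because the same helper is also used for the DDC configuration name (the "ddc" suffix seen later in btintel_bootloader_setup_tlv()).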
+ +#define BTINTEL_IMG_BOOTLOADER 0x01 /* Bootloader image */ +#define BTINTEL_IMG_IML 0x02 /* Intermediate image */ +#define BTINTEL_IMG_OP 0x03 /* Operational image */ + struct intel_version_tlv { u32 cnvi_top; u32 cnvr_top; @@ -203,7 +209,7 @@ struct btintel_data { #define btintel_wait_on_flag_timeout(hdev, nr, m, to) \ wait_on_bit_timeout(btintel_get_flag(hdev), (nr), m, to) -#if IS_ENABLED(CONFIG_BT_INTEL) +#if IS_ENABLED(CONFIG_BT_INTEL) || IS_ENABLED(CONFIG_BT_INTEL_PCIE) int btintel_check_bdaddr(struct hci_dev *hdev); int btintel_enter_mfg(struct hci_dev *hdev); @@ -228,6 +234,16 @@ void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len); void btintel_secure_send_result(struct hci_dev *hdev, const void *ptr, unsigned int len); int btintel_set_quality_report(struct hci_dev *hdev, bool enable); +int btintel_version_info_tlv(struct hci_dev *hdev, + struct intel_version_tlv *version); +int btintel_parse_version_tlv(struct hci_dev *hdev, + struct intel_version_tlv *version, + struct sk_buff *skb); +void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant); +int btintel_bootloader_setup_tlv(struct hci_dev *hdev, + struct intel_version_tlv *ver); +int btintel_shutdown_combined(struct hci_dev *hdev); +void btintel_hw_error(struct hci_dev *hdev, u8 code); #else static inline int btintel_check_bdaddr(struct hci_dev *hdev) @@ -324,4 +340,37 @@ static inline int btintel_set_quality_report(struct hci_dev *hdev, bool enable) { return -ENODEV; } + +static inline int btintel_version_info_tlv(struct hci_dev *hdev, + struct intel_version_tlv *version) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_parse_version_tlv(struct hci_dev *hdev, + struct intel_version_tlv *version, + struct sk_buff *skb) +{ + return -EOPNOTSUPP; +} + +static inline void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant) + +{ +} + +static inline int btintel_bootloader_setup_tlv(struct hci_dev *hdev, + struct intel_version_tlv *ver) +{ + return -ENODEV; +} + +static inline int btintel_shutdown_combined(struct hci_dev *hdev) +{ + return -ENODEV; +} + +static inline void btintel_hw_error(struct hci_dev *hdev, u8 code) +{ +} #endif diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c new file mode 100644 index 0000000000..b8120b98a2 --- /dev/null +++ b/drivers/bluetooth/btintel_pcie.c @@ -0,0 +1,1363 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Bluetooth support for Intel PCIe devices + * + * Copyright (C) 2024 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "btintel.h" +#include "btintel_pcie.h" + +#define VERSION "0.1" + +#define BTINTEL_PCI_DEVICE(dev, subdev) \ + .vendor = PCI_VENDOR_ID_INTEL, \ + .device = (dev), \ + .subvendor = PCI_ANY_ID, \ + .subdevice = (subdev), \ + .driver_data = 0 + +#define POLL_INTERVAL_US 10 + +/* Intel Bluetooth PCIe device id table */ +static const struct pci_device_id btintel_pcie_table[] = { + { BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, btintel_pcie_table); + +/* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */ +#define BTINTEL_PCIE_HCI_TYPE_LEN 4 +#define BTINTEL_PCIE_HCI_CMD_PKT 0x00000001 +#define BTINTEL_PCIE_HCI_ACL_PKT 0x00000002 +#define BTINTEL_PCIE_HCI_SCO_PKT 0x00000003 +#define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004 + +static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia, + u16 queue_num) +{ + bt_dev_dbg(hdev, "IA: %s: 
tr-h:%02u tr-t:%02u cr-h:%02u cr-t:%02u", + queue_num == BTINTEL_PCIE_TXQ_NUM ? "TXQ" : "RXQ", + ia->tr_hia[queue_num], ia->tr_tia[queue_num], + ia->cr_hia[queue_num], ia->cr_tia[queue_num]); +} + +static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1, + u16 index) +{ + bt_dev_dbg(hdev, "RXQ:urbd1(%u) frbd_tag:%u status: 0x%x fixed:0x%x", + index, urbd1->frbd_tag, urbd1->status, urbd1->fixed); +} + +static int btintel_pcie_poll_bit(struct btintel_pcie_data *data, u32 offset, + u32 bits, u32 mask, int timeout_us) +{ + int t = 0; + u32 reg; + + do { + reg = btintel_pcie_rd_reg32(data, offset); + + if ((reg & mask) == (bits & mask)) + return t; + udelay(POLL_INTERVAL_US); + t += POLL_INTERVAL_US; + } while (t < timeout_us); + + return -ETIMEDOUT; +} + +static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry) +{ + u8 queue = entry->entry; + struct msix_entry *entries = entry - queue; + + return container_of(entries, struct btintel_pcie_data, msix_entries[0]); +} + +/* Set the doorbell for TXQ to notify the device that @index (actually index-1) + * of the TFD is updated and ready to transmit. + */ +static void btintel_pcie_set_tx_db(struct btintel_pcie_data *data, u16 index) +{ + u32 val; + + val = index; + val |= (BTINTEL_PCIE_TX_DB_VEC << 16); + + btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val); +} + +/* Copy the data to next(@tfd_index) data buffer and update the TFD(transfer + * descriptor) with the data length and the DMA address of the data buffer. + */ +static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index, + struct sk_buff *skb) +{ + struct data_buf *buf; + struct tfd *tfd; + + tfd = &txq->tfds[tfd_index]; + memset(tfd, 0, sizeof(*tfd)); + + buf = &txq->bufs[tfd_index]; + + tfd->size = skb->len; + tfd->addr = buf->data_p_addr; + + /* Copy the outgoing data to DMA buffer */ + memcpy(buf->data, skb->data, tfd->size); +} + +static int btintel_pcie_send_sync(struct btintel_pcie_data *data, + struct sk_buff *skb) +{ + int ret; + u16 tfd_index; + struct txq *txq = &data->txq; + + tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM]; + + if (tfd_index > txq->count) + return -ERANGE; + + /* Prepare for TX. It updates the TFD with the length of data and + * address of the DMA buffer, and copy the data to the DMA buffer + */ + btintel_pcie_prepare_tx(txq, tfd_index, skb); + + tfd_index = (tfd_index + 1) % txq->count; + data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index; + + /* Arm wait event condition */ + data->tx_wait_done = false; + + /* Set the doorbell to notify the device */ + btintel_pcie_set_tx_db(data, tfd_index); + + /* Wait for the complete interrupt - URBD0 */ + ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done, + msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS)); + if (!ret) + return -ETIME; + + return 0; +} + +/* Set the doorbell for RXQ to notify the device that @index (actually index-1) + * is available to receive the data + */ +static void btintel_pcie_set_rx_db(struct btintel_pcie_data *data, u16 index) +{ + u32 val; + + val = index; + val |= (BTINTEL_PCIE_RX_DB_VEC << 16); + + btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val); +} + +/* Update the FRBD (free buffer descriptor) with the @frbd_index and the + * DMA address of the free buffer. 
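+ * The tag stored in the FRBD is echoed back by the device in URBD1 as
+ * frbd_tag, which is how btintel_pcie_msix_rx_handle() finds the DMA
+ * buffer that a completion belongs to.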
+ */ +static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index) +{ + struct data_buf *buf; + struct frbd *frbd; + + /* Get the buffer of the FRBD for DMA */ + buf = &rxq->bufs[frbd_index]; + + frbd = &rxq->frbds[frbd_index]; + memset(frbd, 0, sizeof(*frbd)); + + /* Update FRBD */ + frbd->tag = frbd_index; + frbd->addr = buf->data_p_addr; +} + +static int btintel_pcie_submit_rx(struct btintel_pcie_data *data) +{ + u16 frbd_index; + struct rxq *rxq = &data->rxq; + + frbd_index = data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM]; + + if (frbd_index > rxq->count) + return -ERANGE; + + /* Prepare for RX submit. It updates the FRBD with the address of DMA + * buffer + */ + btintel_pcie_prepare_rx(rxq, frbd_index); + + frbd_index = (frbd_index + 1) % rxq->count; + data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM] = frbd_index; + ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM); + + /* Set the doorbell to notify the device */ + btintel_pcie_set_rx_db(data, frbd_index); + + return 0; +} + +static int btintel_pcie_start_rx(struct btintel_pcie_data *data) +{ + int i, ret; + + for (i = 0; i < BTINTEL_PCIE_RX_MAX_QUEUE; i++) { + ret = btintel_pcie_submit_rx(data); + if (ret) + return ret; + } + + return 0; +} + +static void btintel_pcie_reset_ia(struct btintel_pcie_data *data) +{ + memset(data->ia.tr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES); + memset(data->ia.tr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES); + memset(data->ia.cr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES); + memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES); +} + +static void btintel_pcie_reset_bt(struct btintel_pcie_data *data) +{ + btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, + BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET); +} + +/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in + * BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with + * BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0. + * Then the host reads firmware version from BTINTEL_CSR_F2D_MBX and the boot stage + * from BTINTEL_PCIE_CSR_BOOT_STAGE_REG. + */ +static int btintel_pcie_enable_bt(struct btintel_pcie_data *data) +{ + int err; + + data->gp0_received = false; + + /* Update the DMA address of CI struct to CSR */ + btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG, + data->ci_p_addr & 0xffffffff); + btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG, + (u64)data->ci_p_addr >> 32); + + /* Reset the cached value of boot stage. it is updated by the MSI-X + * gp0 interrupt handler. + */ + data->boot_stage_cache = 0x0; + + /* Set MAC_INIT bit to start primary bootloader */ + btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG); + + btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, + BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT); + + /* Wait until MAC_ACCESS is granted */ + err = btintel_pcie_poll_bit(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, + BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS, + BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS, + BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US); + if (err < 0) + return -ENODEV; + + /* MAC is ready. Enable BT FUNC */ + btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, + BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA | + BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT); + + btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG); + + /* wait for interrupt from the device after booting up to primary + * bootloader. 
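+	 * The gp0 handler caches BTINTEL_PCIE_CSR_BOOT_STAGE_REG into
+	 * boot_stage_cache, so the ROM-stage check below only has to test
+	 * bit 0 of that cached value.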
+ */ + err = wait_event_timeout(data->gp0_wait_q, data->gp0_received, + msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT)); + if (!err) + return -ETIME; + + /* Check cached boot stage is BTINTEL_PCIE_CSR_BOOT_STAGE_ROM(BIT(0)) */ + if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM) + return -ENODEV; + + return 0; +} + +/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in + * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response. + */ +static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data) +{ + u32 reg; + + /* This interrupt is for three different causes and it is not easy to + * know what causes the interrupt. So, it compares each register value + * with cached value and update it before it wake up the queue. + */ + reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG); + if (reg != data->boot_stage_cache) + data->boot_stage_cache = reg; + + reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG); + if (reg != data->img_resp_cache) + data->img_resp_cache = reg; + + data->gp0_received = true; + + /* If the boot stage is OP or IML, reset IA and start RX again */ + if (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW || + data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) { + btintel_pcie_reset_ia(data); + btintel_pcie_start_rx(data); + } + + wake_up(&data->gp0_wait_q); +} + +/* This function handles the MSX-X interrupt for rx queue 0 which is for TX + */ +static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data) +{ + u16 cr_tia, cr_hia; + struct txq *txq; + struct urbd0 *urbd0; + + cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM]; + cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM]; + + if (cr_tia == cr_hia) + return; + + txq = &data->txq; + + while (cr_tia != cr_hia) { + data->tx_wait_done = true; + wake_up(&data->tx_wait_q); + + urbd0 = &txq->urbd0s[cr_tia]; + + if (urbd0->tfd_index > txq->count) + return; + + cr_tia = (cr_tia + 1) % txq->count; + data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] = cr_tia; + ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_TXQ_NUM); + } +} + +/* Process the received rx data + * It check the frame header to identify the data type and create skb + * and calling HCI API + */ +static int btintel_pcie_recv_frame(struct btintel_pcie_data *data, + struct sk_buff *skb) +{ + int ret; + u8 pkt_type; + u16 plen; + u32 pcie_pkt_type; + struct sk_buff *new_skb; + void *pdata; + struct hci_dev *hdev = data->hdev; + + spin_lock(&data->hci_rx_lock); + + /* The first 4 bytes indicates the Intel PCIe specific packet type */ + pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN); + if (!pdata) { + bt_dev_err(hdev, "Corrupted packet received"); + ret = -EILSEQ; + goto exit_error; + } + + pcie_pkt_type = get_unaligned_le32(pdata); + + switch (pcie_pkt_type) { + case BTINTEL_PCIE_HCI_ACL_PKT: + if (skb->len >= HCI_ACL_HDR_SIZE) { + plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen); + pkt_type = HCI_ACLDATA_PKT; + } else { + bt_dev_err(hdev, "ACL packet is too short"); + ret = -EILSEQ; + goto exit_error; + } + break; + + case BTINTEL_PCIE_HCI_SCO_PKT: + if (skb->len >= HCI_SCO_HDR_SIZE) { + plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen; + pkt_type = HCI_SCODATA_PKT; + } else { + bt_dev_err(hdev, "SCO packet is too short"); + ret = -EILSEQ; + goto exit_error; + } + break; + + case BTINTEL_PCIE_HCI_EVT_PKT: + if (skb->len >= HCI_EVENT_HDR_SIZE) { + plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen; + pkt_type = HCI_EVENT_PKT; + } else { + 
bt_dev_err(hdev, "Event packet is too short"); + ret = -EILSEQ; + goto exit_error; + } + break; + default: + bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x", + pcie_pkt_type); + ret = -EINVAL; + goto exit_error; + } + + if (skb->len < plen) { + bt_dev_err(hdev, "Received corrupted packet. type: 0x%2.2x", + pkt_type); + ret = -EILSEQ; + goto exit_error; + } + + bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen); + + new_skb = bt_skb_alloc(plen, GFP_ATOMIC); + if (!new_skb) { + bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u", + skb->len); + ret = -ENOMEM; + goto exit_error; + } + + hci_skb_pkt_type(new_skb) = pkt_type; + skb_put_data(new_skb, skb->data, plen); + hdev->stat.byte_rx += plen; + + if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT) + ret = btintel_recv_event(hdev, new_skb); + else + ret = hci_recv_frame(hdev, new_skb); + +exit_error: + if (ret) + hdev->stat.err_rx++; + + spin_unlock(&data->hci_rx_lock); + + return ret; +} + +static void btintel_pcie_rx_work(struct work_struct *work) +{ + struct btintel_pcie_data *data = container_of(work, + struct btintel_pcie_data, rx_work); + struct sk_buff *skb; + int err; + struct hci_dev *hdev = data->hdev; + + /* Process the sk_buf in queue and send to the HCI layer */ + while ((skb = skb_dequeue(&data->rx_skb_q))) { + err = btintel_pcie_recv_frame(data, skb); + if (err) + bt_dev_err(hdev, "Failed to send received frame: %d", + err); + kfree_skb(skb); + } +} + +/* create sk_buff with data and save it to queue and start RX work */ +static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status, + void *buf) +{ + int ret, len; + struct rfh_hdr *rfh_hdr; + struct sk_buff *skb; + + rfh_hdr = buf; + + len = rfh_hdr->packet_len; + if (len <= 0) { + ret = -EINVAL; + goto resubmit; + } + + /* Remove RFH header */ + buf += sizeof(*rfh_hdr); + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) { + ret = -ENOMEM; + goto resubmit; + } + + skb_put_data(skb, buf, len); + skb_queue_tail(&data->rx_skb_q, skb); + queue_work(data->workqueue, &data->rx_work); + +resubmit: + ret = btintel_pcie_submit_rx(data); + + return ret; +} + +/* Handles the MSI-X interrupt for rx queue 1 which is for RX */ +static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data) +{ + u16 cr_hia, cr_tia; + struct rxq *rxq; + struct urbd1 *urbd1; + struct data_buf *buf; + int ret; + struct hci_dev *hdev = data->hdev; + + cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM]; + cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM]; + + bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia); + + /* Check CR_TIA and CR_HIA for change */ + if (cr_tia == cr_hia) { + bt_dev_warn(hdev, "RXQ: no new CD found"); + return; + } + + rxq = &data->rxq; + + /* The firmware sends multiple CD in a single MSI-X and it needs to + * process all received CDs in this interrupt. 
+ */ + while (cr_tia != cr_hia) { + urbd1 = &rxq->urbd1s[cr_tia]; + ipc_print_urbd1(data->hdev, urbd1, cr_tia); + + buf = &rxq->bufs[urbd1->frbd_tag]; + if (!buf) { + bt_dev_err(hdev, "RXQ: failed to get the DMA buffer for %d", + urbd1->frbd_tag); + return; + } + + ret = btintel_pcie_submit_rx_work(data, urbd1->status, + buf->data); + if (ret) { + bt_dev_err(hdev, "RXQ: failed to submit rx request"); + return; + } + + cr_tia = (cr_tia + 1) % rxq->count; + data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM] = cr_tia; + ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM); + } +} + +static irqreturn_t btintel_pcie_msix_isr(int irq, void *data) +{ + return IRQ_WAKE_THREAD; +} + +static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id) +{ + struct msix_entry *entry = dev_id; + struct btintel_pcie_data *data = btintel_pcie_get_data(entry); + u32 intr_fh, intr_hw; + + spin_lock(&data->irq_lock); + intr_fh = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES); + intr_hw = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES); + + /* Clear causes registers to avoid being handling the same cause */ + btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES, intr_fh); + btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES, intr_hw); + spin_unlock(&data->irq_lock); + + if (unlikely(!(intr_fh | intr_hw))) { + /* Ignore interrupt, inta == 0 */ + return IRQ_NONE; + } + + /* This interrupt is triggered by the firmware after updating + * boot_stage register and image_response register + */ + if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0) + btintel_pcie_msix_gp0_handler(data); + + /* For TX */ + if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) + btintel_pcie_msix_tx_handle(data); + + /* For RX */ + if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) + btintel_pcie_msix_rx_handle(data); + + /* + * Before sending the interrupt the HW disables it to prevent a nested + * interrupt. This is done by writing 1 to the corresponding bit in + * the mask register. After handling the interrupt, it should be + * re-enabled by clearing this bit. This register is defined as write 1 + * clear (W1C) register, meaning that it's cleared by writing 1 + * to the bit. + */ + btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST, + BIT(entry->entry)); + + return IRQ_HANDLED; +} + +/* This function requests the irq for MSI-X and registers the handlers per irq. + * Currently, it requests only 1 irq for all interrupt causes. 
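+ * (BTINTEL_PCIE_MSIX_VEC_MIN and BTINTEL_PCIE_MSIX_VEC_MAX are both 1 today,
+ * but the allocation loop below already walks alloc_vecs, so additional
+ * vectors could be wired up later without restructuring.)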
+ */ +static int btintel_pcie_setup_irq(struct btintel_pcie_data *data) +{ + int err; + int num_irqs, i; + + for (i = 0; i < BTINTEL_PCIE_MSIX_VEC_MAX; i++) + data->msix_entries[i].entry = i; + + num_irqs = pci_alloc_irq_vectors(data->pdev, BTINTEL_PCIE_MSIX_VEC_MIN, + BTINTEL_PCIE_MSIX_VEC_MAX, PCI_IRQ_MSIX); + if (num_irqs < 0) + return num_irqs; + + data->alloc_vecs = num_irqs; + data->msix_enabled = 1; + data->def_irq = 0; + + /* setup irq handler */ + for (i = 0; i < data->alloc_vecs; i++) { + struct msix_entry *msix_entry; + + msix_entry = &data->msix_entries[i]; + msix_entry->vector = pci_irq_vector(data->pdev, i); + + err = devm_request_threaded_irq(&data->pdev->dev, + msix_entry->vector, + btintel_pcie_msix_isr, + btintel_pcie_irq_msix_handler, + IRQF_SHARED, + KBUILD_MODNAME, + msix_entry); + if (err) { + pci_free_irq_vectors(data->pdev); + data->alloc_vecs = 0; + return err; + } + } + return 0; +} + +struct btintel_pcie_causes_list { + u32 cause; + u32 mask_reg; + u8 cause_num; +}; + +static struct btintel_pcie_causes_list causes_list[] = { + { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 }, + { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 }, + { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 }, +}; + +/* This function configures the interrupt masks for both HW_INT_CAUSES and + * FH_INT_CAUSES which are meaningful to us. + * + * After resetting BT function via PCIE FLR or FUNC_CTRL reset, the driver + * need to call this function again to configure since the masks + * are reset to 0xFFFFFFFF after reset. + */ +static void btintel_pcie_config_msix(struct btintel_pcie_data *data) +{ + int i; + int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE; + + /* Set Non Auto Clear Cause */ + for (i = 0; i < ARRAY_SIZE(causes_list); i++) { + btintel_pcie_wr_reg8(data, + BTINTEL_PCIE_CSR_MSIX_IVAR(causes_list[i].cause_num), + val); + btintel_pcie_clr_reg_bits(data, + causes_list[i].mask_reg, + causes_list[i].cause); + } + + /* Save the initial interrupt mask */ + data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK); + data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK); +} + +static int btintel_pcie_config_pcie(struct pci_dev *pdev, + struct btintel_pcie_data *data) +{ + int err; + + err = pcim_enable_device(pdev); + if (err) + return err; + + pci_set_master(pdev); + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + return err; + } + + err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME); + if (err) + return err; + + data->base_addr = pcim_iomap_table(pdev)[0]; + if (!data->base_addr) + return -ENODEV; + + err = btintel_pcie_setup_irq(data); + if (err) + return err; + + /* Configure MSI-X with causes list */ + btintel_pcie_config_msix(data); + + return 0; +} + +static void btintel_pcie_init_ci(struct btintel_pcie_data *data, + struct ctx_info *ci) +{ + ci->version = 0x1; + ci->size = sizeof(*ci); + ci->config = 0x0000; + ci->addr_cr_hia = data->ia.cr_hia_p_addr; + ci->addr_tr_tia = data->ia.tr_tia_p_addr; + ci->addr_cr_tia = data->ia.cr_tia_p_addr; + ci->addr_tr_hia = data->ia.tr_hia_p_addr; + ci->num_cr_ia = BTINTEL_PCIE_NUM_QUEUES; + ci->num_tr_ia = BTINTEL_PCIE_NUM_QUEUES; + ci->addr_urbdq0 = data->txq.urbd0s_p_addr; + ci->addr_tfdq = data->txq.tfds_p_addr; + ci->num_tfdq = data->txq.count; + ci->num_urbdq0 = 
data->txq.count; + ci->tfdq_db_vec = BTINTEL_PCIE_TXQ_NUM; + ci->urbdq0_db_vec = BTINTEL_PCIE_TXQ_NUM; + ci->rbd_size = BTINTEL_PCIE_RBD_SIZE_4K; + ci->addr_frbdq = data->rxq.frbds_p_addr; + ci->num_frbdq = data->rxq.count; + ci->frbdq_db_vec = BTINTEL_PCIE_RXQ_NUM; + ci->addr_urbdq1 = data->rxq.urbd1s_p_addr; + ci->num_urbdq1 = data->rxq.count; + ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM; +} + +static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data, + struct txq *txq) +{ + /* Free data buffers first */ + dma_free_coherent(&data->pdev->dev, txq->count * BTINTEL_PCIE_BUFFER_SIZE, + txq->buf_v_addr, txq->buf_p_addr); + kfree(txq->bufs); +} + +static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data, + struct txq *txq) +{ + int i; + struct data_buf *buf; + + /* Allocate the same number of buffers as the descriptor */ + txq->bufs = kmalloc_array(txq->count, sizeof(*buf), GFP_KERNEL); + if (!txq->bufs) + return -ENOMEM; + + /* Allocate full chunk of data buffer for DMA first and do indexing and + * initialization next, so it can be freed easily + */ + txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev, + txq->count * BTINTEL_PCIE_BUFFER_SIZE, + &txq->buf_p_addr, + GFP_KERNEL | __GFP_NOWARN); + if (!txq->buf_v_addr) { + kfree(txq->bufs); + return -ENOMEM; + } + memset(txq->buf_v_addr, 0, txq->count * BTINTEL_PCIE_BUFFER_SIZE); + + /* Setup the allocated DMA buffer to bufs. Each data_buf should + * have virtual address and physical address + */ + for (i = 0; i < txq->count; i++) { + buf = &txq->bufs[i]; + buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE); + buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE); + } + + return 0; +} + +static void btintel_pcie_free_rxq_bufs(struct btintel_pcie_data *data, + struct rxq *rxq) +{ + /* Free data buffers first */ + dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE, + rxq->buf_v_addr, rxq->buf_p_addr); + kfree(rxq->bufs); +} + +static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data, + struct rxq *rxq) +{ + int i; + struct data_buf *buf; + + /* Allocate the same number of buffers as the descriptor */ + rxq->bufs = kmalloc_array(rxq->count, sizeof(*buf), GFP_KERNEL); + if (!rxq->bufs) + return -ENOMEM; + + /* Allocate full chunk of data buffer for DMA first and do indexing and + * initialization next, so it can be freed easily + */ + rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev, + rxq->count * BTINTEL_PCIE_BUFFER_SIZE, + &rxq->buf_p_addr, + GFP_KERNEL | __GFP_NOWARN); + if (!rxq->buf_v_addr) { + kfree(rxq->bufs); + return -ENOMEM; + } + memset(rxq->buf_v_addr, 0, rxq->count * BTINTEL_PCIE_BUFFER_SIZE); + + /* Setup the allocated DMA buffer to bufs. 
Each data_buf should + * have virtual address and physical address + */ + for (i = 0; i < rxq->count; i++) { + buf = &rxq->bufs[i]; + buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE); + buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE); + } + + return 0; +} + +static void btintel_pcie_setup_ia(struct btintel_pcie_data *data, + dma_addr_t p_addr, void *v_addr, + struct ia *ia) +{ + /* TR Head Index Array */ + ia->tr_hia_p_addr = p_addr; + ia->tr_hia = v_addr; + + /* TR Tail Index Array */ + ia->tr_tia_p_addr = p_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES; + ia->tr_tia = v_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES; + + /* CR Head index Array */ + ia->cr_hia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2); + ia->cr_hia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2); + + /* CR Tail Index Array */ + ia->cr_tia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3); + ia->cr_tia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3); +} + +static void btintel_pcie_free(struct btintel_pcie_data *data) +{ + btintel_pcie_free_rxq_bufs(data, &data->rxq); + btintel_pcie_free_txq_bufs(data, &data->txq); + + dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr); + dma_pool_destroy(data->dma_pool); +} + +/* Allocate tx and rx queues, any related data structures and buffers. + */ +static int btintel_pcie_alloc(struct btintel_pcie_data *data) +{ + int err = 0; + size_t total; + dma_addr_t p_addr; + void *v_addr; + + /* Allocate the chunk of DMA memory for descriptors, index array, and + * context information, instead of allocating individually. + * The DMA memory for data buffer is allocated while setting up the + * each queue. + * + * Total size is sum of the following + * + size of TFD * Number of descriptors in queue + * + size of URBD0 * Number of descriptors in queue + * + size of FRBD * Number of descriptors in queue + * + size of URBD1 * Number of descriptors in queue + * + size of index * Number of queues(2) * type of index array(4) + * + size of context information + */ + total = (sizeof(struct tfd) + sizeof(struct urbd0) + sizeof(struct frbd) + + sizeof(struct urbd1)) * BTINTEL_DESCS_COUNT; + + /* Add the sum of size of index array and size of ci struct */ + total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info); + + /* Allocate DMA Pool */ + data->dma_pool = dma_pool_create(KBUILD_MODNAME, &data->pdev->dev, + total, BTINTEL_PCIE_DMA_POOL_ALIGNMENT, 0); + if (!data->dma_pool) { + err = -ENOMEM; + goto exit_error; + } + + v_addr = dma_pool_zalloc(data->dma_pool, GFP_KERNEL | __GFP_NOWARN, + &p_addr); + if (!v_addr) { + dma_pool_destroy(data->dma_pool); + err = -ENOMEM; + goto exit_error; + } + + data->dma_p_addr = p_addr; + data->dma_v_addr = v_addr; + + /* Setup descriptor count */ + data->txq.count = BTINTEL_DESCS_COUNT; + data->rxq.count = BTINTEL_DESCS_COUNT; + + /* Setup tfds */ + data->txq.tfds_p_addr = p_addr; + data->txq.tfds = v_addr; + + p_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT); + v_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT); + + /* Setup urbd0 */ + data->txq.urbd0s_p_addr = p_addr; + data->txq.urbd0s = v_addr; + + p_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT); + v_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT); + + /* Setup FRBD*/ + data->rxq.frbds_p_addr = p_addr; + data->rxq.frbds = v_addr; + + p_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT); + v_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT); + + /* Setup urbd1 */ + data->rxq.urbd1s_p_addr 
= p_addr; + data->rxq.urbd1s = v_addr; + + p_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT); + v_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT); + + /* Setup data buffers for txq */ + err = btintel_pcie_setup_txq_bufs(data, &data->txq); + if (err) + goto exit_error_pool; + + /* Setup data buffers for rxq */ + err = btintel_pcie_setup_rxq_bufs(data, &data->rxq); + if (err) + goto exit_error_txq; + + /* Setup Index Array */ + btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia); + + /* Setup Context Information */ + p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4; + v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4; + + data->ci = v_addr; + data->ci_p_addr = p_addr; + + /* Initialize the CI */ + btintel_pcie_init_ci(data, data->ci); + + return 0; + +exit_error_txq: + btintel_pcie_free_txq_bufs(data, &data->txq); +exit_error_pool: + dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr); + dma_pool_destroy(data->dma_pool); +exit_error: + return err; +} + +static int btintel_pcie_open(struct hci_dev *hdev) +{ + bt_dev_dbg(hdev, ""); + + return 0; +} + +static int btintel_pcie_close(struct hci_dev *hdev) +{ + bt_dev_dbg(hdev, ""); + + return 0; +} + +static int btintel_pcie_inject_cmd_complete(struct hci_dev *hdev, __u16 opcode) +{ + struct sk_buff *skb; + struct hci_event_hdr *hdr; + struct hci_ev_cmd_complete *evt; + + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr)); + hdr->evt = HCI_EV_CMD_COMPLETE; + hdr->plen = sizeof(*evt) + 1; + + evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt)); + evt->ncmd = 0x01; + evt->opcode = cpu_to_le16(opcode); + + *(u8 *)skb_put(skb, 1) = 0x00; + + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + + return hci_recv_frame(hdev, skb); +} + +static int btintel_pcie_send_frame(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct btintel_pcie_data *data = hci_get_drvdata(hdev); + int ret; + u32 type; + + /* Due to the fw limitation, the type header of the packet should be + * 4 bytes unlike 1 byte for UART. In UART, the firmware can read + * the first byte to get the packet type and redirect the rest of data + * packet to the right handler. + * + * But for PCIe, THF(Transfer Flow Handler) fetches the 4 bytes of data + * from DMA memory and by the time it reads the first 4 bytes, it has + * already consumed some part of packet. Thus the packet type indicator + * for iBT PCIe is 4 bytes. + * + * Luckily, when HCI core creates the skb, it allocates 8 bytes of + * head room for profile and driver use, and before sending the data + * to the device, append the iBT PCIe packet type in the front. + */ + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + type = BTINTEL_PCIE_HCI_CMD_PKT; + if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) { + struct hci_command_hdr *cmd = (void *)skb->data; + __u16 opcode = le16_to_cpu(cmd->opcode); + + /* When the 0xfc01 command is issued to boot into + * the operational firmware, it will actually not + * send a command complete event. To keep the flow + * control working inject that event here. 
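+			 * btintel_pcie_inject_cmd_complete() builds that event
+			 * with ncmd set to 1 and a zero (success) status byte,
+			 * so the HCI core's command credit is restored.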
+ */ + if (opcode == 0xfc01) + btintel_pcie_inject_cmd_complete(hdev, opcode); + } + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + type = BTINTEL_PCIE_HCI_ACL_PKT; + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + type = BTINTEL_PCIE_HCI_SCO_PKT; + hdev->stat.sco_tx++; + break; + default: + bt_dev_err(hdev, "Unknown HCI packet type"); + return -EILSEQ; + } + memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &type, + BTINTEL_PCIE_HCI_TYPE_LEN); + + ret = btintel_pcie_send_sync(data, skb); + if (ret) { + hdev->stat.err_tx++; + bt_dev_err(hdev, "Failed to send frame (%d)", ret); + goto exit_error; + } + hdev->stat.byte_tx += skb->len; + kfree_skb(skb); + +exit_error: + return ret; +} + +static void btintel_pcie_release_hdev(struct btintel_pcie_data *data) +{ + struct hci_dev *hdev; + + hdev = data->hdev; + hci_unregister_dev(hdev); + hci_free_dev(hdev); + data->hdev = NULL; +} + +static int btintel_pcie_setup(struct hci_dev *hdev) +{ + const u8 param[1] = { 0xFF }; + struct intel_version_tlv ver_tlv; + struct sk_buff *skb; + int err; + + BT_DBG("%s", hdev->name); + + skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reading Intel version command failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + /* Check the status */ + if (skb->data[0]) { + bt_dev_err(hdev, "Intel Read Version command failed (%02x)", + skb->data[0]); + err = -EIO; + goto exit_error; + } + + /* Apply the common HCI quirks for Intel device */ + set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks); + + /* Set up the quality report callback for Intel devices */ + hdev->set_quality_report = btintel_set_quality_report; + + memset(&ver_tlv, 0, sizeof(ver_tlv)); + /* For TLV type device, parse the tlv data */ + err = btintel_parse_version_tlv(hdev, &ver_tlv, skb); + if (err) { + bt_dev_err(hdev, "Failed to parse TLV version information"); + goto exit_error; + } + + switch (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)) { + case 0x37: + break; + default: + bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)", + INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)); + err = -EINVAL; + goto exit_error; + } + + /* Check for supported iBT hardware variants of this firmware + * loading method. + * + * This check has been put in place to ensure correct forward + * compatibility options when newer hardware variants come + * along. 
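+	 * For now only hardware variant 0x1e (BlazarI) is accepted below;
+	 * btintel_pcie_table above likewise lists a single device ID (0xA876).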
+ */ + switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) { + case 0x1e: /* BzrI */ + /* Display version information of TLV type */ + btintel_version_info_tlv(hdev, &ver_tlv); + + /* Apply the device specific HCI quirks for TLV based devices + * + * All TLV based devices support WBS + */ + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + + /* Apply LE States quirk from solar onwards */ + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + + /* Setup MSFT Extension support */ + btintel_set_msft_opcode(hdev, + INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); + + err = btintel_bootloader_setup_tlv(hdev, &ver_tlv); + if (err) + goto exit_error; + break; + default: + bt_dev_err(hdev, "Unsupported Intel hw variant (%u)", + INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); + err = -EINVAL; + break; + } + +exit_error: + kfree_skb(skb); + + return err; +} + +static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data) +{ + int err; + struct hci_dev *hdev; + + hdev = hci_alloc_dev(); + if (!hdev) + return -ENOMEM; + + hdev->bus = HCI_PCI; + hci_set_drvdata(hdev, data); + + data->hdev = hdev; + SET_HCIDEV_DEV(hdev, &data->pdev->dev); + + hdev->manufacturer = 2; + hdev->open = btintel_pcie_open; + hdev->close = btintel_pcie_close; + hdev->send = btintel_pcie_send_frame; + hdev->setup = btintel_pcie_setup; + hdev->shutdown = btintel_shutdown_combined; + hdev->hw_error = btintel_hw_error; + hdev->set_diag = btintel_set_diag; + hdev->set_bdaddr = btintel_set_bdaddr; + + err = hci_register_dev(hdev); + if (err < 0) { + BT_ERR("Failed to register to hdev (%d)", err); + goto exit_error; + } + + return 0; + +exit_error: + hci_free_dev(hdev); + return err; +} + +static int btintel_pcie_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int err; + struct btintel_pcie_data *data; + + if (!pdev) + return -ENODEV; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->pdev = pdev; + + spin_lock_init(&data->irq_lock); + spin_lock_init(&data->hci_rx_lock); + + init_waitqueue_head(&data->gp0_wait_q); + data->gp0_received = false; + + init_waitqueue_head(&data->tx_wait_q); + data->tx_wait_done = false; + + data->workqueue = alloc_ordered_workqueue(KBUILD_MODNAME, WQ_HIGHPRI); + if (!data->workqueue) + return -ENOMEM; + + skb_queue_head_init(&data->rx_skb_q); + INIT_WORK(&data->rx_work, btintel_pcie_rx_work); + + data->boot_stage_cache = 0x00; + data->img_resp_cache = 0x00; + + err = btintel_pcie_config_pcie(pdev, data); + if (err) + goto exit_error; + + pci_set_drvdata(pdev, data); + + err = btintel_pcie_alloc(data); + if (err) + goto exit_error; + + err = btintel_pcie_enable_bt(data); + if (err) + goto exit_error; + + /* CNV information (CNVi and CNVr) is in CSR */ + data->cnvi = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_HW_REV_REG); + + data->cnvr = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_RF_ID_REG); + + err = btintel_pcie_start_rx(data); + if (err) + goto exit_error; + + err = btintel_pcie_setup_hdev(data); + if (err) + goto exit_error; + + bt_dev_dbg(data->hdev, "cnvi: 0x%8.8x cnvr: 0x%8.8x", data->cnvi, + data->cnvr); + return 0; + +exit_error: + /* reset device before exit */ + btintel_pcie_reset_bt(data); + + pci_clear_master(pdev); + + pci_set_drvdata(pdev, NULL); + + return err; +} + +static void btintel_pcie_remove(struct pci_dev *pdev) +{ + struct btintel_pcie_data *data; + + data = pci_get_drvdata(pdev); + + btintel_pcie_reset_bt(data); + for (int i = 0; i < data->alloc_vecs; i++) { + struct msix_entry *msix_entry; + + msix_entry = 
&data->msix_entries[i]; + free_irq(msix_entry->vector, msix_entry); + } + + pci_free_irq_vectors(pdev); + + btintel_pcie_release_hdev(data); + + flush_work(&data->rx_work); + + destroy_workqueue(data->workqueue); + + btintel_pcie_free(data); + + pci_clear_master(pdev); + + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver btintel_pcie_driver = { + .name = KBUILD_MODNAME, + .id_table = btintel_pcie_table, + .probe = btintel_pcie_probe, + .remove = btintel_pcie_remove, +}; +module_pci_driver(btintel_pcie_driver); + +MODULE_AUTHOR("Tedd Ho-Jeong An "); +MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h new file mode 100644 index 0000000000..baaff70420 --- /dev/null +++ b/drivers/bluetooth/btintel_pcie.h @@ -0,0 +1,430 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * + * Bluetooth support for Intel PCIe devices + * + * Copyright (C) 2024 Intel Corporation + */ + +/* Control and Status Register(BTINTEL_PCIE_CSR) */ +#define BTINTEL_PCIE_CSR_BASE (0x000) +#define BTINTEL_PCIE_CSR_FUNC_CTRL_REG (BTINTEL_PCIE_CSR_BASE + 0x024) +#define BTINTEL_PCIE_CSR_HW_REV_REG (BTINTEL_PCIE_CSR_BASE + 0x028) +#define BTINTEL_PCIE_CSR_RF_ID_REG (BTINTEL_PCIE_CSR_BASE + 0x09C) +#define BTINTEL_PCIE_CSR_BOOT_STAGE_REG (BTINTEL_PCIE_CSR_BASE + 0x108) +#define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG (BTINTEL_PCIE_CSR_BASE + 0x118) +#define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG (BTINTEL_PCIE_CSR_BASE + 0x11C) +#define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG (BTINTEL_PCIE_CSR_BASE + 0x12C) +#define BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR (BTINTEL_PCIE_CSR_BASE + 0x460) + +/* BTINTEL_PCIE_CSR Function Control Register */ +#define BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA (BIT(0)) +#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT (BIT(6)) +#define BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT (BIT(7)) +#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS (BIT(20)) +#define BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET (BIT(31)) + +/* Value for BTINTEL_PCIE_CSR_BOOT_STAGE register */ +#define BTINTEL_PCIE_CSR_BOOT_STAGE_ROM (BIT(0)) +#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML (BIT(1)) +#define BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW (BIT(2)) +#define BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN (BIT(10)) +#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN (BIT(11)) +#define BTINTEL_PCIE_CSR_BOOT_STAGE_MAC_ACCESS_ON (BIT(16)) +#define BTINTEL_PCIE_CSR_BOOT_STAGE_ALIVE (BIT(23)) + +/* Registers for MSI-X */ +#define BTINTEL_PCIE_CSR_MSIX_BASE (0x2000) +#define BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0800) +#define BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0804) +#define BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0808) +#define BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK (BTINTEL_PCIE_CSR_MSIX_BASE + 0x080C) +#define BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0810) +#define BTINTEL_PCIE_CSR_MSIX_AUTOMASK_EN (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0814) +#define BTINTEL_PCIE_CSR_MSIX_IVAR_BASE (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0880) +#define BTINTEL_PCIE_CSR_MSIX_IVAR(cause) (BTINTEL_PCIE_CSR_MSIX_IVAR_BASE + (cause)) + +/* Causes for the FH register interrupts */ +enum msix_fh_int_causes { + BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0 = BIT(0), /* cause 0 */ + BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1 = BIT(1), /* cause 1 */ +}; + +/* Causes for the HW register interrupts */ +enum msix_hw_int_causes { + BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0 = BIT(0), /* cause 32 */ 
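+	/* "cause 32" is the IVAR cause number (0x20) that
+	 * btintel_pcie_config_msix() programs for GP0 via causes_list[];
+	 * the bit position within the HW causes register itself is 0.
+	 */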
+}; + +#define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE BIT(7) + +/* Minimum and Maximum number of MSI-X Vector + * Intel Bluetooth PCIe support only 1 vector + */ +#define BTINTEL_PCIE_MSIX_VEC_MAX 1 +#define BTINTEL_PCIE_MSIX_VEC_MIN 1 + +/* Default poll time for MAC access during init */ +#define BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US 200000 + +/* Default interrupt timeout in msec */ +#define BTINTEL_DEFAULT_INTR_TIMEOUT 3000 + +/* The number of descriptors in TX/RX queues */ +#define BTINTEL_DESCS_COUNT 16 + +/* Number of Queue for TX and RX + * It indicates the index of the IA(Index Array) + */ +enum { + BTINTEL_PCIE_TXQ_NUM = 0, + BTINTEL_PCIE_RXQ_NUM = 1, + BTINTEL_PCIE_NUM_QUEUES = 2, +}; + +/* The size of DMA buffer for TX and RX in bytes */ +#define BTINTEL_PCIE_BUFFER_SIZE 4096 + +/* DMA allocation alignment */ +#define BTINTEL_PCIE_DMA_POOL_ALIGNMENT 256 + +#define BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS 500 + +/* Doorbell vector for TFD */ +#define BTINTEL_PCIE_TX_DB_VEC 0 + +/* Number of pending RX requests for downlink */ +#define BTINTEL_PCIE_RX_MAX_QUEUE 6 + +/* Doorbell vector for FRBD */ +#define BTINTEL_PCIE_RX_DB_VEC 513 + +/* RBD buffer size mapping */ +#define BTINTEL_PCIE_RBD_SIZE_4K 0x04 + +/* + * Struct for Context Information (v2) + * + * All members are write-only for host and read-only for device. + * + * @version: Version of context information + * @size: Size of context information + * @config: Config with which host wants peripheral to execute + * Subset of capability register published by device + * @addr_tr_hia: Address of TR Head Index Array + * @addr_tr_tia: Address of TR Tail Index Array + * @addr_cr_hia: Address of CR Head Index Array + * @addr_cr_tia: Address of CR Tail Index Array + * @num_tr_ia: Number of entries in TR Index Arrays + * @num_cr_ia: Number of entries in CR Index Arrays + * @rbd_siz: RBD Size { 0x4=4K } + * @addr_tfdq: Address of TFD Queue(tx) + * @addr_urbdq0: Address of URBD Queue(tx) + * @num_tfdq: Number of TFD in TFD Queue(tx) + * @num_urbdq0: Number of URBD in URBD Queue(tx) + * @tfdq_db_vec: Queue number of TFD + * @urbdq0_db_vec: Queue number of URBD + * @addr_frbdq: Address of FRBD Queue(rx) + * @addr_urbdq1: Address of URBD Queue(rx) + * @num_frbdq: Number of FRBD in FRBD Queue(rx) + * @frbdq_db_vec: Queue number of FRBD + * @num_urbdq1: Number of URBD in URBD Queue(rx) + * @urbdq_db_vec: Queue number of URBDQ1 + * @tr_msi_vec: Transfer Ring MSI-X Vector + * @cr_msi_vec: Completion Ring MSI-X Vector + * @dbgc_addr: DBGC first fragment address + * @dbgc_size: DBGC buffer size + * @early_enable: Enarly debug enable + * @dbg_output_mode: Debug output mode + * Bit[4] DBGC O/P { 0=SRAM, 1=DRAM(not relevant for NPK) } + * Bit[5] DBGC I/P { 0=BDBG, 1=DBGI } + * Bits[6:7] DBGI O/P(relevant if bit[5] = 1) + * 0=BT DBGC, 1=WiFi DBGC, 2=NPK } + * @dbg_preset: Debug preset + * @ext_addr: Address of context information extension + * @ext_size: Size of context information part + * + * Total 38 DWords + */ +struct ctx_info { + u16 version; + u16 size; + u32 config; + u32 reserved_dw02; + u32 reserved_dw03; + u64 addr_tr_hia; + u64 addr_tr_tia; + u64 addr_cr_hia; + u64 addr_cr_tia; + u16 num_tr_ia; + u16 num_cr_ia; + u32 rbd_size:4, + reserved_dw13:28; + u64 addr_tfdq; + u64 addr_urbdq0; + u16 num_tfdq; + u16 num_urbdq0; + u16 tfdq_db_vec; + u16 urbdq0_db_vec; + u64 addr_frbdq; + u64 addr_urbdq1; + u16 num_frbdq; + u16 frbdq_db_vec; + u16 num_urbdq1; + u16 urbdq_db_vec; + u16 tr_msi_vec; + u16 cr_msi_vec; + u32 reserved_dw27; + u64 dbgc_addr; + u32 
dbgc_size; + u32 early_enable:1, + reserved_dw31:3, + dbg_output_mode:4, + dbg_preset:8, + reserved2_dw31:16; + u64 ext_addr; + u32 ext_size; + u32 test_param; + u32 reserved_dw36; + u32 reserved_dw37; +} __packed; + +/* Transfer Descriptor for TX + * @type: Not in use. Set to 0x0 + * @size: Size of data in the buffer + * @addr: DMA Address of buffer + */ +struct tfd { + u8 type; + u16 size; + u8 reserved; + u64 addr; + u32 reserved1; +} __packed; + +/* URB Descriptor for TX + * @tfd_index: Index of TFD in TFDQ + 1 + * @num_txq: Queue index of TFD Queue + * @cmpl_count: Completion count. Always 0x01 + * @immediate_cmpl: Immediate completion flag: Always 0x01 + */ +struct urbd0 { + u32 tfd_index:16, + num_txq:8, + cmpl_count:4, + reserved:3, + immediate_cmpl:1; +} __packed; + +/* FRB Descriptor for RX + * @tag: RX buffer tag (index of RX buffer queue) + * @addr: Address of buffer + */ +struct frbd { + u32 tag:16, + reserved:16; + u32 reserved2; + u64 addr; +} __packed; + +/* URB Descriptor for RX + * @frbd_tag: Tag from FRBD + * @status: Status + */ +struct urbd1 { + u32 frbd_tag:16, + status:1, + reserved:14, + fixed:1; +} __packed; + +/* RFH header in RX packet + * @packet_len: Length of the data in the buffer + * @rxq: RX Queue number + * @cmd_id: Command ID. Not in Use + */ +struct rfh_hdr { + u64 packet_len:16, + rxq:6, + reserved:10, + cmd_id:16, + reserved1:16; +} __packed; + +/* Internal data buffer + * @data: pointer to the data buffer + * @p_addr: physical address of data buffer + */ +struct data_buf { + u8 *data; + dma_addr_t data_p_addr; +}; + +/* Index Array */ +struct ia { + dma_addr_t tr_hia_p_addr; + u16 *tr_hia; + dma_addr_t tr_tia_p_addr; + u16 *tr_tia; + dma_addr_t cr_hia_p_addr; + u16 *cr_hia; + dma_addr_t cr_tia_p_addr; + u16 *cr_tia; +}; + +/* Structure for TX Queue + * @count: Number of descriptors + * @tfds: Array of TFD + * @urbd0s: Array of URBD0 + * @buf: Array of data_buf structure + */ +struct txq { + u16 count; + + dma_addr_t tfds_p_addr; + struct tfd *tfds; + + dma_addr_t urbd0s_p_addr; + struct urbd0 *urbd0s; + + dma_addr_t buf_p_addr; + void *buf_v_addr; + struct data_buf *bufs; +}; + +/* Structure for RX Queue + * @count: Number of descriptors + * @frbds: Array of FRBD + * @urbd1s: Array of URBD1 + * @buf: Array of data_buf structure + */ +struct rxq { + u16 count; + + dma_addr_t frbds_p_addr; + struct frbd *frbds; + + dma_addr_t urbd1s_p_addr; + struct urbd1 *urbd1s; + + dma_addr_t buf_p_addr; + void *buf_v_addr; + struct data_buf *bufs; +}; + +/* struct btintel_pcie_data + * @pdev: pci device + * @hdev: hdev device + * @flags: driver state + * @irq_lock: spinlock for MSI-X + * @hci_rx_lock: spinlock for HCI RX flow + * @base_addr: pci base address (from BAR) + * @msix_entries: array of MSI-X entries + * @msix_enabled: true if MSI-X is enabled; + * @alloc_vecs: number of interrupt vectors allocated + * @def_irq: default irq for all causes + * @fh_init_mask: initial unmasked rxq causes + * @hw_init_mask: initial unmaksed hw causes + * @boot_stage_cache: cached value of boot stage register + * @img_resp_cache: cached value of image response register + * @cnvi: CNVi register value + * @cnvr: CNVr register value + * @gp0_received: condition for gp0 interrupt + * @gp0_wait_q: wait_q for gp0 interrupt + * @tx_wait_done: condition for tx interrupt + * @tx_wait_q: wait_q for tx interrupt + * @workqueue: workqueue for RX work + * @rx_skb_q: SKB queue for RX packet + * @rx_work: RX work struct to process the RX packet in @rx_skb_q + * @dma_pool: DMA pool for 
descriptors, index array and ci + * @dma_p_addr: DMA address for pool + * @dma_v_addr: address of pool + * @ci_p_addr: DMA address for CI struct + * @ci: CI struct + * @ia: Index Array struct + * @txq: TX Queue struct + * @rxq: RX Queue struct + */ +struct btintel_pcie_data { + struct pci_dev *pdev; + struct hci_dev *hdev; + + unsigned long flags; + /* lock used in MSI-X interrupt */ + spinlock_t irq_lock; + /* lock to serialize rx events */ + spinlock_t hci_rx_lock; + + void __iomem *base_addr; + + struct msix_entry msix_entries[BTINTEL_PCIE_MSIX_VEC_MAX]; + bool msix_enabled; + u32 alloc_vecs; + u32 def_irq; + + u32 fh_init_mask; + u32 hw_init_mask; + + u32 boot_stage_cache; + u32 img_resp_cache; + + u32 cnvi; + u32 cnvr; + + bool gp0_received; + wait_queue_head_t gp0_wait_q; + + bool tx_wait_done; + wait_queue_head_t tx_wait_q; + + struct workqueue_struct *workqueue; + struct sk_buff_head rx_skb_q; + struct work_struct rx_work; + + struct dma_pool *dma_pool; + dma_addr_t dma_p_addr; + void *dma_v_addr; + + dma_addr_t ci_p_addr; + struct ctx_info *ci; + struct ia ia; + struct txq txq; + struct rxq rxq; +}; + +static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data, + u32 offset) +{ + return ioread32(data->base_addr + offset); +} + +static inline void btintel_pcie_wr_reg8(struct btintel_pcie_data *data, + u32 offset, u8 val) +{ + iowrite8(val, data->base_addr + offset); +} + +static inline void btintel_pcie_wr_reg32(struct btintel_pcie_data *data, + u32 offset, u32 val) +{ + iowrite32(val, data->base_addr + offset); +} + +static inline void btintel_pcie_set_reg_bits(struct btintel_pcie_data *data, + u32 offset, u32 bits) +{ + u32 r; + + r = ioread32(data->base_addr + offset); + r |= bits; + iowrite32(r, data->base_addr + offset); +} + +static inline void btintel_pcie_clr_reg_bits(struct btintel_pcie_data *data, + u32 offset, u32 bits) +{ + u32 r; + + r = ioread32(data->base_addr + offset); + r &= ~bits; + iowrite32(r, data->base_addr + offset); +} diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index d76c799553..85b7f2bb42 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -1736,7 +1736,6 @@ static struct sdio_driver bt_mrvl_sdio = { .probe = btmrvl_sdio_probe, .remove = btmrvl_sdio_remove, .drv = { - .owner = THIS_MODULE, .coredump = btmrvl_sdio_coredump, .pm = &btmrvl_sdio_pm_ops, } diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index ff4868c83c..8ded9ef808 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -1519,7 +1519,6 @@ static struct sdio_driver btmtksdio_driver = { .remove = btmtksdio_remove, .id_table = btmtksdio_table, .drv = { - .owner = THIS_MODULE, .pm = BTMTKSDIO_PM_OPS, } }; diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index 9bfa9a6ad5..6a863328b8 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -187,6 +187,11 @@ struct btnxpuart_dev { #define NXP_NAK_V3 0x7b #define NXP_CRC_ERROR_V3 0x7c +/* Bootloader signature error codes */ +#define NXP_ACK_RX_TIMEOUT 0x0002 /* ACK not received from host */ +#define NXP_HDR_RX_TIMEOUT 0x0003 /* FW Header chunk not received */ +#define NXP_DATA_RX_TIMEOUT 0x0004 /* FW Data chunk not received */ + #define HDR_LEN 16 #define NXP_RECV_CHIP_VER_V1 \ @@ -277,6 +282,17 @@ struct nxp_bootloader_cmd { __be32 crc; } __packed; +struct nxp_v3_rx_timeout_nak { + u8 nak; + __le32 offset; + u8 crc; +} __packed; + +union nxp_v3_rx_timeout_nak_u { + 
struct nxp_v3_rx_timeout_nak pkt; + u8 buf[6]; +}; + static u8 crc8_table[CRC8_TABLE_SIZE]; /* Default configurations */ @@ -899,6 +915,32 @@ free_skb: return 0; } +static void nxp_handle_fw_download_error(struct hci_dev *hdev, struct v3_data_req *req) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + __u32 offset = __le32_to_cpu(req->offset); + __u16 err = __le16_to_cpu(req->error); + union nxp_v3_rx_timeout_nak_u nak_tx_buf; + + switch (err) { + case NXP_ACK_RX_TIMEOUT: + case NXP_HDR_RX_TIMEOUT: + case NXP_DATA_RX_TIMEOUT: + nak_tx_buf.pkt.nak = NXP_NAK_V3; + nak_tx_buf.pkt.offset = __cpu_to_le32(offset); + nak_tx_buf.pkt.crc = crc8(crc8_table, nak_tx_buf.buf, + sizeof(nak_tx_buf) - 1, 0xff); + serdev_device_write_buf(nxpdev->serdev, nak_tx_buf.buf, + sizeof(nak_tx_buf)); + break; + default: + bt_dev_dbg(hdev, "Unknown bootloader error code: %d", err); + break; + + } + +} + static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); @@ -913,7 +955,12 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) if (!req || !nxpdev->fw) goto free_skb; - nxp_send_ack(NXP_ACK_V3, hdev); + if (!req->error) { + nxp_send_ack(NXP_ACK_V3, hdev); + } else { + nxp_handle_fw_download_error(hdev, req); + goto free_skb; + } len = __le16_to_cpu(req->len); @@ -940,9 +987,6 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); goto free_skb; } - if (req->error) - bt_dev_dbg(hdev, "FW Download received err 0x%02x from chip", - req->error); offset = __le32_to_cpu(req->offset); if (offset < nxpdev->fw_v3_offset_correction) { diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 35fb26cbf2..dfbbac9224 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -13,8 +13,6 @@ #include "btqca.h" -#define VERSION "0.1" - int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver, enum qca_btsoc_type soc_type) { @@ -55,11 +53,6 @@ int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver, } edl = (struct edl_event_hdr *)(skb->data); - if (!edl) { - bt_dev_err(hdev, "QCA TLV with no header"); - err = -EILSEQ; - goto out; - } if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != rtype) { @@ -121,11 +114,6 @@ static int qca_read_fw_build_info(struct hci_dev *hdev) } edl = (struct edl_event_hdr *)(skb->data); - if (!edl) { - bt_dev_err(hdev, "QCA read fw build info with no header"); - err = -EILSEQ; - goto out; - } if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != EDL_GET_BUILD_INFO_CMD) { @@ -185,11 +173,6 @@ static int qca_send_patch_config_cmd(struct hci_dev *hdev) } edl = (struct edl_event_hdr *)(skb->data); - if (!edl) { - bt_dev_err(hdev, "QCA Patch config with no header"); - err = -EILSEQ; - goto out; - } if (edl->cresp != EDL_PATCH_CONFIG_RES_EVT || edl->rtype != EDL_PATCH_CONFIG_CMD) { bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp, @@ -504,11 +487,6 @@ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size, } edl = (struct edl_event_hdr *)(skb->data); - if (!edl) { - bt_dev_err(hdev, "TLV with no header"); - err = -EILSEQ; - goto out; - } if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != rtype) { bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x", @@ -739,6 +717,19 @@ static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size, snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid); } +static inline 
void qca_get_nvm_name_generic(struct qca_fw_config *cfg, + const char *stem, u8 rom_ver, u16 bid) +{ + if (bid == 0x0) + snprintf(cfg->fwname, sizeof(cfg->fwname), "qca/%snv%02x.bin", stem, rom_ver); + else if (bid & 0xff00) + snprintf(cfg->fwname, sizeof(cfg->fwname), + "qca/%snv%02x.b%x", stem, rom_ver, bid); + else + snprintf(cfg->fwname, sizeof(cfg->fwname), + "qca/%snv%02x.b%02x", stem, rom_ver, bid); +} + int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, enum qca_btsoc_type soc_type, struct qca_btsoc_version ver, const char *firmware_name) @@ -819,7 +810,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, /* Give the controller some time to get ready to receive the NVM */ msleep(10); - if (soc_type == QCA_QCA2066) + if (soc_type == QCA_QCA2066 || soc_type == QCA_WCN7850) qca_read_fw_board_id(hdev, &boardid); /* Download NVM configuration */ @@ -861,8 +852,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, "qca/hpnv%02x.bin", rom_ver); break; case QCA_WCN7850: - snprintf(config.fwname, sizeof(config.fwname), - "qca/hmtnv%02x.bin", rom_ver); + qca_get_nvm_name_generic(&config, "hmt", rom_ver, boardid); break; default: @@ -963,6 +953,5 @@ EXPORT_SYMBOL_GPL(qca_set_bdaddr); MODULE_AUTHOR("Ben Young Tae Kim "); -MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION); -MODULE_VERSION(VERSION); +MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family"); MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index 215433fd76..bb5207d7a8 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -5,33 +5,33 @@ * Copyright (c) 2015 The Linux Foundation. All rights reserved. */ -#define EDL_PATCH_CMD_OPCODE (0xFC00) -#define EDL_NVM_ACCESS_OPCODE (0xFC0B) -#define EDL_WRITE_BD_ADDR_OPCODE (0xFC14) -#define EDL_PATCH_CMD_LEN (1) -#define EDL_PATCH_VER_REQ_CMD (0x19) -#define EDL_PATCH_TLV_REQ_CMD (0x1E) -#define EDL_GET_BUILD_INFO_CMD (0x20) -#define EDL_GET_BID_REQ_CMD (0x23) -#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01) -#define EDL_PATCH_CONFIG_CMD (0x28) -#define MAX_SIZE_PER_TLV_SEGMENT (243) -#define QCA_PRE_SHUTDOWN_CMD (0xFC08) -#define QCA_DISABLE_LOGGING (0xFC17) - -#define EDL_CMD_REQ_RES_EVT (0x00) -#define EDL_PATCH_VER_RES_EVT (0x19) -#define EDL_APP_VER_RES_EVT (0x02) -#define EDL_TVL_DNLD_RES_EVT (0x04) -#define EDL_CMD_EXE_STATUS_EVT (0x00) -#define EDL_SET_BAUDRATE_RSP_EVT (0x92) -#define EDL_NVM_ACCESS_CODE_EVT (0x0B) -#define EDL_PATCH_CONFIG_RES_EVT (0x00) -#define QCA_DISABLE_LOGGING_SUB_OP (0x14) +#define EDL_PATCH_CMD_OPCODE 0xFC00 +#define EDL_NVM_ACCESS_OPCODE 0xFC0B +#define EDL_WRITE_BD_ADDR_OPCODE 0xFC14 +#define EDL_PATCH_CMD_LEN 1 +#define EDL_PATCH_VER_REQ_CMD 0x19 +#define EDL_PATCH_TLV_REQ_CMD 0x1E +#define EDL_GET_BUILD_INFO_CMD 0x20 +#define EDL_GET_BID_REQ_CMD 0x23 +#define EDL_NVM_ACCESS_SET_REQ_CMD 0x01 +#define EDL_PATCH_CONFIG_CMD 0x28 +#define MAX_SIZE_PER_TLV_SEGMENT 243 +#define QCA_PRE_SHUTDOWN_CMD 0xFC08 +#define QCA_DISABLE_LOGGING 0xFC17 + +#define EDL_CMD_REQ_RES_EVT 0x00 +#define EDL_PATCH_VER_RES_EVT 0x19 +#define EDL_APP_VER_RES_EVT 0x02 +#define EDL_TVL_DNLD_RES_EVT 0x04 +#define EDL_CMD_EXE_STATUS_EVT 0x00 +#define EDL_SET_BAUDRATE_RSP_EVT 0x92 +#define EDL_NVM_ACCESS_CODE_EVT 0x0B +#define EDL_PATCH_CONFIG_RES_EVT 0x00 +#define QCA_DISABLE_LOGGING_SUB_OP 0x14 #define EDL_TAG_ID_BD_ADDR 2 -#define EDL_TAG_ID_HCI (17) -#define EDL_TAG_ID_DEEP_SLEEP (27) +#define EDL_TAG_ID_HCI 17 +#define EDL_TAG_ID_DEEP_SLEEP 27 #define 
QCA_WCN3990_POWERON_PULSE 0xFC #define QCA_WCN3990_POWEROFF_PULSE 0xC0 @@ -39,7 +39,7 @@ #define QCA_HCI_CC_OPCODE 0xFC00 #define QCA_HCI_CC_SUCCESS 0x00 -#define QCA_WCN3991_SOC_ID (0x40014320) +#define QCA_WCN3991_SOC_ID 0x40014320 /* QCA chipset version can be decided by patch and SoC * version, combination with upper 2 bytes from SoC @@ -48,11 +48,11 @@ #define get_soc_ver(soc_id, rom_ver) \ ((le32_to_cpu(soc_id) << 16) | (le16_to_cpu(rom_ver))) -#define QCA_HSP_GF_SOC_ID 0x1200 -#define QCA_HSP_GF_SOC_MASK 0x0000ff00 +#define QCA_HSP_GF_SOC_ID 0x1200 +#define QCA_HSP_GF_SOC_MASK 0x0000ff00 enum qca_baudrate { - QCA_BAUDRATE_115200 = 0, + QCA_BAUDRATE_115200 = 0, QCA_BAUDRATE_57600, QCA_BAUDRATE_38400, QCA_BAUDRATE_19200, @@ -71,7 +71,7 @@ enum qca_baudrate { QCA_BAUDRATE_1600000, QCA_BAUDRATE_3200000, QCA_BAUDRATE_3500000, - QCA_BAUDRATE_AUTO = 0xFE, + QCA_BAUDRATE_AUTO = 0xFE, QCA_BAUDRATE_RESERVED }; diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c index 11c7e04bf3..88dbb2f3fa 100644 --- a/drivers/bluetooth/btqcomsmd.c +++ b/drivers/bluetooth/btqcomsmd.c @@ -197,7 +197,7 @@ destroy_acl_channel: return ret; } -static int btqcomsmd_remove(struct platform_device *pdev) +static void btqcomsmd_remove(struct platform_device *pdev) { struct btqcomsmd *btq = platform_get_drvdata(pdev); @@ -206,8 +206,6 @@ static int btqcomsmd_remove(struct platform_device *pdev) rpmsg_destroy_ept(btq->cmd_channel); rpmsg_destroy_ept(btq->acl_channel); - - return 0; } static const struct of_device_id btqcomsmd_of_match[] = { @@ -218,7 +216,7 @@ MODULE_DEVICE_TABLE(of, btqcomsmd_of_match); static struct platform_driver btqcomsmd_driver = { .probe = btqcomsmd_probe, - .remove = btqcomsmd_remove, + .remove_new = btqcomsmd_remove, .driver = { .name = "btqcomsmd", .of_match_table = btqcomsmd_of_match, diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index cc50de69e8..4f1e37b4f7 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -1339,6 +1339,13 @@ int btrtl_setup_realtek(struct hci_dev *hdev) btrtl_set_quirks(hdev, btrtl_dev); + hci_set_hw_info(hdev, + "RTL lmp_subver=%u hci_rev=%u hci_ver=%u hci_bus=%u", + btrtl_dev->ic_info->lmp_subver, + btrtl_dev->ic_info->hci_rev, + btrtl_dev->ic_info->hci_ver, + btrtl_dev->ic_info->hci_bus); + btrtl_free(btrtl_dev); return ret; } diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index fb716849b6..789c492df6 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -477,6 +477,7 @@ static const struct usb_device_id quirks_table[] = { { USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0035), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0036), .driver_info = BTUSB_INTEL_COMBINED }, + { USB_DEVICE(0x8087, 0x0037), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0038), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED | @@ -554,6 +555,10 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3572), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Realtek 8852BT/8852BE-VT Bluetooth devices */ { USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK | @@ -588,6 
+593,9 @@ static const struct usb_device_id quirks_table[] = { { USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, @@ -597,6 +605,9 @@ static const struct usb_device_id quirks_table[] = { { USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, @@ -612,10 +623,12 @@ static const struct usb_device_id quirks_table[] = { { USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, - { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK | + { USB_DEVICE(0x13d3, 0x3606), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, - { USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK | + + /* MediaTek MT7922 Bluetooth devices */ + { USB_DEVICE(0x13d3, 0x3585), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, @@ -626,12 +639,6 @@ static const struct usb_device_id quirks_table[] = { { USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, - { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, - { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0e2), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, @@ -656,14 +663,38 @@ static const struct usb_device_id quirks_table[] = { { USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3614), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3615), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x35f5, 0x7922), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, /* Additional MediaTek MT7925 Bluetooth devices */ + { USB_DEVICE(0x0489, 0xe113), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3603), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, /* Additional Realtek 8723AE Bluetooth devices */ { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK }, @@ -2951,7 +2982,7 @@ static int btusb_mtk_uhw_reg_read(struct btusb_data *data, u32 reg, u32 *val) err = 
usb_control_msg(data->udev, pipe, 0x01, 0xDE, reg >> 16, reg & 0xffff, - buf, 4, USB_CTRL_SET_TIMEOUT); + buf, 4, USB_CTRL_GET_TIMEOUT); if (err < 0) { bt_dev_err(hdev, "Failed to read uhw reg(%d)", err); goto err_free_buf; @@ -2979,7 +3010,7 @@ static int btusb_mtk_reg_read(struct btusb_data *data, u32 reg, u32 *val) err = usb_control_msg(data->udev, pipe, 0x63, USB_TYPE_VENDOR | USB_DIR_IN, reg >> 16, reg & 0xffff, - buf, size, USB_CTRL_SET_TIMEOUT); + buf, size, USB_CTRL_GET_TIMEOUT); if (err < 0) goto err_free_buf; @@ -3694,7 +3725,7 @@ static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request, */ pipe = usb_rcvctrlpipe(udev, 0); err = usb_control_msg(udev, pipe, request, USB_TYPE_VENDOR | USB_DIR_IN, - 0, 0, buf, size, USB_CTRL_SET_TIMEOUT); + 0, 0, buf, size, USB_CTRL_GET_TIMEOUT); if (err < 0) { dev_err(&udev->dev, "Failed to access otp area (%d)", err); goto done; diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 874d23089b..89d4c22245 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -1293,7 +1293,7 @@ static int bcm_probe(struct platform_device *pdev) return 0; } -static int bcm_remove(struct platform_device *pdev) +static void bcm_remove(struct platform_device *pdev) { struct bcm_device *dev = platform_get_drvdata(pdev); @@ -1302,8 +1302,6 @@ static int bcm_remove(struct platform_device *pdev) mutex_unlock(&bcm_device_lock); dev_info(&pdev->dev, "%s device unregistered.\n", dev->name); - - return 0; } static const struct hci_uart_proto bcm_proto = { @@ -1487,7 +1485,7 @@ static const struct acpi_device_id bcm_acpi_match[] = { { "BCM2EA1" }, { "BCM2EA2", (long)&bcm43430_device_data }, { "BCM2EA3", (long)&bcm43430_device_data }, - { "BCM2EA4" }, + { "BCM2EA4", (long)&bcm43430_device_data }, /* bcm43455 */ { "BCM2EA5" }, { "BCM2EA6" }, { "BCM2EA7" }, @@ -1509,7 +1507,7 @@ static const struct dev_pm_ops bcm_pm_ops = { static struct platform_driver bcm_driver = { .probe = bcm_probe, - .remove = bcm_remove, + .remove_new = bcm_remove, .driver = { .name = "hci_bcm", .acpi_match_table = ACPI_PTR(bcm_acpi_match), diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c index d90858ea2f..a77a30fdc6 100644 --- a/drivers/bluetooth/hci_bcm4377.c +++ b/drivers/bluetooth/hci_bcm4377.c @@ -32,7 +32,7 @@ enum bcm4377_chip { #define BCM4378_DEVICE_ID 0x5f69 #define BCM4387_DEVICE_ID 0x5f71 -#define BCM4377_TIMEOUT 1000 +#define BCM4377_TIMEOUT msecs_to_jiffies(1000) /* * These devices only support DMA transactions inside a 32bit window diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index 78afb9a348..999ccd5bb4 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -537,7 +537,7 @@ static int intel_setup(struct hci_uart *hu) int speed_change = 0; int err; - bt_dev_dbg(hdev, "start intel_setup"); + bt_dev_dbg(hdev, ""); hu->hdev->set_diag = btintel_set_diag; hu->hdev->set_bdaddr = btintel_set_bdaddr; @@ -591,12 +591,12 @@ static int intel_setup(struct hci_uart *hu) return -EINVAL; } - /* Check for supported iBT hardware variants of this firmware - * loading method. - * - * This check has been put in place to ensure correct forward - * compatibility options when newer hardware variants come along. - */ + /* Check for supported iBT hardware variants of this firmware + * loading method. + * + * This check has been put in place to ensure correct forward + * compatibility options when newer hardware variants come along. 
+ */ switch (ver.hw_variant) { case 0x0b: /* LnP */ case 0x0c: /* WsP */ @@ -777,7 +777,7 @@ static int intel_setup(struct hci_uart *hu) rettime = ktime_get(); delta = ktime_sub(rettime, calltime); - duration = (unsigned long long) ktime_to_ns(delta) >> 10; + duration = (unsigned long long)ktime_to_ns(delta) >> 10; bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration); @@ -822,7 +822,7 @@ done: rettime = ktime_get(); delta = ktime_sub(rettime, calltime); - duration = (unsigned long long) ktime_to_ns(delta) >> 10; + duration = (unsigned long long)ktime_to_ns(delta) >> 10; bt_dev_info(hdev, "Device booted in %llu usecs", duration); @@ -977,6 +977,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count) ARRAY_SIZE(intel_recv_pkts)); if (IS_ERR(intel->rx_skb)) { int err = PTR_ERR(intel->rx_skb); + bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); intel->rx_skb = NULL; return err; @@ -1190,7 +1191,7 @@ no_irq: return 0; } -static int intel_remove(struct platform_device *pdev) +static void intel_remove(struct platform_device *pdev) { struct intel_device *idev = platform_get_drvdata(pdev); @@ -1201,13 +1202,11 @@ static int intel_remove(struct platform_device *pdev) mutex_unlock(&intel_device_list_lock); dev_info(&pdev->dev, "unregistered.\n"); - - return 0; } static struct platform_driver intel_driver = { .probe = intel_probe, - .remove = intel_remove, + .remove_new = intel_remove, .driver = { .name = "hci_intel", .acpi_match_table = ACPI_PTR(intel_acpi_match), diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c index 18208e152a..40bd83825c 100644 --- a/drivers/bluetooth/virtio_bt.c +++ b/drivers/bluetooth/virtio_bt.c @@ -415,7 +415,6 @@ static const unsigned int virtbt_features[] = { static struct virtio_driver virtbt_driver = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .feature_table = virtbt_features, .feature_table_size = ARRAY_SIZE(virtbt_features), .id_table = virtbt_table, diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index d5e7fa9173..64cd2ee03a 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -163,6 +163,16 @@ config QCOM_SSC_BLOCK_BUS i2c/spi/uart controllers, a hexagon core, and a clock controller which provides clocks for the above. +config STM32_FIREWALL + bool "STM32 Firewall framework" + depends on (ARCH_STM32 || COMPILE_TEST) && OF + select OF_DYNAMIC + help + Say y to enable STM32 firewall framework and its services. Firewall + controllers will be able to register to the framework. Access for + hardware resources linked to a firewall controller can be requested + through this STM32 framework. 
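As context for the framework this Kconfig entry enables, here is a minimal sketch of how a firewall controller driver could register itself with it. The my_fw_* names and the "vendor,my-firewall" compatible are hypothetical and used only for illustration; struct stm32_firewall_controller, STM32_PERIPHERAL_FIREWALL and stm32_firewall_controller_register() are the interfaces introduced by drivers/bus/stm32_firewall.{c,h} later in this diff.

/* Illustrative sketch only: a hypothetical controller registering with the
 * STM32 firewall framework added by this series.
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "stm32_firewall.h"

static int my_fw_grant_access(struct stm32_firewall_controller *ctrl, u32 id)
{
	/* Inspect the hardware configuration for entry 'id' here and return
	 * 0 when access is allowed, or -EACCES/-EINVAL otherwise.
	 */
	return 0;
}

static void my_fw_release_access(struct stm32_firewall_controller *ctrl, u32 id)
{
	/* Release any resource (e.g. a semaphore) taken in grant_access(). */
}

static int my_fw_probe(struct platform_device *pdev)
{
	struct stm32_firewall_controller *ctrl;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->dev = &pdev->dev;
	ctrl->name = dev_driver_string(&pdev->dev);
	ctrl->type = STM32_PERIPHERAL_FIREWALL;
	ctrl->max_entries = 16;		/* hypothetical entry count */
	ctrl->grant_access = my_fw_grant_access;
	ctrl->release_access = my_fw_release_access;

	/* Make the controller visible to stm32_firewall_get_firewall() users */
	return stm32_firewall_controller_register(ctrl);
}

static const struct of_device_id my_fw_of_match[] = {
	{ .compatible = "vendor,my-firewall" },	/* hypothetical */
	{}
};
MODULE_DEVICE_TABLE(of, my_fw_of_match);

static struct platform_driver my_fw_driver = {
	.probe = my_fw_probe,
	.driver = {
		.name = "my-firewall",
		.of_match_table = my_fw_of_match,
	},
};
module_platform_driver(my_fw_driver);

Once registered, consumers described by an "access-controllers" phandle in the device tree can resolve this controller through stm32_firewall_get_firewall() and request access with stm32_firewall_grant_access(), as implemented in stm32_firewall.c below.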
+ config SUN50I_DE2_BUS bool "Allwinner A64 DE2 Bus Driver" default ARM64 diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index d90eed189a..cddd4984d6 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o obj-$(CONFIG_QCOM_SSC_BLOCK_BUS) += qcom-ssc-block-bus.o +obj-$(CONFIG_STM32_FIREWALL) += stm32_firewall.o stm32_rifsc.o stm32_etzpc.o obj-$(CONFIG_SUN50I_DE2_BUS) += sun50i-de2.o obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o obj-$(CONFIG_OF) += simple-pm-bus.o diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c index 65ae758f31..ee29162da4 100644 --- a/drivers/bus/brcmstb_gisb.c +++ b/drivers/bus/brcmstb_gisb.c @@ -410,6 +410,7 @@ static const struct of_device_id brcmstb_gisb_arb_of_match[] = { { .compatible = "brcm,bcm74165-gisb-arb", .data = gisb_offsets_bcm74165 }, { }, }; +MODULE_DEVICE_TABLE(of, brcmstb_gisb_arb_of_match); static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev) { diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index f8f674adf1..4acfac73ca 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -90,7 +90,7 @@ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct m struct mhi_ring_element *event; int ret; - event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); if (!event) return -ENOMEM; @@ -109,7 +109,7 @@ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_stat struct mhi_ring_element *event; int ret; - event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); if (!event) return -ENOMEM; @@ -127,7 +127,7 @@ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_e struct mhi_ring_element *event; int ret; - event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); if (!event) return -ENOMEM; @@ -146,7 +146,7 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e struct mhi_ring_element *event; int ret; - event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); if (!event) return -ENOMEM; @@ -438,7 +438,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; write_offset = len - buf_left; - buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA); + buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL); if (!buf_addr) return -ENOMEM; @@ -1481,14 +1481,14 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el", sizeof(struct mhi_ring_element), 0, - SLAB_CACHE_DMA, NULL); + 0, NULL); if (!mhi_cntrl->ev_ring_el_cache) { ret = -ENOMEM; goto err_free_cmd; } mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0, - SLAB_CACHE_DMA, NULL); + 0, NULL); if (!mhi_cntrl->tre_buf_cache) { ret = -ENOMEM; goto err_destroy_ev_ring_el_cache; diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index 44f934981d..173f799187 100644 --- 
a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -127,6 +127,30 @@ static ssize_t soc_reset_store(struct device *dev, } static DEVICE_ATTR_WO(soc_reset); +static ssize_t trigger_edl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + unsigned long val; + int ret; + + ret = kstrtoul(buf, 10, &val); + if (ret < 0) + return ret; + + if (!val) + return -EINVAL; + + ret = mhi_cntrl->edl_trigger(mhi_cntrl); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_WO(trigger_edl); + static struct attribute *mhi_dev_attrs[] = { &dev_attr_serial_number.attr, &dev_attr_oem_pk_hash.attr, @@ -517,11 +541,9 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) dev_dbg(dev, "Initializing MHI registers\n"); /* Read channel db offset */ - ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val); - if (ret) { - dev_err(dev, "Unable to read CHDBOFF register\n"); - return -EIO; - } + ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val); + if (ret) + return ret; if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) { dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n", @@ -1018,6 +1040,12 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, if (ret) goto err_release_dev; + if (mhi_cntrl->edl_trigger) { + ret = sysfs_create_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr); + if (ret) + goto err_release_dev; + } + mhi_cntrl->mhi_dev = mhi_dev; mhi_create_debugfs(mhi_cntrl); @@ -1051,6 +1079,9 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) mhi_deinit_free_irq(mhi_cntrl); mhi_destroy_debugfs(mhi_cntrl); + if (mhi_cntrl->edl_trigger) + sysfs_remove_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr); + destroy_workqueue(mhi_cntrl->hiprio_wq); kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_event); diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h index 5fe49311b8..aaad40a07f 100644 --- a/drivers/bus/mhi/host/internal.h +++ b/drivers/bus/mhi/host/internal.h @@ -80,6 +80,7 @@ enum dev_st_transition { DEV_ST_TRANSITION_FP, DEV_ST_TRANSITION_SYS_ERR, DEV_ST_TRANSITION_DISABLE, + DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE, DEV_ST_TRANSITION_MAX, }; @@ -90,7 +91,8 @@ enum dev_st_transition { dev_st_trans(MISSION_MODE, "MISSION MODE") \ dev_st_trans(FP, "FLASH PROGRAMMER") \ dev_st_trans(SYS_ERR, "SYS ERROR") \ - dev_st_trans_end(DISABLE, "DISABLE") + dev_st_trans(DISABLE, "DISABLE") \ + dev_st_trans_end(DISABLE_DESTROY_DEVICE, "DISABLE (DESTROY DEVICE)") extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX]; #define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? 
\ diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index 15d657af9b..4de75674f1 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -1691,3 +1691,19 @@ void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) } } EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer); + +int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + void __iomem *base = mhi_cntrl->regs; + int ret; + + ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, chdb_offset); + if (ret) { + dev_err(dev, "Unable to read CHDBOFF register\n"); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_get_channel_doorbell_offset); diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index 51639bfcfe..08844ee796 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -27,12 +27,16 @@ #define PCI_VENDOR_ID_THALES 0x1269 #define PCI_VENDOR_ID_QUECTEL 0x1eac +#define MHI_EDL_DB 91 +#define MHI_EDL_COOKIE 0xEDEDEDED + /** * struct mhi_pci_dev_info - MHI PCI device specific information * @config: MHI controller configuration * @name: name of the PCI module * @fw: firmware path (if any) * @edl: emergency download mode firmware path (if any) + * @edl_trigger: capable of triggering EDL mode in the device (if supported) * @bar_num: PCI base address register to use for MHI MMIO register space * @dma_data_width: DMA transfer word size (32 or 64 bits) * @mru_default: default MRU size for MBIM network packets @@ -44,6 +48,7 @@ struct mhi_pci_dev_info { const char *name; const char *fw; const char *edl; + bool edl_trigger; unsigned int bar_num; unsigned int dma_data_width; unsigned int mru_default; @@ -292,6 +297,7 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = { .name = "qcom-sdx75m", .fw = "qcom/sdx75m/xbl.elf", .edl = "qcom/sdx75m/edl.mbn", + .edl_trigger = true, .config = &modem_qcom_v2_mhiv_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -302,6 +308,7 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = { .name = "qcom-sdx65m", .fw = "qcom/sdx65m/xbl.elf", .edl = "qcom/sdx65m/edl.mbn", + .edl_trigger = true, .config = &modem_qcom_v1_mhiv_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -312,6 +319,7 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = { .name = "qcom-sdx55m", .fw = "qcom/sdx55m/sbl1.mbn", .edl = "qcom/sdx55m/edl.mbn", + .edl_trigger = true, .config = &modem_qcom_v1_mhiv_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -928,6 +936,40 @@ static void health_check(struct timer_list *t) mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); } +static int mhi_pci_generic_edl_trigger(struct mhi_controller *mhi_cntrl) +{ + void __iomem *base = mhi_cntrl->regs; + void __iomem *edl_db; + int ret; + u32 val; + + ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); + if (ret) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to wakeup the device\n"); + return ret; + } + + pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); + mhi_cntrl->runtime_get(mhi_cntrl); + + ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val); + if (ret) + goto err_get_chdb; + + edl_db = base + val + (8 * MHI_EDL_DB); + + mhi_cntrl->write_reg(mhi_cntrl, edl_db + 4, upper_32_bits(MHI_EDL_COOKIE)); + mhi_cntrl->write_reg(mhi_cntrl, edl_db, lower_32_bits(MHI_EDL_COOKIE)); + + mhi_soc_reset(mhi_cntrl); + +err_get_chdb: + mhi_cntrl->runtime_put(mhi_cntrl); + 
mhi_device_put(mhi_cntrl->mhi_dev); + + return ret; +} + static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data; @@ -962,6 +1004,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mhi_cntrl->runtime_put = mhi_pci_runtime_put; mhi_cntrl->mru = info->mru_default; + if (info->edl_trigger) + mhi_cntrl->edl_trigger = mhi_pci_generic_edl_trigger; + if (info->sideband_wake) { mhi_cntrl->wake_get = mhi_pci_wake_get_nop; mhi_cntrl->wake_put = mhi_pci_wake_put_nop; diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index 8b40d3f01a..11c0e751f2 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -468,7 +468,8 @@ error_mission_mode: } /* Handle shutdown transitions */ -static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) +static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, + bool destroy_device) { enum mhi_pm_state cur_state; struct mhi_event *mhi_event; @@ -530,8 +531,16 @@ skip_mhi_reset: dev_dbg(dev, "Waiting for all pending threads to complete\n"); wake_up_all(&mhi_cntrl->state_event); - dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); - device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); + /* + * Only destroy the 'struct device' for channels if indicated by the + * 'destroy_device' flag. Because, during system suspend or hibernation + * state, there is no need to destroy the 'struct device' as the endpoint + * device would still be physically attached to the machine. + */ + if (destroy_device) { + dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); + } mutex_lock(&mhi_cntrl->pm_mutex); @@ -821,7 +830,10 @@ void mhi_pm_st_worker(struct work_struct *work) mhi_pm_sys_error_transition(mhi_cntrl); break; case DEV_ST_TRANSITION_DISABLE: - mhi_pm_disable_transition(mhi_cntrl); + mhi_pm_disable_transition(mhi_cntrl, false); + break; + case DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE: + mhi_pm_disable_transition(mhi_cntrl, true); break; default: break; @@ -1175,7 +1187,8 @@ error_exit: } EXPORT_SYMBOL_GPL(mhi_async_power_up); -void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +static void __mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful, + bool destroy_device) { enum mhi_pm_state cur_state, transition_state; struct device *dev = &mhi_cntrl->mhi_dev->dev; @@ -1211,15 +1224,32 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) write_unlock_irq(&mhi_cntrl->pm_lock); mutex_unlock(&mhi_cntrl->pm_mutex); - mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE); + if (destroy_device) + mhi_queue_state_transition(mhi_cntrl, + DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE); + else + mhi_queue_state_transition(mhi_cntrl, + DEV_ST_TRANSITION_DISABLE); /* Wait for shutdown to complete */ flush_work(&mhi_cntrl->st_worker); disable_irq(mhi_cntrl->irq[0]); } + +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +{ + __mhi_power_down(mhi_cntrl, graceful, true); +} EXPORT_SYMBOL_GPL(mhi_power_down); +void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl, + bool graceful) +{ + __mhi_power_down(mhi_cntrl, graceful, false); +} +EXPORT_SYMBOL_GPL(mhi_power_down_keep_dev); + int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) { int ret = mhi_async_power_up(mhi_cntrl); diff --git 
a/drivers/bus/mhi/host/trace.h b/drivers/bus/mhi/host/trace.h index 368515dcb2..95613c8ebe 100644 --- a/drivers/bus/mhi/host/trace.h +++ b/drivers/bus/mhi/host/trace.h @@ -103,7 +103,7 @@ TRACE_EVENT(mhi_gen_tre, ), TP_fast_assign( - __assign_str(name, mhi_cntrl->mhi_dev->name); + __assign_str(name); __entry->ch_num = mhi_chan->chan; __entry->wp = mhi_tre; __entry->tre_ptr = mhi_tre->ptr; @@ -131,7 +131,7 @@ TRACE_EVENT(mhi_intvec_states, ), TP_fast_assign( - __assign_str(name, mhi_cntrl->mhi_dev->name); + __assign_str(name); __entry->local_ee = mhi_cntrl->ee; __entry->state = mhi_cntrl->dev_state; __entry->dev_ee = dev_ee; @@ -158,7 +158,7 @@ TRACE_EVENT(mhi_tryset_pm_state, ), TP_fast_assign( - __assign_str(name, mhi_cntrl->mhi_dev->name); + __assign_str(name); if (pm_state) pm_state = __fls(pm_state); __entry->pm_state = pm_state; @@ -184,7 +184,7 @@ DECLARE_EVENT_CLASS(mhi_process_event_ring, ), TP_fast_assign( - __assign_str(name, mhi_cntrl->mhi_dev->name); + __assign_str(name); __entry->rp = rp; __entry->ptr = rp->ptr; __entry->dword0 = rp->dword[0]; @@ -226,7 +226,7 @@ DECLARE_EVENT_CLASS(mhi_update_channel_state, ), TP_fast_assign( - __assign_str(name, mhi_cntrl->mhi_dev->name); + __assign_str(name); __entry->ch_num = mhi_chan->chan; __entry->state = state; __entry->reason = reason; @@ -265,7 +265,7 @@ TRACE_EVENT(mhi_pm_st_transition, ), TP_fast_assign( - __assign_str(name, mhi_cntrl->mhi_dev->name); + __assign_str(name); __entry->state = state; ), diff --git a/drivers/bus/stm32_etzpc.c b/drivers/bus/stm32_etzpc.c new file mode 100644 index 0000000000..7fc0f16960 --- /dev/null +++ b/drivers/bus/stm32_etzpc.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023, STMicroelectronics - All Rights Reserved + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stm32_firewall.h" + +/* + * ETZPC registers + */ +#define ETZPC_DECPROT 0x10 +#define ETZPC_HWCFGR 0x3F0 + +/* + * HWCFGR register + */ +#define ETZPC_HWCFGR_NUM_TZMA GENMASK(7, 0) +#define ETZPC_HWCFGR_NUM_PER_SEC GENMASK(15, 8) +#define ETZPC_HWCFGR_NUM_AHB_SEC GENMASK(23, 16) +#define ETZPC_HWCFGR_CHUNKS1N4 GENMASK(31, 24) + +/* + * ETZPC miscellaneous + */ +#define ETZPC_PROT_MASK GENMASK(1, 0) +#define ETZPC_PROT_A7NS 0x3 +#define ETZPC_DECPROT_SHIFT 1 + +#define IDS_PER_DECPROT_REGS 16 + +static int stm32_etzpc_grant_access(struct stm32_firewall_controller *ctrl, u32 firewall_id) +{ + u32 offset, reg_offset, sec_val; + + if (firewall_id >= ctrl->max_entries) { + dev_err(ctrl->dev, "Invalid sys bus ID %u", firewall_id); + return -EINVAL; + } + + /* Check access configuration, 16 peripherals per register */ + reg_offset = ETZPC_DECPROT + 0x4 * (firewall_id / IDS_PER_DECPROT_REGS); + offset = (firewall_id % IDS_PER_DECPROT_REGS) << ETZPC_DECPROT_SHIFT; + + /* Verify peripheral is non-secure and attributed to cortex A7 */ + sec_val = (readl(ctrl->mmio + reg_offset) >> offset) & ETZPC_PROT_MASK; + if (sec_val != ETZPC_PROT_A7NS) { + dev_dbg(ctrl->dev, "Invalid bus configuration: reg_offset %#x, value %d\n", + reg_offset, sec_val); + return -EACCES; + } + + return 0; +} + +static void stm32_etzpc_release_access(struct stm32_firewall_controller *ctrl __maybe_unused, + u32 firewall_id __maybe_unused) +{ +} + +static int stm32_etzpc_probe(struct platform_device *pdev) +{ + struct stm32_firewall_controller *etzpc_controller; + struct device_node *np = pdev->dev.of_node; + u32 nb_per, nb_master; + struct resource 
*res; + void __iomem *mmio; + int rc; + + etzpc_controller = devm_kzalloc(&pdev->dev, sizeof(*etzpc_controller), GFP_KERNEL); + if (!etzpc_controller) + return -ENOMEM; + + mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(mmio)) + return PTR_ERR(mmio); + + etzpc_controller->dev = &pdev->dev; + etzpc_controller->mmio = mmio; + etzpc_controller->name = dev_driver_string(etzpc_controller->dev); + etzpc_controller->type = STM32_PERIPHERAL_FIREWALL | STM32_MEMORY_FIREWALL; + etzpc_controller->grant_access = stm32_etzpc_grant_access; + etzpc_controller->release_access = stm32_etzpc_release_access; + + /* Get number of etzpc entries*/ + nb_per = FIELD_GET(ETZPC_HWCFGR_NUM_PER_SEC, + readl(etzpc_controller->mmio + ETZPC_HWCFGR)); + nb_master = FIELD_GET(ETZPC_HWCFGR_NUM_AHB_SEC, + readl(etzpc_controller->mmio + ETZPC_HWCFGR)); + etzpc_controller->max_entries = nb_per + nb_master; + + platform_set_drvdata(pdev, etzpc_controller); + + rc = stm32_firewall_controller_register(etzpc_controller); + if (rc) { + dev_err(etzpc_controller->dev, "Couldn't register as a firewall controller: %d", + rc); + return rc; + } + + rc = stm32_firewall_populate_bus(etzpc_controller); + if (rc) { + dev_err(etzpc_controller->dev, "Couldn't populate ETZPC bus: %d", + rc); + return rc; + } + + /* Populate all allowed nodes */ + return of_platform_populate(np, NULL, NULL, &pdev->dev); +} + +static const struct of_device_id stm32_etzpc_of_match[] = { + { .compatible = "st,stm32-etzpc" }, + {} +}; +MODULE_DEVICE_TABLE(of, stm32_etzpc_of_match); + +static struct platform_driver stm32_etzpc_driver = { + .probe = stm32_etzpc_probe, + .driver = { + .name = "stm32-etzpc", + .of_match_table = stm32_etzpc_of_match, + }, +}; +module_platform_driver(stm32_etzpc_driver); + +MODULE_AUTHOR("Gatien Chevallier "); +MODULE_DESCRIPTION("STMicroelectronics ETZPC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/bus/stm32_firewall.c b/drivers/bus/stm32_firewall.c new file mode 100644 index 0000000000..2fc9761dad --- /dev/null +++ b/drivers/bus/stm32_firewall.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023, STMicroelectronics - All Rights Reserved + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stm32_firewall.h" + +/* Corresponds to STM32_FIREWALL_MAX_EXTRA_ARGS + firewall ID */ +#define STM32_FIREWALL_MAX_ARGS (STM32_FIREWALL_MAX_EXTRA_ARGS + 1) + +static LIST_HEAD(firewall_controller_list); +static DEFINE_MUTEX(firewall_controller_list_lock); + +/* Firewall device API */ + +int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall, + unsigned int nb_firewall) +{ + struct stm32_firewall_controller *ctrl; + struct of_phandle_iterator it; + unsigned int i, j = 0; + int err; + + if (!firewall || !nb_firewall) + return -EINVAL; + + /* Parse property with phandle parsed out */ + of_for_each_phandle(&it, err, np, "access-controllers", "#access-controller-cells", 0) { + struct of_phandle_args provider_args; + struct device_node *provider = it.node; + const char *fw_entry; + bool match = false; + + if (err) { + pr_err("Unable to get access-controllers property for node %s\n, err: %d", + np->full_name, err); + of_node_put(provider); + return err; + } + + if (j >= nb_firewall) { + pr_err("Too many firewall controllers"); + of_node_put(provider); + return -EINVAL; + } + + provider_args.args_count = of_phandle_iterator_args(&it, 
provider_args.args, + STM32_FIREWALL_MAX_ARGS); + + /* Check if the parsed phandle corresponds to a registered firewall controller */ + mutex_lock(&firewall_controller_list_lock); + list_for_each_entry(ctrl, &firewall_controller_list, entry) { + if (ctrl->dev->of_node->phandle == it.phandle) { + match = true; + firewall[j].firewall_ctrl = ctrl; + break; + } + } + mutex_unlock(&firewall_controller_list_lock); + + if (!match) { + firewall[j].firewall_ctrl = NULL; + pr_err("No firewall controller registered for %s\n", np->full_name); + of_node_put(provider); + return -ENODEV; + } + + err = of_property_read_string_index(np, "access-controller-names", j, &fw_entry); + if (err == 0) + firewall[j].entry = fw_entry; + + /* Handle the case when there are no arguments given along with the phandle */ + if (provider_args.args_count < 0 || + provider_args.args_count > STM32_FIREWALL_MAX_ARGS) { + of_node_put(provider); + return -EINVAL; + } else if (provider_args.args_count == 0) { + firewall[j].extra_args_size = 0; + firewall[j].firewall_id = U32_MAX; + j++; + continue; + } + + /* The firewall ID is always the first argument */ + firewall[j].firewall_id = provider_args.args[0]; + + /* Extra args start at the second argument */ + for (i = 0; i < provider_args.args_count - 1; i++) + firewall[j].extra_args[i] = provider_args.args[i + 1]; + + /* Remove the firewall ID arg that is not an extra argument */ + firewall[j].extra_args_size = provider_args.args_count - 1; + + j++; + } + + return 0; +} +EXPORT_SYMBOL_GPL(stm32_firewall_get_firewall); + +int stm32_firewall_grant_access(struct stm32_firewall *firewall) +{ + struct stm32_firewall_controller *firewall_controller; + + if (!firewall || firewall->firewall_id == U32_MAX) + return -EINVAL; + + firewall_controller = firewall->firewall_ctrl; + + if (!firewall_controller) + return -ENODEV; + + return firewall_controller->grant_access(firewall_controller, firewall->firewall_id); +} +EXPORT_SYMBOL_GPL(stm32_firewall_grant_access); + +int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id) +{ + struct stm32_firewall_controller *firewall_controller; + + if (!firewall || subsystem_id == U32_MAX || firewall->firewall_id == U32_MAX) + return -EINVAL; + + firewall_controller = firewall->firewall_ctrl; + + if (!firewall_controller) + return -ENODEV; + + return firewall_controller->grant_access(firewall_controller, subsystem_id); +} +EXPORT_SYMBOL_GPL(stm32_firewall_grant_access_by_id); + +void stm32_firewall_release_access(struct stm32_firewall *firewall) +{ + struct stm32_firewall_controller *firewall_controller; + + if (!firewall || firewall->firewall_id == U32_MAX) { + pr_debug("Incorrect arguments when releasing a firewall access\n"); + return; + } + + firewall_controller = firewall->firewall_ctrl; + + if (!firewall_controller) { + pr_debug("No firewall controller to release\n"); + return; + } + + firewall_controller->release_access(firewall_controller, firewall->firewall_id); +} +EXPORT_SYMBOL_GPL(stm32_firewall_release_access); + +void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id) +{ + struct stm32_firewall_controller *firewall_controller; + + if (!firewall || subsystem_id == U32_MAX || firewall->firewall_id == U32_MAX) { + pr_debug("Incorrect arguments when releasing a firewall access"); + return; + } + + firewall_controller = firewall->firewall_ctrl; + + if (!firewall_controller) { + pr_debug("No firewall controller to release"); + return; + } + + 
firewall_controller->release_access(firewall_controller, subsystem_id); +} +EXPORT_SYMBOL_GPL(stm32_firewall_release_access_by_id); + +/* Firewall controller API */ + +int stm32_firewall_controller_register(struct stm32_firewall_controller *firewall_controller) +{ + struct stm32_firewall_controller *ctrl; + + if (!firewall_controller) + return -ENODEV; + + pr_info("Registering %s firewall controller\n", firewall_controller->name); + + mutex_lock(&firewall_controller_list_lock); + list_for_each_entry(ctrl, &firewall_controller_list, entry) { + if (ctrl == firewall_controller) { + pr_debug("%s firewall controller already registered\n", + firewall_controller->name); + mutex_unlock(&firewall_controller_list_lock); + return 0; + } + } + list_add_tail(&firewall_controller->entry, &firewall_controller_list); + mutex_unlock(&firewall_controller_list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(stm32_firewall_controller_register); + +void stm32_firewall_controller_unregister(struct stm32_firewall_controller *firewall_controller) +{ + struct stm32_firewall_controller *ctrl; + bool controller_removed = false; + + if (!firewall_controller) { + pr_debug("Null reference while unregistering firewall controller\n"); + return; + } + + mutex_lock(&firewall_controller_list_lock); + list_for_each_entry(ctrl, &firewall_controller_list, entry) { + if (ctrl == firewall_controller) { + controller_removed = true; + list_del_init(&ctrl->entry); + break; + } + } + mutex_unlock(&firewall_controller_list_lock); + + if (!controller_removed) + pr_debug("There was no firewall controller named %s to unregister\n", + firewall_controller->name); +} +EXPORT_SYMBOL_GPL(stm32_firewall_controller_unregister); + +int stm32_firewall_populate_bus(struct stm32_firewall_controller *firewall_controller) +{ + struct stm32_firewall *firewalls; + struct device_node *child; + struct device *parent; + unsigned int i; + int len; + int err; + + parent = firewall_controller->dev; + + dev_dbg(parent, "Populating %s system bus\n", dev_name(firewall_controller->dev)); + + for_each_available_child_of_node(dev_of_node(parent), child) { + /* The access-controllers property is mandatory for firewall bus devices */ + len = of_count_phandle_with_args(child, "access-controllers", + "#access-controller-cells"); + if (len <= 0) { + of_node_put(child); + return -EINVAL; + } + + firewalls = kcalloc(len, sizeof(*firewalls), GFP_KERNEL); + if (!firewalls) { + of_node_put(child); + return -ENOMEM; + } + + err = stm32_firewall_get_firewall(child, firewalls, (unsigned int)len); + if (err) { + kfree(firewalls); + of_node_put(child); + return err; + } + + for (i = 0; i < len; i++) { + if (firewall_controller->grant_access(firewall_controller, + firewalls[i].firewall_id)) { + /* + * Peripheral access not allowed or not defined. 
+ * Mark the node as populated so platform bus won't probe it + */ + of_detach_node(child); + dev_err(parent, "%s: Device driver will not be probed\n", + child->full_name); + } + } + + kfree(firewalls); + } + + return 0; +} +EXPORT_SYMBOL_GPL(stm32_firewall_populate_bus); diff --git a/drivers/bus/stm32_firewall.h b/drivers/bus/stm32_firewall.h new file mode 100644 index 0000000000..e5fac85fe3 --- /dev/null +++ b/drivers/bus/stm32_firewall.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023, STMicroelectronics - All Rights Reserved + */ + +#ifndef _STM32_FIREWALL_H +#define _STM32_FIREWALL_H + +#include +#include +#include +#include +#include + +/** + * STM32_PERIPHERAL_FIREWALL: This type of firewall protects peripherals + * STM32_MEMORY_FIREWALL: This type of firewall protects memories/subsets of memory + * zones + * STM32_NOTYPE_FIREWALL: Undefined firewall type + */ + +#define STM32_PERIPHERAL_FIREWALL BIT(1) +#define STM32_MEMORY_FIREWALL BIT(2) +#define STM32_NOTYPE_FIREWALL BIT(3) + +/** + * struct stm32_firewall_controller - Information on firewall controller supplying services + * + * @name: Name of the firewall controller + * @dev: Device reference of the firewall controller + * @mmio: Base address of the firewall controller + * @entry: List entry of the firewall controller list + * @type: Type of firewall + * @max_entries: Number of entries covered by the firewall + * @grant_access: Callback used to grant access for a device access against a + * firewall controller + * @release_access: Callback used to release resources taken by a device when access was + * granted + * @grant_memory_range_access: Callback used to grant access for a device to a given memory region + */ +struct stm32_firewall_controller { + const char *name; + struct device *dev; + void __iomem *mmio; + struct list_head entry; + unsigned int type; + unsigned int max_entries; + + int (*grant_access)(struct stm32_firewall_controller *ctrl, u32 id); + void (*release_access)(struct stm32_firewall_controller *ctrl, u32 id); + int (*grant_memory_range_access)(struct stm32_firewall_controller *ctrl, phys_addr_t paddr, + size_t size); +}; + +/** + * stm32_firewall_controller_register - Register a firewall controller to the STM32 firewall + * framework + * @firewall_controller: Firewall controller to register + * + * Returns 0 in case of success or -ENODEV if no controller was given. + */ +int stm32_firewall_controller_register(struct stm32_firewall_controller *firewall_controller); + +/** + * stm32_firewall_controller_unregister - Unregister a firewall controller from the STM32 + * firewall framework + * @firewall_controller: Firewall controller to unregister + */ +void stm32_firewall_controller_unregister(struct stm32_firewall_controller *firewall_controller); + +/** + * stm32_firewall_populate_bus - Populate device tree nodes that have a correct firewall + * configuration. This is used at boot-time only, as a sanity check + * between device tree and firewalls hardware configurations to + * prevent a kernel crash when a device driver is not granted access + * + * @firewall_controller: Firewall controller which nodes will be populated or not + * + * Returns 0 in case of success or appropriate errno code if error occurred. 
+ */ +int stm32_firewall_populate_bus(struct stm32_firewall_controller *firewall_controller); + +#endif /* _STM32_FIREWALL_H */ diff --git a/drivers/bus/stm32_rifsc.c b/drivers/bus/stm32_rifsc.c new file mode 100644 index 0000000000..4cf1b60014 --- /dev/null +++ b/drivers/bus/stm32_rifsc.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023, STMicroelectronics - All Rights Reserved + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stm32_firewall.h" + +/* + * RIFSC offset register + */ +#define RIFSC_RISC_SECCFGR0 0x10 +#define RIFSC_RISC_PRIVCFGR0 0x30 +#define RIFSC_RISC_PER0_CIDCFGR 0x100 +#define RIFSC_RISC_PER0_SEMCR 0x104 +#define RIFSC_RISC_HWCFGR2 0xFEC + +/* + * SEMCR register + */ +#define SEMCR_MUTEX BIT(0) + +/* + * HWCFGR2 register + */ +#define HWCFGR2_CONF1_MASK GENMASK(15, 0) +#define HWCFGR2_CONF2_MASK GENMASK(23, 16) +#define HWCFGR2_CONF3_MASK GENMASK(31, 24) + +/* + * RIFSC miscellaneous + */ +#define RIFSC_RISC_CFEN_MASK BIT(0) +#define RIFSC_RISC_SEM_EN_MASK BIT(1) +#define RIFSC_RISC_SCID_MASK GENMASK(6, 4) +#define RIFSC_RISC_SEML_SHIFT 16 +#define RIFSC_RISC_SEMWL_MASK GENMASK(23, 16) +#define RIFSC_RISC_PER_ID_MASK GENMASK(31, 24) + +#define RIFSC_RISC_PERx_CID_MASK (RIFSC_RISC_CFEN_MASK | \ + RIFSC_RISC_SEM_EN_MASK | \ + RIFSC_RISC_SCID_MASK | \ + RIFSC_RISC_SEMWL_MASK) + +#define IDS_PER_RISC_SEC_PRIV_REGS 32 + +/* RIF miscellaneous */ +/* + * CIDCFGR register fields + */ +#define CIDCFGR_CFEN BIT(0) +#define CIDCFGR_SEMEN BIT(1) +#define CIDCFGR_SEMWL(x) BIT(RIFSC_RISC_SEML_SHIFT + (x)) + +#define SEMWL_SHIFT 16 + +/* Compartiment IDs */ +#define RIF_CID0 0x0 +#define RIF_CID1 0x1 + +static bool stm32_rifsc_is_semaphore_available(void __iomem *addr) +{ + return !(readl(addr) & SEMCR_MUTEX); +} + +static int stm32_rif_acquire_semaphore(struct stm32_firewall_controller *stm32_firewall_controller, + int id) +{ + void __iomem *addr = stm32_firewall_controller->mmio + RIFSC_RISC_PER0_SEMCR + 0x8 * id; + + writel(SEMCR_MUTEX, addr); + + /* Check that CID1 has the semaphore */ + if (stm32_rifsc_is_semaphore_available(addr) || + FIELD_GET(RIFSC_RISC_SCID_MASK, readl(addr)) != RIF_CID1) + return -EACCES; + + return 0; +} + +static void stm32_rif_release_semaphore(struct stm32_firewall_controller *stm32_firewall_controller, + int id) +{ + void __iomem *addr = stm32_firewall_controller->mmio + RIFSC_RISC_PER0_SEMCR + 0x8 * id; + + if (stm32_rifsc_is_semaphore_available(addr)) + return; + + writel(SEMCR_MUTEX, addr); + + /* Ok if another compartment takes the semaphore before the check */ + WARN_ON(!stm32_rifsc_is_semaphore_available(addr) && + FIELD_GET(RIFSC_RISC_SCID_MASK, readl(addr)) == RIF_CID1); +} + +static int stm32_rifsc_grant_access(struct stm32_firewall_controller *ctrl, u32 firewall_id) +{ + struct stm32_firewall_controller *rifsc_controller = ctrl; + u32 reg_offset, reg_id, sec_reg_value, cid_reg_value; + int rc; + + if (firewall_id >= rifsc_controller->max_entries) { + dev_err(rifsc_controller->dev, "Invalid sys bus ID %u", firewall_id); + return -EINVAL; + } + + /* + * RIFSC_RISC_PRIVCFGRx and RIFSC_RISC_SECCFGRx both handle configuration access for + * 32 peripherals. 
On the other hand, there is one _RIFSC_RISC_PERx_CIDCFGR register + * per peripheral + */ + reg_id = firewall_id / IDS_PER_RISC_SEC_PRIV_REGS; + reg_offset = firewall_id % IDS_PER_RISC_SEC_PRIV_REGS; + sec_reg_value = readl(rifsc_controller->mmio + RIFSC_RISC_SECCFGR0 + 0x4 * reg_id); + cid_reg_value = readl(rifsc_controller->mmio + RIFSC_RISC_PER0_CIDCFGR + 0x8 * firewall_id); + + /* First check conditions for semaphore mode, which doesn't take into account static CID. */ + if ((cid_reg_value & CIDCFGR_SEMEN) && (cid_reg_value & CIDCFGR_CFEN)) { + if (cid_reg_value & BIT(RIF_CID1 + SEMWL_SHIFT)) { + /* Static CID is irrelevant if semaphore mode */ + goto skip_cid_check; + } else { + dev_dbg(rifsc_controller->dev, + "Invalid bus semaphore configuration: index %d\n", firewall_id); + return -EACCES; + } + } + + /* + * Skip CID check if CID filtering isn't enabled or filtering is enabled on CID0, which + * corresponds to whatever CID. + */ + if (!(cid_reg_value & CIDCFGR_CFEN) || + FIELD_GET(RIFSC_RISC_SCID_MASK, cid_reg_value) == RIF_CID0) + goto skip_cid_check; + + /* Coherency check with the CID configuration */ + if (FIELD_GET(RIFSC_RISC_SCID_MASK, cid_reg_value) != RIF_CID1) { + dev_dbg(rifsc_controller->dev, "Invalid CID configuration for peripheral: %d\n", + firewall_id); + return -EACCES; + } + +skip_cid_check: + /* Check security configuration */ + if (sec_reg_value & BIT(reg_offset)) { + dev_dbg(rifsc_controller->dev, + "Invalid security configuration for peripheral: %d\n", firewall_id); + return -EACCES; + } + + /* + * If the peripheral is in semaphore mode, take the semaphore so that + * the CID1 has the ownership. + */ + if ((cid_reg_value & CIDCFGR_SEMEN) && (cid_reg_value & CIDCFGR_CFEN)) { + rc = stm32_rif_acquire_semaphore(rifsc_controller, firewall_id); + if (rc) { + dev_err(rifsc_controller->dev, + "Couldn't acquire semaphore for peripheral: %d\n", firewall_id); + return rc; + } + } + + return 0; +} + +static void stm32_rifsc_release_access(struct stm32_firewall_controller *ctrl, u32 firewall_id) +{ + stm32_rif_release_semaphore(ctrl, firewall_id); +} + +static int stm32_rifsc_probe(struct platform_device *pdev) +{ + struct stm32_firewall_controller *rifsc_controller; + struct device_node *np = pdev->dev.of_node; + u32 nb_risup, nb_rimu, nb_risal; + struct resource *res; + void __iomem *mmio; + int rc; + + rifsc_controller = devm_kzalloc(&pdev->dev, sizeof(*rifsc_controller), GFP_KERNEL); + if (!rifsc_controller) + return -ENOMEM; + + mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(mmio)) + return PTR_ERR(mmio); + + rifsc_controller->dev = &pdev->dev; + rifsc_controller->mmio = mmio; + rifsc_controller->name = dev_driver_string(rifsc_controller->dev); + rifsc_controller->type = STM32_PERIPHERAL_FIREWALL | STM32_MEMORY_FIREWALL; + rifsc_controller->grant_access = stm32_rifsc_grant_access; + rifsc_controller->release_access = stm32_rifsc_release_access; + + /* Get number of RIFSC entries*/ + nb_risup = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF1_MASK; + nb_rimu = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF2_MASK; + nb_risal = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF3_MASK; + rifsc_controller->max_entries = nb_risup + nb_rimu + nb_risal; + + platform_set_drvdata(pdev, rifsc_controller); + + rc = stm32_firewall_controller_register(rifsc_controller); + if (rc) { + dev_err(rifsc_controller->dev, "Couldn't register as a firewall controller: %d", + rc); + return rc; + } + + rc = 
stm32_firewall_populate_bus(rifsc_controller); + if (rc) { + dev_err(rifsc_controller->dev, "Couldn't populate RIFSC bus: %d", + rc); + return rc; + } + + /* Populate all allowed nodes */ + return of_platform_populate(np, NULL, NULL, &pdev->dev); +} + +static const struct of_device_id stm32_rifsc_of_match[] = { + { .compatible = "st,stm32mp25-rifsc" }, + {} +}; +MODULE_DEVICE_TABLE(of, stm32_rifsc_of_match); + +static struct platform_driver stm32_rifsc_driver = { + .probe = stm32_rifsc_probe, + .driver = { + .name = "stm32-rifsc", + .of_match_table = stm32_rifsc_of_match, + }, +}; +module_platform_driver(stm32_rifsc_driver); + +MODULE_AUTHOR("Gatien Chevallier "); +MODULE_DESCRIPTION("STMicroelectronics RIFSC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 41d33f39ef..8767e04d6c 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1,6 +1,17 @@ // SPDX-License-Identifier: GPL-2.0 /* * ti-sysc.c - Texas Instruments sysc interconnect target driver + * + * TI SoCs have an interconnect target wrapper IP for many devices. The wrapper + * IP manages clock gating, resets, and PM capabilities for the connected devices. + * + * Copyright (C) 2017-2024 Texas Instruments Incorporated - https://www.ti.com/ + * + * Many features are based on the earlier omap_hwmod arch code with thanks to all + * the people who developed and debugged the code over the years: + * + * Copyright (C) 2009-2011 Nokia Corporation + * Copyright (C) 2011-2021 Texas Instruments Incorporated - https://www.ti.com/ */ #include @@ -1458,8 +1469,7 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & - (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) + if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE) return 0; if (!ddata->enabled) @@ -1477,8 +1487,7 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & - (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) + if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE) return 0; if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) { @@ -1529,19 +1538,6 @@ struct sysc_revision_quirk { } static const struct sysc_revision_quirk sysc_revision_quirks[] = { - /* These drivers need to be fixed to not use pm_runtime_irq_safe() */ - SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), - SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), - /* Uarts on omap4 and later */ - SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff, - SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), - SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), - SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), - /* Quirks that need to be set based on the module address */ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff, SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT | @@ -1599,6 +1595,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), + SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE_ACT), + 
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE_ACT), + /* Uarts on omap4 and later */ + SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff, + SYSC_QUIRK_SWSUP_SIDLE_ACT), + SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE_ACT), + SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE_ACT), SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, @@ -2145,8 +2152,7 @@ static int sysc_reset(struct sysc *ddata) sysc_offset = ddata->offsets[SYSC_SYSCONFIG]; if (ddata->legacy_mode || - ddata->cap->regbits->srst_shift < 0 || - ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) + ddata->cap->regbits->srst_shift < 0) return 0; sysc_mask = BIT(ddata->cap->regbits->srst_shift); @@ -2240,12 +2246,14 @@ static int sysc_init_module(struct sysc *ddata) goto err_main_clocks; } - error = sysc_reset(ddata); - if (error) - dev_err(ddata->dev, "Reset failed with %d\n", error); + if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) { + error = sysc_reset(ddata); + if (error) + dev_err(ddata->dev, "Reset failed with %d\n", error); - if (error && !ddata->legacy_mode) - sysc_disable_module(ddata->dev); + if (error && !ddata->legacy_mode) + sysc_disable_module(ddata->dev); + } err_main_clocks: if (error) @@ -2447,89 +2455,6 @@ static int __maybe_unused sysc_child_runtime_resume(struct device *dev) return pm_generic_runtime_resume(dev); } -#ifdef CONFIG_PM_SLEEP -static int sysc_child_suspend_noirq(struct device *dev) -{ - struct sysc *ddata; - int error; - - ddata = sysc_child_to_parent(dev); - - dev_dbg(ddata->dev, "%s %s\n", __func__, - ddata->name ? ddata->name : ""); - - error = pm_generic_suspend_noirq(dev); - if (error) { - dev_err(dev, "%s error at %i: %i\n", - __func__, __LINE__, error); - - return error; - } - - if (!pm_runtime_status_suspended(dev)) { - error = pm_generic_runtime_suspend(dev); - if (error) { - dev_dbg(dev, "%s busy at %i: %i\n", - __func__, __LINE__, error); - - return 0; - } - - error = sysc_runtime_suspend(ddata->dev); - if (error) { - dev_err(dev, "%s error at %i: %i\n", - __func__, __LINE__, error); - - return error; - } - - ddata->child_needs_resume = true; - } - - return 0; -} - -static int sysc_child_resume_noirq(struct device *dev) -{ - struct sysc *ddata; - int error; - - ddata = sysc_child_to_parent(dev); - - dev_dbg(ddata->dev, "%s %s\n", __func__, - ddata->name ? 
ddata->name : ""); - - if (ddata->child_needs_resume) { - ddata->child_needs_resume = false; - - error = sysc_runtime_resume(ddata->dev); - if (error) - dev_err(ddata->dev, - "%s runtime resume error: %i\n", - __func__, error); - - error = pm_generic_runtime_resume(dev); - if (error) - dev_err(ddata->dev, - "%s generic runtime resume: %i\n", - __func__, error); - } - - return pm_generic_resume_noirq(dev); -} -#endif - -static struct dev_pm_domain sysc_child_pm_domain = { - .ops = { - SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend, - sysc_child_runtime_resume, - NULL) - USE_PLATFORM_PM_SLEEP_OPS - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq, - sysc_child_resume_noirq) - } -}; - /* Caller needs to take list_lock if ever used outside of cpu_pm */ static void sysc_reinit_modules(struct sysc_soc_info *soc) { @@ -2600,25 +2525,6 @@ out_unlock: mutex_unlock(&sysc_soc->list_lock); } -/** - * sysc_legacy_idle_quirk - handle children in omap_device compatible way - * @ddata: device driver data - * @child: child device driver - * - * Allow idle for child devices as done with _od_runtime_suspend(). - * Otherwise many child devices will not idle because of the permanent - * parent usecount set in pm_runtime_irq_safe(). - * - * Note that the long term solution is to just modify the child device - * drivers to not set pm_runtime_irq_safe() and then this can be just - * dropped. - */ -static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child) -{ - if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) - dev_pm_domain_set(child, &sysc_child_pm_domain); -} - static int sysc_notifier_call(struct notifier_block *nb, unsigned long event, void *device) { @@ -2635,7 +2541,6 @@ static int sysc_notifier_call(struct notifier_block *nb, error = sysc_child_add_clocks(ddata, dev); if (error) return error; - sysc_legacy_idle_quirk(ddata, dev); break; default: break; @@ -2859,8 +2764,7 @@ static const struct sysc_capabilities sysc_34xx_sr = { .type = TI_SYSC_OMAP34XX_SR, .sysc_mask = SYSC_OMAP2_CLOCKACTIVITY, .regbits = &sysc_regbits_omap34xx_sr, - .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED | - SYSC_QUIRK_LEGACY_IDLE, + .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED, }; /* @@ -2881,13 +2785,12 @@ static const struct sysc_capabilities sysc_36xx_sr = { .type = TI_SYSC_OMAP36XX_SR, .sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP, .regbits = &sysc_regbits_omap36xx_sr, - .mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE, + .mod_quirks = SYSC_QUIRK_UNCACHED, }; static const struct sysc_capabilities sysc_omap4_sr = { .type = TI_SYSC_OMAP4_SR, .regbits = &sysc_regbits_omap36xx_sr, - .mod_quirks = SYSC_QUIRK_LEGACY_IDLE, }; /* diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c index 112a1541de..201f9a6fbd 100644 --- a/drivers/cdx/controller/cdx_controller.c +++ b/drivers/cdx/controller/cdx_controller.c @@ -222,7 +222,7 @@ mcdi_init_fail: return ret; } -static int xlnx_cdx_remove(struct platform_device *pdev) +static void xlnx_cdx_remove(struct platform_device *pdev) { struct cdx_controller *cdx = platform_get_drvdata(pdev); struct cdx_mcdi *cdx_mcdi = cdx->priv; @@ -234,8 +234,6 @@ static int xlnx_cdx_remove(struct platform_device *pdev) cdx_mcdi_finish(cdx_mcdi); kfree(cdx_mcdi); - - return 0; } static const struct of_device_id cdx_match_table[] = { @@ -252,7 +250,7 @@ static struct platform_driver cdx_pdriver = { .of_match_table = cdx_match_table, }, .probe = xlnx_cdx_probe, - .remove = xlnx_cdx_remove, + .remove_new = xlnx_cdx_remove, 
}; static int __init cdx_controller_init(void) diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c index c9bf2c2198..f0d0c04473 100644 --- a/drivers/char/agp/alpha-agp.c +++ b/drivers/char/agp/alpha-agp.c @@ -149,7 +149,7 @@ struct agp_bridge_driver alpha_core_agp_driver = { struct agp_bridge_data *alpha_bridge; -int __init +static int __init alpha_core_agp_setup(void) { alpha_agp_info *agp = alpha_mv.agp_info(); diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 86162a1368..9a24d19236 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -143,8 +143,10 @@ static int __init amd_rng_mod_init(void) found: err = pci_read_config_dword(pdev, 0x58, &pmbase); - if (err) + if (err) { + err = pcibios_err_to_errno(err); goto put_dev; + } pmbase &= 0x0000FF00; if (pmbase == 0) { diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index a3bbdd6e60..f6122a03ee 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -64,19 +64,6 @@ static size_t rng_buffer_size(void) return RNG_BUFFER_SIZE; } -static void add_early_randomness(struct hwrng *rng) -{ - int bytes_read; - - mutex_lock(&reading_mutex); - bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0); - mutex_unlock(&reading_mutex); - if (bytes_read > 0) { - size_t entropy = bytes_read * 8 * rng->quality / 1024; - add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false); - } -} - static inline void cleanup_rng(struct kref *kref) { struct hwrng *rng = container_of(kref, struct hwrng, ref); @@ -174,7 +161,6 @@ static int hwrng_init(struct hwrng *rng) reinit_completion(&rng->cleanup_done); skip_init: - rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024); current_quality = rng->quality; /* obsolete */ return 0; @@ -340,13 +326,12 @@ static ssize_t rng_current_store(struct device *dev, const char *buf, size_t len) { int err; - struct hwrng *rng, *old_rng, *new_rng; + struct hwrng *rng, *new_rng; err = mutex_lock_interruptible(&rng_mutex); if (err) return -ERESTARTSYS; - old_rng = current_rng; if (sysfs_streq(buf, "")) { err = enable_best_rng(); } else { @@ -362,11 +347,8 @@ static ssize_t rng_current_store(struct device *dev, new_rng = get_current_rng_nolock(); mutex_unlock(&rng_mutex); - if (new_rng) { - if (new_rng != old_rng) - add_early_randomness(new_rng); + if (new_rng) put_rng(new_rng); - } return err ? : len; } @@ -382,7 +364,7 @@ static ssize_t rng_current_show(struct device *dev, if (IS_ERR(rng)) return PTR_ERR(rng); - ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none"); + ret = sysfs_emit(buf, "%s\n", rng ? 
rng->name : "none"); put_rng(rng); return ret; @@ -544,7 +526,6 @@ int hwrng_register(struct hwrng *rng) { int err = -EINVAL; struct hwrng *tmp; - bool is_new_current = false; if (!rng->name || (!rng->data_read && !rng->read)) goto out; @@ -563,6 +544,9 @@ int hwrng_register(struct hwrng *rng) complete(&rng->cleanup_done); init_completion(&rng->dying); + /* Adjust quality field to always have a proper value */ + rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024); + if (!current_rng || (!cur_rng_set_by_user && rng->quality > current_rng->quality)) { /* @@ -573,25 +557,8 @@ int hwrng_register(struct hwrng *rng) err = set_current_rng(rng); if (err) goto out_unlock; - /* to use current_rng in add_early_randomness() we need - * to take a ref - */ - is_new_current = true; - kref_get(&rng->ref); } mutex_unlock(&rng_mutex); - if (is_new_current || !rng->init) { - /* - * Use a new device's input to add some randomness to - * the system. If this rng device isn't going to be - * used right away, its init function hasn't been - * called yet by set_current_rng(); so only use the - * randomness from devices that don't need an init callback - */ - add_early_randomness(rng); - } - if (is_new_current) - put_rng(rng); return 0; out_unlock: mutex_unlock(&rng_mutex); @@ -602,12 +569,11 @@ EXPORT_SYMBOL_GPL(hwrng_register); void hwrng_unregister(struct hwrng *rng) { - struct hwrng *old_rng, *new_rng; + struct hwrng *new_rng; int err; mutex_lock(&rng_mutex); - old_rng = current_rng; list_del(&rng->list); complete_all(&rng->dying); if (current_rng == rng) { @@ -626,11 +592,8 @@ void hwrng_unregister(struct hwrng *rng) } else mutex_unlock(&rng_mutex); - if (new_rng) { - if (old_rng != new_rng) - add_early_randomness(new_rng); + if (new_rng) put_rng(new_rng); - } wait_for_completion(&rng->cleanup_done); } diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c index 07ec000e4c..94ee18a112 100644 --- a/drivers/char/hw_random/mxc-rnga.c +++ b/drivers/char/hw_random/mxc-rnga.c @@ -131,7 +131,7 @@ static void mxc_rnga_cleanup(struct hwrng *rng) __raw_writel(ctrl & ~RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL); } -static int __init mxc_rnga_probe(struct platform_device *pdev) +static int mxc_rnga_probe(struct platform_device *pdev) { int err; struct mxc_rng *mxc_rng; @@ -176,7 +176,7 @@ err_ioremap: return err; } -static void __exit mxc_rnga_remove(struct platform_device *pdev) +static void mxc_rnga_remove(struct platform_device *pdev) { struct mxc_rng *mxc_rng = platform_get_drvdata(pdev); @@ -197,10 +197,11 @@ static struct platform_driver mxc_rnga_driver = { .name = "mxc_rnga", .of_match_table = mxc_rnga_of_match, }, - .remove_new = __exit_p(mxc_rnga_remove), + .probe = mxc_rnga_probe, + .remove_new = mxc_rnga_remove, }; -module_platform_driver_probe(mxc_rnga_driver, mxc_rnga_probe); +module_platform_driver(mxc_rnga_driver); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("H/W RNGA driver for i.MX"); diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index a2009fc4ad..f2a2aa7a53 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -78,7 +78,6 @@ MODULE_DEVICE_TABLE(amba, nmk_rng_ids); static struct amba_driver nmk_rng_driver = { .drv = { - .owner = THIS_MODULE, .name = "rng", }, .probe = nmk_rng_probe, diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 7a4b45393a..dd998f4fe4 100644 --- 
a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -245,7 +245,6 @@ static const struct virtio_device_id id_table[] = { static struct virtio_driver virtio_rng_driver = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtrng_probe, .remove = virtrng_remove, diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index cb6138b8de..e0944547c9 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile @@ -5,13 +5,10 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \ ipmi_si_hotmod.o ipmi_si_hardcode.o ipmi_si_platform.o \ - ipmi_si_port_io.o ipmi_si_mem_io.o -ifdef CONFIG_PCI -ipmi_si-y += ipmi_si_pci.o -endif -ifdef CONFIG_PARISC -ipmi_si-y += ipmi_si_parisc.o -endif + ipmi_si_mem_io.o +ipmi_si-$(CONFIG_HAS_IOPORT) += ipmi_si_port_io.o +ipmi_si-$(CONFIG_PCI) += ipmi_si_pci.o +ipmi_si-$(CONFIG_PARISC) += ipmi_si_parisc.o obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c index 7450904e33..b8b9c07d3b 100644 --- a/drivers/char/ipmi/bt-bmc.c +++ b/drivers/char/ipmi/bt-bmc.c @@ -459,14 +459,13 @@ static int bt_bmc_probe(struct platform_device *pdev) return 0; } -static int bt_bmc_remove(struct platform_device *pdev) +static void bt_bmc_remove(struct platform_device *pdev) { struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev); misc_deregister(&bt_bmc->miscdev); if (bt_bmc->irq < 0) del_timer_sync(&bt_bmc->poll_timer); - return 0; } static const struct of_device_id bt_bmc_match[] = { @@ -482,7 +481,7 @@ static struct platform_driver bt_bmc_driver = { .of_match_table = bt_bmc_match, }, .probe = bt_bmc_probe, - .remove = bt_bmc_remove, + .remove_new = bt_bmc_remove, }; module_platform_driver(bt_bmc_driver); diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index b0eedc4595..e12b531f5c 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -41,7 +41,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); static int ipmi_init_msghandler(void); -static void smi_recv_tasklet(struct tasklet_struct *t); +static void smi_recv_work(struct work_struct *t); static void handle_new_recv_msgs(struct ipmi_smi *intf); static void need_waiter(struct ipmi_smi *intf); static int handle_one_recv_msg(struct ipmi_smi *intf, @@ -498,13 +498,13 @@ struct ipmi_smi { /* * Messages queued for delivery. If delivery fails (out of memory * for instance), They will stay in here to be processed later in a - * periodic timer interrupt. The tasklet is for handling received + * periodic timer interrupt. The workqueue is for handling received * messages directly from the handler. */ spinlock_t waiting_rcv_msgs_lock; struct list_head waiting_rcv_msgs; atomic_t watchdog_pretimeouts_to_deliver; - struct tasklet_struct recv_tasklet; + struct work_struct recv_work; spinlock_t xmit_msgs_lock; struct list_head xmit_msgs; @@ -704,7 +704,7 @@ static void clean_up_interface_data(struct ipmi_smi *intf) struct cmd_rcvr *rcvr, *rcvr2; struct list_head list; - tasklet_kill(&intf->recv_tasklet); + cancel_work_sync(&intf->recv_work); free_smi_msg_list(&intf->waiting_rcv_msgs); free_recv_msg_list(&intf->waiting_events); @@ -1319,7 +1319,7 @@ static void free_user(struct kref *ref) { struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); - /* SRCU cleanup must happen in task context. 
*/ + /* SRCU cleanup must happen in workqueue context. */ queue_work(remove_work_wq, &user->remove_work); } @@ -3605,8 +3605,7 @@ int ipmi_add_smi(struct module *owner, intf->curr_seq = 0; spin_lock_init(&intf->waiting_rcv_msgs_lock); INIT_LIST_HEAD(&intf->waiting_rcv_msgs); - tasklet_setup(&intf->recv_tasklet, - smi_recv_tasklet); + INIT_WORK(&intf->recv_work, smi_recv_work); atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); spin_lock_init(&intf->xmit_msgs_lock); INIT_LIST_HEAD(&intf->xmit_msgs); @@ -4779,7 +4778,7 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf) * To preserve message order, quit if we * can't handle a message. Add the message * back at the head, this is safe because this - * tasklet is the only thing that pulls the + * workqueue is the only thing that pulls the * messages. */ list_add(&smi_msg->link, &intf->waiting_rcv_msgs); @@ -4812,10 +4811,10 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf) } } -static void smi_recv_tasklet(struct tasklet_struct *t) +static void smi_recv_work(struct work_struct *t) { unsigned long flags = 0; /* keep us warning-free. */ - struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet); + struct ipmi_smi *intf = from_work(intf, t, recv_work); int run_to_completion = intf->run_to_completion; struct ipmi_smi_msg *newmsg = NULL; @@ -4866,7 +4865,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf, /* * To preserve message order, we keep a queue and deliver from - * a tasklet. + * a workqueue. */ if (!run_to_completion) spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); @@ -4887,9 +4886,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf, spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); if (run_to_completion) - smi_recv_tasklet(&intf->recv_tasklet); + smi_recv_work(&intf->recv_work); else - tasklet_schedule(&intf->recv_tasklet); + queue_work(system_bh_wq, &intf->recv_work); } EXPORT_SYMBOL(ipmi_smi_msg_received); @@ -4899,7 +4898,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) return; atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); - tasklet_schedule(&intf->recv_tasklet); + queue_work(system_bh_wq, &intf->recv_work); } EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); @@ -5068,7 +5067,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf, flags); } - tasklet_schedule(&intf->recv_tasklet); + queue_work(system_bh_wq, &intf->recv_work); return need_timer; } diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c index da22a8cbe6..c59a86eb58 100644 --- a/drivers/char/ipmi/ipmi_powernv.c +++ b/drivers/char/ipmi/ipmi_powernv.c @@ -281,15 +281,13 @@ err_free: return rc; } -static int ipmi_powernv_remove(struct platform_device *pdev) +static void ipmi_powernv_remove(struct platform_device *pdev) { struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev); ipmi_unregister_smi(smi->intf); free_irq(smi->irq, smi); irq_dispose_mapping(smi->irq); - - return 0; } static const struct of_device_id ipmi_powernv_match[] = { @@ -304,7 +302,7 @@ static struct platform_driver powernv_ipmi_driver = { .of_match_table = ipmi_powernv_match, }, .probe = ipmi_powernv_probe, - .remove = ipmi_powernv_remove, + .remove_new = ipmi_powernv_remove, }; diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5cd031f3fc..eea23a3b96 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -1882,7 +1882,8 @@ int ipmi_si_add_smi(struct si_sm_io *io) } if (!io->io_setup) { - if (io->addr_space == IPMI_IO_ADDR_SPACE) { + if 
(IS_ENABLED(CONFIG_HAS_IOPORT) && + io->addr_space == IPMI_IO_ADDR_SPACE) { io->io_setup = ipmi_si_port_setup; } else if (io->addr_space == IPMI_MEM_ADDR_SPACE) { io->io_setup = ipmi_si_mem_setup; diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c index 74fa205586..b83d55685b 100644 --- a/drivers/char/ipmi/ipmi_si_pci.c +++ b/drivers/char/ipmi/ipmi_si_pci.c @@ -97,6 +97,9 @@ static int ipmi_pci_probe(struct pci_dev *pdev, } if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) { + if (!IS_ENABLED(CONFIG_HAS_IOPORT)) + return -ENXIO; + io.addr_space = IPMI_IO_ADDR_SPACE; io.io_setup = ipmi_si_port_setup; } else { diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c index cd2edd8f8a..96ba856481 100644 --- a/drivers/char/ipmi/ipmi_si_platform.c +++ b/drivers/char/ipmi/ipmi_si_platform.c @@ -405,11 +405,9 @@ static int ipmi_probe(struct platform_device *pdev) return platform_ipmi_probe(pdev); } -static int ipmi_remove(struct platform_device *pdev) +static void ipmi_remove(struct platform_device *pdev) { ipmi_si_remove_by_dev(&pdev->dev); - - return 0; } static int pdev_match_name(struct device *dev, const void *data) @@ -447,7 +445,7 @@ struct platform_driver ipmi_platform_driver = { .acpi_match_table = ACPI_PTR(acpi_ipmi_match), }, .probe = ipmi_probe, - .remove = ipmi_remove, + .remove_new = ipmi_remove, .id_table = si_plat_ids }; diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 1f7600c361..3f509a2221 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -2071,7 +2071,7 @@ static int ssif_platform_probe(struct platform_device *dev) return dmi_ipmi_probe(dev); } -static int ssif_platform_remove(struct platform_device *dev) +static void ssif_platform_remove(struct platform_device *dev) { struct ssif_addr_info *addr_info = dev_get_drvdata(&dev->dev); @@ -2079,7 +2079,6 @@ static int ssif_platform_remove(struct platform_device *dev) list_del(&addr_info->link); kfree(addr_info); mutex_unlock(&ssif_infos_mutex); - return 0; } static const struct platform_device_id ssif_plat_ids[] = { @@ -2092,7 +2091,7 @@ static struct platform_driver ipmi_driver = { .name = DEVICE_NAME, }, .probe = ssif_platform_probe, - .remove = ssif_platform_remove, + .remove_new = ssif_platform_remove, .id_table = ssif_plat_ids }; diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c index 72640da553..227bf06c7c 100644 --- a/drivers/char/ipmi/kcs_bmc_aspeed.c +++ b/drivers/char/ipmi/kcs_bmc_aspeed.c @@ -641,7 +641,7 @@ static int aspeed_kcs_probe(struct platform_device *pdev) return 0; } -static int aspeed_kcs_remove(struct platform_device *pdev) +static void aspeed_kcs_remove(struct platform_device *pdev) { struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev); struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc; @@ -656,8 +656,6 @@ static int aspeed_kcs_remove(struct platform_device *pdev) priv->obe.remove = true; spin_unlock_irq(&priv->obe.lock); del_timer_sync(&priv->obe.timer); - - return 0; } static const struct of_device_id ast_kcs_bmc_match[] = { @@ -674,7 +672,7 @@ static struct platform_driver ast_kcs_bmc_driver = { .of_match_table = ast_kcs_bmc_match, }, .probe = aspeed_kcs_probe, - .remove = aspeed_kcs_remove, + .remove_new = aspeed_kcs_remove, }; module_platform_driver(ast_kcs_bmc_driver); diff --git a/drivers/char/ipmi/kcs_bmc_npcm7xx.c b/drivers/char/ipmi/kcs_bmc_npcm7xx.c index 7961fec564..0771019823 100644 --- a/drivers/char/ipmi/kcs_bmc_npcm7xx.c +++ 
b/drivers/char/ipmi/kcs_bmc_npcm7xx.c @@ -218,7 +218,7 @@ static int npcm7xx_kcs_probe(struct platform_device *pdev) return 0; } -static int npcm7xx_kcs_remove(struct platform_device *pdev) +static void npcm7xx_kcs_remove(struct platform_device *pdev) { struct npcm7xx_kcs_bmc *priv = platform_get_drvdata(pdev); struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc; @@ -227,8 +227,6 @@ static int npcm7xx_kcs_remove(struct platform_device *pdev) npcm7xx_kcs_enable_channel(kcs_bmc, false); npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0); - - return 0; } static const struct of_device_id npcm_kcs_bmc_match[] = { @@ -243,7 +241,7 @@ static struct platform_driver npcm_kcs_bmc_driver = { .of_match_table = npcm_kcs_bmc_match, }, .probe = npcm7xx_kcs_probe, - .remove = npcm7xx_kcs_remove, + .remove_new = npcm7xx_kcs_remove, }; module_platform_driver(npcm_kcs_bmc_driver); diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c index 56346fb328..ab4e87a99f 100644 --- a/drivers/char/ipmi/ssif_bmc.c +++ b/drivers/char/ipmi/ssif_bmc.c @@ -177,13 +177,15 @@ static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t unsigned long flags; ssize_t ret; - if (count > sizeof(struct ipmi_ssif_msg)) + if (count < sizeof(msg.len) || + count > sizeof(struct ipmi_ssif_msg)) return -EINVAL; if (copy_from_user(&msg, buf, count)) return -EFAULT; - if (!msg.len || count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len) + if (!msg.len || msg.len > IPMI_SSIF_PAYLOAD_MAX || + count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len) return -EINVAL; spin_lock_irqsave(&ssif_bmc->lock, flags); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 3c6670cf90..7c359cc406 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -383,6 +383,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma) return 0; } +#ifdef CONFIG_DEVPORT static ssize_t read_port(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -424,6 +425,7 @@ static ssize_t write_port(struct file *file, const char __user *buf, *ppos = i; return tmp-buf; } +#endif static ssize_t read_null(struct file *file, char __user *buf, size_t count, loff_t *ppos) @@ -544,7 +546,7 @@ static unsigned long get_unmapped_area_zero(struct file *file, } /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */ - return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags); #else return -ENOSYS; #endif @@ -653,12 +655,14 @@ static const struct file_operations null_fops = { .uring_cmd = uring_cmd_null, }; -static const struct file_operations __maybe_unused port_fops = { +#ifdef CONFIG_DEVPORT +static const struct file_operations port_fops = { .llseek = memory_lseek, .read = read_port, .write = write_port, .open = open_port, }; +#endif static const struct file_operations zero_fops = { .llseek = zero_lseek, diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c index 3c99696b14..f2cff1a6fe 100644 --- a/drivers/char/powernv-op-panel.c +++ b/drivers/char/powernv-op-panel.c @@ -195,12 +195,11 @@ free_oppanel_data: return rc; } -static int oppanel_remove(struct platform_device *pdev) +static void oppanel_remove(struct platform_device *pdev) { misc_deregister(&oppanel_dev); kfree(oppanel_lines); kfree(oppanel_data); - return 0; } static const struct of_device_id oppanel_match[] = { @@ -214,7 +213,7 @@ static struct platform_driver oppanel_driver = { 
.of_match_table = oppanel_match, }, .probe = oppanel_probe, - .remove = oppanel_remove, + .remove_new = oppanel_remove, }; module_platform_driver(oppanel_driver); diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index 22d249333f..bb5115b173 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c @@ -1408,7 +1408,7 @@ static int sonypi_probe(struct platform_device *dev) return error; } -static int sonypi_remove(struct platform_device *dev) +static void sonypi_remove(struct platform_device *dev) { sonypi_disable(); @@ -1432,8 +1432,6 @@ static int sonypi_remove(struct platform_device *dev) } kfifo_free(&sonypi_device.fifo); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -1470,7 +1468,7 @@ static struct platform_driver sonypi_driver = { .pm = SONYPI_PM, }, .probe = sonypi_probe, - .remove = sonypi_remove, + .remove_new = sonypi_remove, .shutdown = sonypi_shutdown, }; diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 927088b2c3..cf0be8a793 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -27,6 +27,20 @@ menuconfig TCG_TPM if TCG_TPM +config TCG_TPM2_HMAC + bool "Use HMAC and encrypted transactions on the TPM bus" + default X86_64 + select CRYPTO_ECDH + select CRYPTO_LIB_AESCFB + select CRYPTO_LIB_SHA256 + help + Setting this causes us to deploy a scheme which uses request + and response HMACs in addition to encryption for + communicating with the TPM to prevent or detect bus snooping + and interposer attacks (see tpm-security.rst). Saying Y + here adds some encryption overhead to all kernel to TPM + transactions. + config HW_RANDOM_TPM bool "TPM HW Random Number Generator support" depends on TCG_TPM && HW_RANDOM && !(TCG_TPM=y && HW_RANDOM=m) @@ -149,6 +163,7 @@ config TCG_NSC config TCG_ATMEL tristate "Atmel TPM Interface" depends on PPC64 || HAS_IOPORT_MAP + depends on HAS_IOPORT help If you have a TPM security chip from Atmel say Yes and it will be accessible from within Linux. 
To compile this driver @@ -156,7 +171,7 @@ config TCG_ATMEL config TCG_INFINEON tristate "Infineon Technologies TPM Interface" - depends on PNP + depends on PNP || COMPILE_TEST help If you have a TPM security chip from Infineon Technologies (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 0222b1ddb3..9bb142c752 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -15,6 +15,8 @@ tpm-y += tpm-sysfs.o tpm-y += eventlog/common.o tpm-y += eventlog/tpm1.o tpm-y += eventlog/tpm2.o +tpm-y += tpm-buf.o +tpm-y += tpm2-sessions.o tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o tpm-$(CONFIG_EFI) += eventlog/efi.o diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c index bd757d836c..69533d0bfb 100644 --- a/drivers/char/tpm/eventlog/acpi.c +++ b/drivers/char/tpm/eventlog/acpi.c @@ -142,7 +142,6 @@ int tpm_read_log_acpi(struct tpm_chip *chip) log->bios_event_log_end = log->bios_event_log + len; - ret = -EIO; virt = acpi_os_map_iomem(start, len); if (!virt) { dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__); diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c index 639c3f395a..4c0bbba64e 100644 --- a/drivers/char/tpm/eventlog/common.c +++ b/drivers/char/tpm/eventlog/common.c @@ -47,6 +47,8 @@ static int tpm_bios_measurements_open(struct inode *inode, if (!err) { seq = file->private_data; seq->private = chip; + } else { + put_device(&chip->dev); } return err; diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c new file mode 100644 index 0000000000..cad0048bcc --- /dev/null +++ b/drivers/char/tpm/tpm-buf.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Handling of TPM command and other buffers. 
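+ *
+ * Roughly, the helpers below are meant to be used like this (an
+ * illustrative sketch only; the capability/property constants merely
+ * stand in for a real command and "chip" is the caller's &tpm_chip):
+ *
+ *	struct tpm_buf buf;
+ *	int rc;
+ *
+ *	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ *	if (rc)
+ *		return rc;
+ *
+ *	tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
+ *	tpm_buf_append_u32(&buf, TPM2_PT_TOTAL_COMMANDS);
+ *	tpm_buf_append_u32(&buf, 1);
+ *
+ *	rc = tpm_transmit_cmd(chip, &buf, 0, "illustrative get capability");
+ *	tpm_buf_destroy(&buf);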
+ */ + +#include +#include +#include + +/** + * tpm_buf_init() - Allocate and initialize a TPM command + * @buf: A &tpm_buf + * @tag: TPM_TAG_RQU_COMMAND, TPM2_ST_NO_SESSIONS or TPM2_ST_SESSIONS + * @ordinal: A command ordinal + * + * Return: 0 or -ENOMEM + */ +int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal) +{ + buf->data = (u8 *)__get_free_page(GFP_KERNEL); + if (!buf->data) + return -ENOMEM; + + tpm_buf_reset(buf, tag, ordinal); + return 0; +} +EXPORT_SYMBOL_GPL(tpm_buf_init); + +/** + * tpm_buf_reset() - Initialize a TPM command + * @buf: A &tpm_buf + * @tag: TPM_TAG_RQU_COMMAND, TPM2_ST_NO_SESSIONS or TPM2_ST_SESSIONS + * @ordinal: A command ordinal + */ +void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal) +{ + struct tpm_header *head = (struct tpm_header *)buf->data; + + WARN_ON(tag != TPM_TAG_RQU_COMMAND && tag != TPM2_ST_NO_SESSIONS && + tag != TPM2_ST_SESSIONS && tag != 0); + + buf->flags = 0; + buf->length = sizeof(*head); + head->tag = cpu_to_be16(tag); + head->length = cpu_to_be32(sizeof(*head)); + head->ordinal = cpu_to_be32(ordinal); + buf->handles = 0; +} +EXPORT_SYMBOL_GPL(tpm_buf_reset); + +/** + * tpm_buf_init_sized() - Allocate and initialize a sized (TPM2B) buffer + * @buf: A @tpm_buf + * + * Return: 0 or -ENOMEM + */ +int tpm_buf_init_sized(struct tpm_buf *buf) +{ + buf->data = (u8 *)__get_free_page(GFP_KERNEL); + if (!buf->data) + return -ENOMEM; + + tpm_buf_reset_sized(buf); + return 0; +} +EXPORT_SYMBOL_GPL(tpm_buf_init_sized); + +/** + * tpm_buf_reset_sized() - Initialize a sized buffer + * @buf: A &tpm_buf + */ +void tpm_buf_reset_sized(struct tpm_buf *buf) +{ + buf->flags = TPM_BUF_TPM2B; + buf->length = 2; + buf->data[0] = 0; + buf->data[1] = 0; +} +EXPORT_SYMBOL_GPL(tpm_buf_reset_sized); + +void tpm_buf_destroy(struct tpm_buf *buf) +{ + free_page((unsigned long)buf->data); +} +EXPORT_SYMBOL_GPL(tpm_buf_destroy); + +/** + * tpm_buf_length() - Return the number of bytes consumed by the data + * @buf: A &tpm_buf + * + * Return: The number of bytes consumed by the buffer + */ +u32 tpm_buf_length(struct tpm_buf *buf) +{ + return buf->length; +} +EXPORT_SYMBOL_GPL(tpm_buf_length); + +/** + * tpm_buf_append() - Append data to an initialized buffer + * @buf: A &tpm_buf + * @new_data: A data blob + * @new_length: Size of the appended data + */ +void tpm_buf_append(struct tpm_buf *buf, const u8 *new_data, u16 new_length) +{ + /* Return silently if overflow has already happened. 
*/ + if (buf->flags & TPM_BUF_OVERFLOW) + return; + + if ((buf->length + new_length) > PAGE_SIZE) { + WARN(1, "tpm_buf: write overflow\n"); + buf->flags |= TPM_BUF_OVERFLOW; + return; + } + + memcpy(&buf->data[buf->length], new_data, new_length); + buf->length += new_length; + + if (buf->flags & TPM_BUF_TPM2B) + ((__be16 *)buf->data)[0] = cpu_to_be16(buf->length - 2); + else + ((struct tpm_header *)buf->data)->length = cpu_to_be32(buf->length); +} +EXPORT_SYMBOL_GPL(tpm_buf_append); + +void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value) +{ + tpm_buf_append(buf, &value, 1); +} +EXPORT_SYMBOL_GPL(tpm_buf_append_u8); + +void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value) +{ + __be16 value2 = cpu_to_be16(value); + + tpm_buf_append(buf, (u8 *)&value2, 2); +} +EXPORT_SYMBOL_GPL(tpm_buf_append_u16); + +void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value) +{ + __be32 value2 = cpu_to_be32(value); + + tpm_buf_append(buf, (u8 *)&value2, 4); +} +EXPORT_SYMBOL_GPL(tpm_buf_append_u32); + +/** + * tpm_buf_read() - Read from a TPM buffer + * @buf: &tpm_buf instance + * @offset: offset within the buffer + * @count: the number of bytes to read + * @output: the output buffer + */ +static void tpm_buf_read(struct tpm_buf *buf, off_t *offset, size_t count, void *output) +{ + off_t next_offset; + + /* Return silently if overflow has already happened. */ + if (buf->flags & TPM_BUF_BOUNDARY_ERROR) + return; + + next_offset = *offset + count; + if (next_offset > buf->length) { + WARN(1, "tpm_buf: read out of boundary\n"); + buf->flags |= TPM_BUF_BOUNDARY_ERROR; + return; + } + + memcpy(output, &buf->data[*offset], count); + *offset = next_offset; +} + +/** + * tpm_buf_read_u8() - Read 8-bit word from a TPM buffer + * @buf: &tpm_buf instance + * @offset: offset within the buffer + * + * Return: next 8-bit word + */ +u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset) +{ + u8 value; + + tpm_buf_read(buf, offset, sizeof(value), &value); + + return value; +} +EXPORT_SYMBOL_GPL(tpm_buf_read_u8); + +/** + * tpm_buf_read_u16() - Read 16-bit word from a TPM buffer + * @buf: &tpm_buf instance + * @offset: offset within the buffer + * + * Return: next 16-bit word + */ +u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset) +{ + u16 value; + + tpm_buf_read(buf, offset, sizeof(value), &value); + + return be16_to_cpu(value); +} +EXPORT_SYMBOL_GPL(tpm_buf_read_u16); + +/** + * tpm_buf_read_u32() - Read 32-bit word from a TPM buffer + * @buf: &tpm_buf instance + * @offset: offset within the buffer + * + * Return: next 32-bit word + */ +u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset) +{ + u32 value; + + tpm_buf_read(buf, offset, sizeof(value), &value); + + return be32_to_cpu(value); +} +EXPORT_SYMBOL_GPL(tpm_buf_read_u32); + + diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 42b1062e33..854546000c 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -158,6 +158,9 @@ int tpm_try_get_ops(struct tpm_chip *chip) { int rc = -EIO; + if (chip->flags & TPM_CHIP_FLAG_DISABLE) + return rc; + get_device(&chip->dev); down_read(&chip->ops_sem); @@ -275,6 +278,9 @@ static void tpm_dev_release(struct device *dev) kfree(chip->work_space.context_buf); kfree(chip->work_space.session_buf); kfree(chip->allocated_banks); +#ifdef CONFIG_TCG_TPM2_HMAC + kfree(chip->auth); +#endif kfree(chip); } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 757336324c..5da134f12c 100644 --- a/drivers/char/tpm/tpm-interface.c +++ 
b/drivers/char/tpm/tpm-interface.c @@ -232,6 +232,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf, if (len < min_rsp_body_length + TPM_HEADER_SIZE) return -EFAULT; + buf->length = len; return 0; } EXPORT_SYMBOL_GPL(tpm_transmit_cmd); @@ -342,31 +343,6 @@ out: } EXPORT_SYMBOL_GPL(tpm_pcr_extend); -/** - * tpm_send - send a TPM command - * @chip: a &struct tpm_chip instance, %NULL for the default chip - * @cmd: a TPM command buffer - * @buflen: the length of the TPM command buffer - * - * Return: same as with tpm_transmit_cmd() - */ -int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen) -{ - struct tpm_buf buf; - int rc; - - chip = tpm_find_get_ops(chip); - if (!chip) - return -ENODEV; - - buf.data = cmd; - rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to a send a command"); - - tpm_put_ops(chip); - return rc; -} -EXPORT_SYMBOL_GPL(tpm_send); - int tpm_auto_startup(struct tpm_chip *chip) { int rc; diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index 54c71473aa..94231f052e 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -309,6 +309,21 @@ static ssize_t tpm_version_major_show(struct device *dev, } static DEVICE_ATTR_RO(tpm_version_major); +#ifdef CONFIG_TCG_TPM2_HMAC +static ssize_t null_name_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tpm_chip *chip = to_tpm_chip(dev); + int size = TPM2_NAME_SIZE; + + bin2hex(buf, chip->null_key_name, size); + size *= 2; + buf[size++] = '\n'; + return size; +} +static DEVICE_ATTR_RO(null_name); +#endif + static struct attribute *tpm1_dev_attrs[] = { &dev_attr_pubek.attr, &dev_attr_pcrs.attr, @@ -326,6 +341,9 @@ static struct attribute *tpm1_dev_attrs[] = { static struct attribute *tpm2_dev_attrs[] = { &dev_attr_tpm_version_major.attr, +#ifdef CONFIG_TCG_TPM2_HMAC + &dev_attr_null_name.attr, +#endif NULL }; diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 61445f1dc4..7bb87fa5f7 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -28,7 +28,7 @@ #include #ifdef CONFIG_X86 -#include +#include #endif #define TPM_MINOR 224 /* officially assigned */ @@ -312,9 +312,23 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf, size_t *bufsiz); int tpm_devs_add(struct tpm_chip *chip); void tpm_devs_remove(struct tpm_chip *chip); +int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, + unsigned int buf_size, unsigned int *offset); +int tpm2_load_context(struct tpm_chip *chip, u8 *buf, + unsigned int *offset, u32 *handle); void tpm_bios_log_setup(struct tpm_chip *chip); void tpm_bios_log_teardown(struct tpm_chip *chip); int tpm_dev_common_init(void); void tpm_dev_common_exit(void); + +#ifdef CONFIG_TCG_TPM2_HMAC +int tpm2_sessions_init(struct tpm_chip *chip); +#else +static inline int tpm2_sessions_init(struct tpm_chip *chip) +{ + return 0; +} +#endif + #endif diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index 93545be190..1e85625921 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -216,13 +216,6 @@ out: return rc; } -struct tpm2_null_auth_area { - __be32 handle; - __be16 nonce_size; - u8 attributes; - __be16 auth_size; -} __packed; - /** * tpm2_pcr_extend() - extend a PCR value * @@ -236,24 +229,22 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, struct tpm_digest *digests) { struct tpm_buf buf; - struct tpm2_null_auth_area auth_area; int rc; int i; - rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, 
TPM2_CC_PCR_EXTEND); + rc = tpm2_start_auth_session(chip); if (rc) return rc; - tpm_buf_append_u32(&buf, pcr_idx); + rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND); + if (rc) { + tpm2_end_auth_session(chip); + return rc; + } - auth_area.handle = cpu_to_be32(TPM2_RS_PW); - auth_area.nonce_size = 0; - auth_area.attributes = 0; - auth_area.auth_size = 0; + tpm_buf_append_name(chip, &buf, pcr_idx, NULL); + tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0); - tpm_buf_append_u32(&buf, sizeof(struct tpm2_null_auth_area)); - tpm_buf_append(&buf, (const unsigned char *)&auth_area, - sizeof(auth_area)); tpm_buf_append_u32(&buf, chip->nr_allocated_banks); for (i = 0; i < chip->nr_allocated_banks; i++) { @@ -262,7 +253,9 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, chip->allocated_banks[i].digest_size); } + tpm_buf_fill_hmac_session(chip, &buf); rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value"); + rc = tpm_buf_check_hmac_response(chip, &buf, rc); tpm_buf_destroy(&buf); @@ -288,6 +281,7 @@ struct tpm2_get_random_out { int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) { struct tpm2_get_random_out *out; + struct tpm_header *head; struct tpm_buf buf; u32 recd; u32 num_bytes = max; @@ -295,29 +289,46 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) int total = 0; int retries = 5; u8 *dest_ptr = dest; + off_t offset; if (!num_bytes || max > TPM_MAX_RNG_DATA) return -EINVAL; - err = tpm_buf_init(&buf, 0, 0); + err = tpm2_start_auth_session(chip); if (err) return err; + err = tpm_buf_init(&buf, 0, 0); + if (err) { + tpm2_end_auth_session(chip); + return err; + } + do { - tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM); + tpm_buf_reset(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM); + tpm_buf_append_hmac_session_opt(chip, &buf, TPM2_SA_ENCRYPT + | TPM2_SA_CONTINUE_SESSION, + NULL, 0); tpm_buf_append_u16(&buf, num_bytes); + tpm_buf_fill_hmac_session(chip, &buf); err = tpm_transmit_cmd(chip, &buf, offsetof(struct tpm2_get_random_out, buffer), "attempting get random"); + err = tpm_buf_check_hmac_response(chip, &buf, err); if (err) { if (err > 0) err = -EIO; goto out; } - out = (struct tpm2_get_random_out *) - &buf.data[TPM_HEADER_SIZE]; + head = (struct tpm_header *)buf.data; + offset = TPM_HEADER_SIZE; + /* Skip the parameter size field: */ + if (be16_to_cpu(head->tag) == TPM2_ST_SESSIONS) + offset += 4; + + out = (struct tpm2_get_random_out *)&buf.data[offset]; recd = min_t(u32, be16_to_cpu(out->size), num_bytes); if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + @@ -334,9 +345,12 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) } while (retries-- && total < max); tpm_buf_destroy(&buf); + tpm2_end_auth_session(chip); + return total ? total : -EIO; out: tpm_buf_destroy(&buf); + tpm2_end_auth_session(chip); return err; } @@ -759,6 +773,11 @@ int tpm2_auto_startup(struct tpm_chip *chip) rc = 0; } + if (rc) + goto out; + + rc = tpm2_sessions_init(chip); + out: /* * Infineon TPM in field upgrade mode will return no data for the number diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c new file mode 100644 index 0000000000..d3521aadd4 --- /dev/null +++ b/drivers/char/tpm/tpm2-sessions.c @@ -0,0 +1,1365 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2018 James.Bottomley@HansenPartnership.com + * + * Cryptographic helper routines for handling TPM2 sessions for + * authorization HMAC and request response encryption. 
+ * + * The idea is to ensure that every TPM command is HMAC protected by a + * session, meaning in-flight tampering would be detected and in + * addition all sensitive inputs and responses should be encrypted. + * + * The basic way this works is to use a TPM feature called salted + * sessions where a random secret used in session construction is + * encrypted to the public part of a known TPM key. The problem is we + * have no known keys, so initially a primary Elliptic Curve key is + * derived from the NULL seed (we use EC because most TPMs generate + * these keys much faster than RSA ones). The curve used is NIST_P256 + * because that's now mandated to be present in 'TCG TPM v2.0 + * Provisioning Guidance' + * + * Threat problems: the initial TPM2_CreatePrimary is not (and cannot + * be) session protected, so a clever Man in the Middle could return a + * public key they control to this command and from there intercept + * and decode all subsequent session based transactions. The kernel + * cannot mitigate this threat but, after boot, userspace can get + * proof this has not happened by asking the TPM to certify the NULL + * key. This certification would chain back to the TPM Endorsement + * Certificate and prove the NULL seed primary had not been tampered + * with and thus all sessions must have been cryptographically secure. + * To assist with this, the initial NULL seed public key name is made + * available in a sysfs file. + * + * Use of these functions: + * + * The design is all the crypto, hash and hmac gunk is confined in this + * file and never needs to be seen even by the kernel internal user. To + * the user there's an init function tpm2_sessions_init() that needs to + * be called once per TPM which generates the NULL seed primary key. + * + * These are the usage functions: + * + * tpm2_start_auth_session() which allocates the opaque auth structure + * and gets a session from the TPM. This must be called before + * any of the following functions. The session is protected by a + * session_key which is derived from a random salt value + * encrypted to the NULL seed. + * tpm2_end_auth_session() kills the session and frees the resources. + * Under normal operation this function is done by + * tpm_buf_check_hmac_response(), so this is only to be used on + * error legs where the latter is not executed. + * tpm_buf_append_name() to add a handle to the buffer. This must be + * used in place of the usual tpm_buf_append_u32() for adding + * handles because handles have to be processed specially when + * calculating the HMAC. In particular, for NV, volatile and + * permanent objects you now need to provide the name. + * tpm_buf_append_hmac_session() which appends the hmac session to the + * buf in the same way tpm_buf_append_auth does(). + * tpm_buf_fill_hmac_session() This calculates the correct hash and + * places it in the buffer. It must be called after the complete + * command buffer is finalized so it can fill in the correct HMAC + * based on the parameters. + * tpm_buf_check_hmac_response() which checks the session response in + * the buffer and calculates what it should be. If there's a + * mismatch it will log a warning and return an error. If + * tpm_buf_append_hmac_session() did not specify + * TPM_SA_CONTINUE_SESSION then the session will be closed (if it + * hasn't been consumed) and the auth structure freed. 
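+ *
+ * Putting these together, a typical single-shot caller ends up looking
+ * like the sketch below (modelled on the tpm2_pcr_extend() conversion in
+ * this patch; TPM2_CC_EXAMPLE and "handle" are placeholders, and passing
+ * 0 as the session attributes lets tpm_buf_check_hmac_response() consume
+ * the session as described above):
+ *
+ *	rc = tpm2_start_auth_session(chip);
+ *	if (rc)
+ *		return rc;
+ *
+ *	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_EXAMPLE);
+ *	if (rc) {
+ *		tpm2_end_auth_session(chip);
+ *		return rc;
+ *	}
+ *
+ *	tpm_buf_append_name(chip, &buf, handle, NULL);
+ *	tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
+ *	... append the command-specific parameters here ...
+ *	tpm_buf_fill_hmac_session(chip, &buf);
+ *
+ *	rc = tpm_transmit_cmd(chip, &buf, 0, "example command");
+ *	rc = tpm_buf_check_hmac_response(chip, &buf, rc);
+ *	tpm_buf_destroy(&buf);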
+ */ + +#include "tpm.h" +#include +#include +#include +#include +#include +#include +#include + +/* maximum number of names the TPM must remember for authorization */ +#define AUTH_MAX_NAMES 3 + +#define AES_KEY_BYTES AES_KEYSIZE_128 +#define AES_KEY_BITS (AES_KEY_BYTES*8) + +/* + * This is the structure that carries all the auth information (like + * session handle, nonces, session key and auth) from use to use it is + * designed to be opaque to anything outside. + */ +struct tpm2_auth { + u32 handle; + /* + * This has two meanings: before tpm_buf_fill_hmac_session() + * it marks the offset in the buffer of the start of the + * sessions (i.e. after all the handles). Once the buffer has + * been filled it markes the session number of our auth + * session so we can find it again in the response buffer. + * + * The two cases are distinguished because the first offset + * must always be greater than TPM_HEADER_SIZE and the second + * must be less than or equal to 5. + */ + u32 session; + /* + * the size here is variable and set by the size of our_nonce + * which must be between 16 and the name hash length. we set + * the maximum sha256 size for the greatest protection + */ + u8 our_nonce[SHA256_DIGEST_SIZE]; + u8 tpm_nonce[SHA256_DIGEST_SIZE]; + /* + * the salt is only used across the session command/response + * after that it can be used as a scratch area + */ + union { + u8 salt[EC_PT_SZ]; + /* scratch for key + IV */ + u8 scratch[AES_KEY_BYTES + AES_BLOCK_SIZE]; + }; + /* + * the session key and passphrase are the same size as the + * name digest (sha256 again). The session key is constant + * for the use of the session and the passphrase can change + * with every invocation. + * + * Note: these fields must be adjacent and in this order + * because several HMAC/KDF schemes use the combination of the + * session_key and passphrase. + */ + u8 session_key[SHA256_DIGEST_SIZE]; + u8 passphrase[SHA256_DIGEST_SIZE]; + int passphrase_len; + struct crypto_aes_ctx aes_ctx; + /* saved session attributes: */ + u8 attrs; + __be32 ordinal; + + /* + * memory for three authorization handles. 
We know them by + * handle, but they are part of the session by name, which + * we must compute and remember + */ + u32 name_h[AUTH_MAX_NAMES]; + u8 name[AUTH_MAX_NAMES][2 + SHA512_DIGEST_SIZE]; +}; + +#ifdef CONFIG_TCG_TPM2_HMAC +/* + * Name Size based on TPM algorithm (assumes no hash bigger than 255) + */ +static u8 name_size(const u8 *name) +{ + static u8 size_map[] = { + [TPM_ALG_SHA1] = SHA1_DIGEST_SIZE, + [TPM_ALG_SHA256] = SHA256_DIGEST_SIZE, + [TPM_ALG_SHA384] = SHA384_DIGEST_SIZE, + [TPM_ALG_SHA512] = SHA512_DIGEST_SIZE, + }; + u16 alg = get_unaligned_be16(name); + return size_map[alg] + 2; +} + +static int tpm2_parse_read_public(char *name, struct tpm_buf *buf) +{ + struct tpm_header *head = (struct tpm_header *)buf->data; + off_t offset = TPM_HEADER_SIZE; + u32 tot_len = be32_to_cpu(head->length); + u32 val; + + /* we're starting after the header so adjust the length */ + tot_len -= TPM_HEADER_SIZE; + + /* skip public */ + val = tpm_buf_read_u16(buf, &offset); + if (val > tot_len) + return -EINVAL; + offset += val; + /* name */ + val = tpm_buf_read_u16(buf, &offset); + if (val != name_size(&buf->data[offset])) + return -EINVAL; + memcpy(name, &buf->data[offset], val); + /* forget the rest */ + return 0; +} + +static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name) +{ + struct tpm_buf buf; + int rc; + + rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC); + if (rc) + return rc; + + tpm_buf_append_u32(&buf, handle); + rc = tpm_transmit_cmd(chip, &buf, 0, "read public"); + if (rc == TPM2_RC_SUCCESS) + rc = tpm2_parse_read_public(name, &buf); + + tpm_buf_destroy(&buf); + + return rc; +} +#endif /* CONFIG_TCG_TPM2_HMAC */ + +/** + * tpm_buf_append_name() - add a handle area to the buffer + * @chip: the TPM chip structure + * @buf: The buffer to be appended + * @handle: The handle to be appended + * @name: The name of the handle (may be NULL) + * + * In order to compute session HMACs, we need to know the names of the + * objects pointed to by the handles. For most objects, this is simply + * the actual 4 byte handle or an empty buf (in these cases @name + * should be NULL) but for volatile objects, permanent objects and NV + * areas, the name is defined as the hash (according to the name + * algorithm which should be set to sha256) of the public area to + * which the two byte algorithm id has been appended. For these + * objects, the @name pointer should point to this. If a name is + * required but @name is NULL, then TPM2_ReadPublic() will be called + * on the handle to obtain the name. + * + * As with most tpm_buf operations, success is assumed because failure + * will be caused by an incorrect programming model and indicated by a + * kernel message. 
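+ *
+ * For example, a PCR handle carries no separate name, so @name is
+ * simply NULL (as in the tpm2_pcr_extend() conversion in tpm2.c):
+ *
+ *	tpm_buf_append_name(chip, &buf, pcr_idx, NULL);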
+ */ +void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, + u32 handle, u8 *name) +{ +#ifdef CONFIG_TCG_TPM2_HMAC + enum tpm2_mso_type mso = tpm2_handle_mso(handle); + struct tpm2_auth *auth; + int slot; +#endif + + if (!tpm2_chip_auth(chip)) { + tpm_buf_append_u32(buf, handle); + /* count the number of handles in the upper bits of flags */ + buf->handles++; + return; + } + +#ifdef CONFIG_TCG_TPM2_HMAC + slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE) / 4; + if (slot >= AUTH_MAX_NAMES) { + dev_err(&chip->dev, "TPM: too many handles\n"); + return; + } + auth = chip->auth; + WARN(auth->session != tpm_buf_length(buf), + "name added in wrong place\n"); + tpm_buf_append_u32(buf, handle); + auth->session += 4; + + if (mso == TPM2_MSO_PERSISTENT || + mso == TPM2_MSO_VOLATILE || + mso == TPM2_MSO_NVRAM) { + if (!name) + tpm2_read_public(chip, handle, auth->name[slot]); + } else { + if (name) + dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n"); + } + + auth->name_h[slot] = handle; + if (name) + memcpy(auth->name[slot], name, name_size(name)); +#endif +} +EXPORT_SYMBOL_GPL(tpm_buf_append_name); + +/** + * tpm_buf_append_hmac_session() - Append a TPM session element + * @chip: the TPM chip structure + * @buf: The buffer to be appended + * @attributes: The session attributes + * @passphrase: The session authority (NULL if none) + * @passphrase_len: The length of the session authority (0 if none) + * + * This fills in a session structure in the TPM command buffer, except + * for the HMAC which cannot be computed until the command buffer is + * complete. The type of session is controlled by the @attributes, + * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the + * session won't terminate after tpm_buf_check_hmac_response(), + * TPM2_SA_DECRYPT which means this buffers first parameter should be + * encrypted with a session key and TPM2_SA_ENCRYPT, which means the + * response buffer's first parameter needs to be decrypted (confusing, + * but the defines are written from the point of view of the TPM). + * + * Any session appended by this command must be finalized by calling + * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect + * and the TPM will reject the command. + * + * As with most tpm_buf operations, success is assumed because failure + * will be caused by an incorrect programming model and indicated by a + * kernel message. 
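+ *
+ * An illustrative sketch of a session that requests response
+ * parameter encryption and keeps the session alive across commands
+ * (the tpm2_get_random() conversion in tpm2.c uses these attributes,
+ * there via the tpm_buf_append_hmac_session_opt() variant):
+ *
+ *	tpm_buf_append_hmac_session(chip, &buf,
+ *				    TPM2_SA_ENCRYPT | TPM2_SA_CONTINUE_SESSION,
+ *				    NULL, 0);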
+ */ +void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf, + u8 attributes, u8 *passphrase, + int passphrase_len) +{ +#ifdef CONFIG_TCG_TPM2_HMAC + u8 nonce[SHA256_DIGEST_SIZE]; + struct tpm2_auth *auth; + u32 len; +#endif + + if (!tpm2_chip_auth(chip)) { + /* offset tells us where the sessions area begins */ + int offset = buf->handles * 4 + TPM_HEADER_SIZE; + u32 len = 9 + passphrase_len; + + if (tpm_buf_length(buf) != offset) { + /* not the first session so update the existing length */ + len += get_unaligned_be32(&buf->data[offset]); + put_unaligned_be32(len, &buf->data[offset]); + } else { + tpm_buf_append_u32(buf, len); + } + /* auth handle */ + tpm_buf_append_u32(buf, TPM2_RS_PW); + /* nonce */ + tpm_buf_append_u16(buf, 0); + /* attributes */ + tpm_buf_append_u8(buf, 0); + /* passphrase */ + tpm_buf_append_u16(buf, passphrase_len); + tpm_buf_append(buf, passphrase, passphrase_len); + return; + } + +#ifdef CONFIG_TCG_TPM2_HMAC + /* + * The Architecture Guide requires us to strip trailing zeros + * before computing the HMAC + */ + while (passphrase && passphrase_len > 0 && passphrase[passphrase_len - 1] == '\0') + passphrase_len--; + + auth = chip->auth; + auth->attrs = attributes; + auth->passphrase_len = passphrase_len; + if (passphrase_len) + memcpy(auth->passphrase, passphrase, passphrase_len); + + if (auth->session != tpm_buf_length(buf)) { + /* we're not the first session */ + len = get_unaligned_be32(&buf->data[auth->session]); + if (4 + len + auth->session != tpm_buf_length(buf)) { + WARN(1, "session length mismatch, cannot append"); + return; + } + + /* add our new session */ + len += 9 + 2 * SHA256_DIGEST_SIZE; + put_unaligned_be32(len, &buf->data[auth->session]); + } else { + tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE); + } + + /* random number for our nonce */ + get_random_bytes(nonce, sizeof(nonce)); + memcpy(auth->our_nonce, nonce, sizeof(nonce)); + tpm_buf_append_u32(buf, auth->handle); + /* our new nonce */ + tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE); + tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE); + tpm_buf_append_u8(buf, auth->attrs); + /* and put a placeholder for the hmac */ + tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE); + tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE); +#endif +} +EXPORT_SYMBOL_GPL(tpm_buf_append_hmac_session); + +#ifdef CONFIG_TCG_TPM2_HMAC + +static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy, + u32 *handle, u8 *name); + +/* + * It turns out the crypto hmac(sha256) is hard for us to consume + * because it assumes a fixed key and the TPM seems to change the key + * on every operation, so we weld the hmac init and final functions in + * here to give it the same usage characteristics as a regular hash + */ +static void tpm2_hmac_init(struct sha256_state *sctx, u8 *key, u32 key_len) +{ + u8 pad[SHA256_BLOCK_SIZE]; + int i; + + sha256_init(sctx); + for (i = 0; i < sizeof(pad); i++) { + if (i < key_len) + pad[i] = key[i]; + else + pad[i] = 0; + pad[i] ^= HMAC_IPAD_VALUE; + } + sha256_update(sctx, pad, sizeof(pad)); +} + +static void tpm2_hmac_final(struct sha256_state *sctx, u8 *key, u32 key_len, + u8 *out) +{ + u8 pad[SHA256_BLOCK_SIZE]; + int i; + + for (i = 0; i < sizeof(pad); i++) { + if (i < key_len) + pad[i] = key[i]; + else + pad[i] = 0; + pad[i] ^= HMAC_OPAD_VALUE; + } + + /* collect the final hash; use out as temporary storage */ + sha256_final(sctx, out); + + sha256_init(sctx); + sha256_update(sctx, pad, sizeof(pad)); + sha256_update(sctx, out, SHA256_DIGEST_SIZE); + sha256_final(sctx, 
out); +} + +/* + * assume hash sha256 and nonces u, v of size SHA256_DIGEST_SIZE but + * otherwise standard tpm2_KDFa. Note output is in bytes not bits. + */ +static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u, + u8 *v, u32 bytes, u8 *out) +{ + u32 counter = 1; + const __be32 bits = cpu_to_be32(bytes * 8); + + while (bytes > 0) { + struct sha256_state sctx; + __be32 c = cpu_to_be32(counter); + + tpm2_hmac_init(&sctx, key, key_len); + sha256_update(&sctx, (u8 *)&c, sizeof(c)); + sha256_update(&sctx, label, strlen(label)+1); + sha256_update(&sctx, u, SHA256_DIGEST_SIZE); + sha256_update(&sctx, v, SHA256_DIGEST_SIZE); + sha256_update(&sctx, (u8 *)&bits, sizeof(bits)); + tpm2_hmac_final(&sctx, key, key_len, out); + + bytes -= SHA256_DIGEST_SIZE; + counter++; + out += SHA256_DIGEST_SIZE; + } +} + +/* + * Somewhat of a bastardization of the real KDFe. We're assuming + * we're working with known point sizes for the input parameters and + * the hash algorithm is fixed at sha256. Because we know that the + * point size is 32 bytes like the hash size, there's no need to loop + * in this KDF. + */ +static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v, + u8 *out) +{ + struct sha256_state sctx; + /* + * this should be an iterative counter, but because we know + * we're only taking 32 bytes for the point using a sha256 + * hash which is also 32 bytes, there's only one loop + */ + __be32 c = cpu_to_be32(1); + + sha256_init(&sctx); + /* counter (BE) */ + sha256_update(&sctx, (u8 *)&c, sizeof(c)); + /* secret value */ + sha256_update(&sctx, z, EC_PT_SZ); + /* string including trailing zero */ + sha256_update(&sctx, str, strlen(str)+1); + sha256_update(&sctx, pt_u, EC_PT_SZ); + sha256_update(&sctx, pt_v, EC_PT_SZ); + sha256_final(&sctx, out); +} + +static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip) +{ + struct crypto_kpp *kpp; + struct kpp_request *req; + struct scatterlist s[2], d[1]; + struct ecdh p = {0}; + u8 encoded_key[EC_PT_SZ], *x, *y; + unsigned int buf_len; + + /* secret is two sized points */ + tpm_buf_append_u16(buf, (EC_PT_SZ + 2)*2); + /* + * we cheat here and append uninitialized data to form + * the points. 
All we care about is getting the two + * co-ordinate pointers, which will be used to overwrite + * the uninitialized data + */ + tpm_buf_append_u16(buf, EC_PT_SZ); + x = &buf->data[tpm_buf_length(buf)]; + tpm_buf_append(buf, encoded_key, EC_PT_SZ); + tpm_buf_append_u16(buf, EC_PT_SZ); + y = &buf->data[tpm_buf_length(buf)]; + tpm_buf_append(buf, encoded_key, EC_PT_SZ); + sg_init_table(s, 2); + sg_set_buf(&s[0], x, EC_PT_SZ); + sg_set_buf(&s[1], y, EC_PT_SZ); + + kpp = crypto_alloc_kpp("ecdh-nist-p256", CRYPTO_ALG_INTERNAL, 0); + if (IS_ERR(kpp)) { + dev_err(&chip->dev, "crypto ecdh allocation failed\n"); + return; + } + + buf_len = crypto_ecdh_key_len(&p); + if (sizeof(encoded_key) < buf_len) { + dev_err(&chip->dev, "salt buffer too small needs %d\n", + buf_len); + goto out; + } + crypto_ecdh_encode_key(encoded_key, buf_len, &p); + /* this generates a random private key */ + crypto_kpp_set_secret(kpp, encoded_key, buf_len); + + /* salt is now the public point of this private key */ + req = kpp_request_alloc(kpp, GFP_KERNEL); + if (!req) + goto out; + kpp_request_set_input(req, NULL, 0); + kpp_request_set_output(req, s, EC_PT_SZ*2); + crypto_kpp_generate_public_key(req); + /* + * we're not done: now we have to compute the shared secret + * which is our private key multiplied by the tpm_key public + * point, we actually only take the x point and discard the y + * point and feed it through KDFe to get the final secret salt + */ + sg_set_buf(&s[0], chip->null_ec_key_x, EC_PT_SZ); + sg_set_buf(&s[1], chip->null_ec_key_y, EC_PT_SZ); + kpp_request_set_input(req, s, EC_PT_SZ*2); + sg_init_one(d, chip->auth->salt, EC_PT_SZ); + kpp_request_set_output(req, d, EC_PT_SZ); + crypto_kpp_compute_shared_secret(req); + kpp_request_free(req); + + /* + * pass the shared secret through KDFe for salt. Note salt + * area is used both for input shared secret and output salt. + * This works because KDFe fully consumes the secret before it + * writes the salt + */ + tpm2_KDFe(chip->auth->salt, "SECRET", x, chip->null_ec_key_x, + chip->auth->salt); + + out: + crypto_free_kpp(kpp); +} + +/** + * tpm_buf_fill_hmac_session() - finalize the session HMAC + * @chip: the TPM chip structure + * @buf: The buffer to be appended + * + * This command must not be called until all of the parameters have + * been appended to @buf otherwise the computed HMAC will be + * incorrect. + * + * This function computes and fills in the session HMAC using the + * session key and, if TPM2_SA_DECRYPT was specified, computes the + * encryption key and encrypts the first parameter of the command + * buffer with it. + * + * As with most tpm_buf operations, success is assumed because failure + * will be caused by an incorrect programming model and indicated by a + * kernel message. + */ +void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf) +{ + u32 cc, handles, val; + struct tpm2_auth *auth = chip->auth; + int i; + struct tpm_header *head = (struct tpm_header *)buf->data; + off_t offset_s = TPM_HEADER_SIZE, offset_p; + u8 *hmac = NULL; + u32 attrs; + u8 cphash[SHA256_DIGEST_SIZE]; + struct sha256_state sctx; + + if (!auth) + return; + + /* save the command code in BE format */ + auth->ordinal = head->ordinal; + + cc = be32_to_cpu(head->ordinal); + + i = tpm2_find_cc(chip, cc); + if (i < 0) { + dev_err(&chip->dev, "Command 0x%x not found in TPM\n", cc); + return; + } + attrs = chip->cc_attrs_tbl[i]; + + handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0); + + /* + * just check the names, it's easy to make mistakes. 
This + * would happen if someone added a handle via + * tpm_buf_append_u32() instead of tpm_buf_append_name() + */ + for (i = 0; i < handles; i++) { + u32 handle = tpm_buf_read_u32(buf, &offset_s); + + if (auth->name_h[i] != handle) { + dev_err(&chip->dev, "TPM: handle %d wrong for name\n", + i); + return; + } + } + /* point offset_s to the start of the sessions */ + val = tpm_buf_read_u32(buf, &offset_s); + /* point offset_p to the start of the parameters */ + offset_p = offset_s + val; + for (i = 1; offset_s < offset_p; i++) { + u32 handle = tpm_buf_read_u32(buf, &offset_s); + u16 len; + u8 a; + + /* nonce (already in auth) */ + len = tpm_buf_read_u16(buf, &offset_s); + offset_s += len; + + a = tpm_buf_read_u8(buf, &offset_s); + + len = tpm_buf_read_u16(buf, &offset_s); + if (handle == auth->handle && auth->attrs == a) { + hmac = &buf->data[offset_s]; + /* + * save our session number so we know which + * session in the response belongs to us + */ + auth->session = i; + } + + offset_s += len; + } + if (offset_s != offset_p) { + dev_err(&chip->dev, "TPM session length is incorrect\n"); + return; + } + if (!hmac) { + dev_err(&chip->dev, "TPM could not find HMAC session\n"); + return; + } + + /* encrypt before HMAC */ + if (auth->attrs & TPM2_SA_DECRYPT) { + u16 len; + + /* need key and IV */ + tpm2_KDFa(auth->session_key, SHA256_DIGEST_SIZE + + auth->passphrase_len, "CFB", auth->our_nonce, + auth->tpm_nonce, AES_KEY_BYTES + AES_BLOCK_SIZE, + auth->scratch); + + len = tpm_buf_read_u16(buf, &offset_p); + aes_expandkey(&auth->aes_ctx, auth->scratch, AES_KEY_BYTES); + aescfb_encrypt(&auth->aes_ctx, &buf->data[offset_p], + &buf->data[offset_p], len, + auth->scratch + AES_KEY_BYTES); + /* reset p to beginning of parameters for HMAC */ + offset_p -= 2; + } + + sha256_init(&sctx); + /* ordinal is already BE */ + sha256_update(&sctx, (u8 *)&head->ordinal, sizeof(head->ordinal)); + /* add the handle names */ + for (i = 0; i < handles; i++) { + enum tpm2_mso_type mso = tpm2_handle_mso(auth->name_h[i]); + + if (mso == TPM2_MSO_PERSISTENT || + mso == TPM2_MSO_VOLATILE || + mso == TPM2_MSO_NVRAM) { + sha256_update(&sctx, auth->name[i], + name_size(auth->name[i])); + } else { + __be32 h = cpu_to_be32(auth->name_h[i]); + + sha256_update(&sctx, (u8 *)&h, 4); + } + } + if (offset_s != tpm_buf_length(buf)) + sha256_update(&sctx, &buf->data[offset_s], + tpm_buf_length(buf) - offset_s); + sha256_final(&sctx, cphash); + + /* now calculate the hmac */ + tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key) + + auth->passphrase_len); + sha256_update(&sctx, cphash, sizeof(cphash)); + sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce)); + sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce)); + sha256_update(&sctx, &auth->attrs, 1); + tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key) + + auth->passphrase_len, hmac); +} +EXPORT_SYMBOL(tpm_buf_fill_hmac_session); + +/** + * tpm_buf_check_hmac_response() - check the TPM return HMAC for correctness + * @chip: the TPM chip structure + * @buf: the original command buffer (which now contains the response) + * @rc: the return code from tpm_transmit_cmd + * + * If @rc is non zero, @buf may not contain an actual return, so @rc + * is passed through as the return and the session cleaned up and + * de-allocated if required (this is required if + * TPM2_SA_CONTINUE_SESSION was not specified as a session flag). 
+ * + * If @rc is zero, the response HMAC is computed against the returned + * @buf and matched to the TPM one in the session area. If there is a + * mismatch, an error is logged and -EINVAL returned. + * + * The reason for this is that the command issue and HMAC check + * sequence should look like: + * + * rc = tpm_transmit_cmd(...); + * rc = tpm_buf_check_hmac_response(&buf, auth, rc); + * if (rc) + * ... + * + * Which is easily layered into the current contrl flow. + * + * Returns: 0 on success or an error. + */ +int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, + int rc) +{ + struct tpm_header *head = (struct tpm_header *)buf->data; + struct tpm2_auth *auth = chip->auth; + off_t offset_s, offset_p; + u8 rphash[SHA256_DIGEST_SIZE]; + u32 attrs, cc; + struct sha256_state sctx; + u16 tag = be16_to_cpu(head->tag); + int parm_len, len, i, handles; + + if (!auth) + return rc; + + cc = be32_to_cpu(auth->ordinal); + + if (auth->session >= TPM_HEADER_SIZE) { + WARN(1, "tpm session not filled correctly\n"); + goto out; + } + + if (rc != 0) + /* pass non success rc through and close the session */ + goto out; + + rc = -EINVAL; + if (tag != TPM2_ST_SESSIONS) { + dev_err(&chip->dev, "TPM: HMAC response check has no sessions tag\n"); + goto out; + } + + i = tpm2_find_cc(chip, cc); + if (i < 0) + goto out; + attrs = chip->cc_attrs_tbl[i]; + handles = (attrs >> TPM2_CC_ATTR_RHANDLE) & 1; + + /* point to area beyond handles */ + offset_s = TPM_HEADER_SIZE + handles * 4; + parm_len = tpm_buf_read_u32(buf, &offset_s); + offset_p = offset_s; + offset_s += parm_len; + /* skip over any sessions before ours */ + for (i = 0; i < auth->session - 1; i++) { + len = tpm_buf_read_u16(buf, &offset_s); + offset_s += len + 1; + len = tpm_buf_read_u16(buf, &offset_s); + offset_s += len; + } + /* TPM nonce */ + len = tpm_buf_read_u16(buf, &offset_s); + if (offset_s + len > tpm_buf_length(buf)) + goto out; + if (len != SHA256_DIGEST_SIZE) + goto out; + memcpy(auth->tpm_nonce, &buf->data[offset_s], len); + offset_s += len; + attrs = tpm_buf_read_u8(buf, &offset_s); + len = tpm_buf_read_u16(buf, &offset_s); + if (offset_s + len != tpm_buf_length(buf)) + goto out; + if (len != SHA256_DIGEST_SIZE) + goto out; + /* + * offset_s points to the HMAC. 
now calculate comparison, beginning + * with rphash + */ + sha256_init(&sctx); + /* yes, I know this is now zero, but it's what the standard says */ + sha256_update(&sctx, (u8 *)&head->return_code, + sizeof(head->return_code)); + /* ordinal is already BE */ + sha256_update(&sctx, (u8 *)&auth->ordinal, sizeof(auth->ordinal)); + sha256_update(&sctx, &buf->data[offset_p], parm_len); + sha256_final(&sctx, rphash); + + /* now calculate the hmac */ + tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key) + + auth->passphrase_len); + sha256_update(&sctx, rphash, sizeof(rphash)); + sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce)); + sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce)); + sha256_update(&sctx, &auth->attrs, 1); + /* we're done with the rphash, so put our idea of the hmac there */ + tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key) + + auth->passphrase_len, rphash); + if (memcmp(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE) == 0) { + rc = 0; + } else { + dev_err(&chip->dev, "TPM: HMAC check failed\n"); + goto out; + } + + /* now do response decryption */ + if (auth->attrs & TPM2_SA_ENCRYPT) { + /* need key and IV */ + tpm2_KDFa(auth->session_key, SHA256_DIGEST_SIZE + + auth->passphrase_len, "CFB", auth->tpm_nonce, + auth->our_nonce, AES_KEY_BYTES + AES_BLOCK_SIZE, + auth->scratch); + + len = tpm_buf_read_u16(buf, &offset_p); + aes_expandkey(&auth->aes_ctx, auth->scratch, AES_KEY_BYTES); + aescfb_decrypt(&auth->aes_ctx, &buf->data[offset_p], + &buf->data[offset_p], len, + auth->scratch + AES_KEY_BYTES); + } + + out: + if ((auth->attrs & TPM2_SA_CONTINUE_SESSION) == 0) { + if (rc) + /* manually close the session if it wasn't consumed */ + tpm2_flush_context(chip, auth->handle); + memzero_explicit(auth, sizeof(*auth)); + } else { + /* reset for next use */ + auth->session = TPM_HEADER_SIZE; + } + + return rc; +} +EXPORT_SYMBOL(tpm_buf_check_hmac_response); + +/** + * tpm2_end_auth_session() - kill the allocated auth session + * @chip: the TPM chip structure + * + * ends the session started by tpm2_start_auth_session and frees all + * the resources. Under normal conditions, + * tpm_buf_check_hmac_response() will correctly end the session if + * required, so this function is only for use in error legs that will + * bypass the normal invocation of tpm_buf_check_hmac_response(). 
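+ *
+ * For example (as in the tpm2_pcr_extend() conversion in tpm2.c),
+ * when buffer initialization fails after the session was started:
+ *
+ *	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
+ *	if (rc) {
+ *		tpm2_end_auth_session(chip);
+ *		return rc;
+ *	}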
+ */ +void tpm2_end_auth_session(struct tpm_chip *chip) +{ + struct tpm2_auth *auth = chip->auth; + + if (!auth) + return; + + tpm2_flush_context(chip, auth->handle); + memzero_explicit(auth, sizeof(*auth)); +} +EXPORT_SYMBOL(tpm2_end_auth_session); + +static int tpm2_parse_start_auth_session(struct tpm2_auth *auth, + struct tpm_buf *buf) +{ + struct tpm_header *head = (struct tpm_header *)buf->data; + u32 tot_len = be32_to_cpu(head->length); + off_t offset = TPM_HEADER_SIZE; + u32 val; + + /* we're starting after the header so adjust the length */ + tot_len -= TPM_HEADER_SIZE; + + /* should have handle plus nonce */ + if (tot_len != 4 + 2 + sizeof(auth->tpm_nonce)) + return -EINVAL; + + auth->handle = tpm_buf_read_u32(buf, &offset); + val = tpm_buf_read_u16(buf, &offset); + if (val != sizeof(auth->tpm_nonce)) + return -EINVAL; + memcpy(auth->tpm_nonce, &buf->data[offset], sizeof(auth->tpm_nonce)); + /* now compute the session key from the nonces */ + tpm2_KDFa(auth->salt, sizeof(auth->salt), "ATH", auth->tpm_nonce, + auth->our_nonce, sizeof(auth->session_key), + auth->session_key); + + return 0; +} + +static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key) +{ + int rc; + unsigned int offset = 0; /* dummy offset for null seed context */ + u8 name[SHA256_DIGEST_SIZE + 2]; + + rc = tpm2_load_context(chip, chip->null_key_context, &offset, + null_key); + if (rc != -EINVAL) + return rc; + + /* an integrity failure may mean the TPM has been reset */ + dev_err(&chip->dev, "NULL key integrity failure!\n"); + /* check the null name against what we know */ + tpm2_create_primary(chip, TPM2_RH_NULL, NULL, name); + if (memcmp(name, chip->null_key_name, sizeof(name)) == 0) + /* name unchanged, assume transient integrity failure */ + return rc; + /* + * Fatal TPM failure: the NULL seed has actually changed, so + * the TPM must have been illegally reset. All in-kernel TPM + * operations will fail because the NULL primary can't be + * loaded to salt the sessions, but disable the TPM anyway so + * userspace programmes can't be compromised by it. + */ + dev_err(&chip->dev, "NULL name has changed, disabling TPM due to interference\n"); + chip->flags |= TPM_CHIP_FLAG_DISABLE; + + return rc; +} + +/** + * tpm2_start_auth_session() - create a HMAC authentication session with the TPM + * @chip: the TPM chip structure to create the session with + * + * This function loads the NULL seed from its saved context and starts + * an authentication session on the null seed, fills in the + * @chip->auth structure to contain all the session details necessary + * for performing the HMAC, encrypt and decrypt operations and + * returns. The NULL seed is flushed before this function returns. + * + * Return: zero on success or actual error encountered. 
+ */ +int tpm2_start_auth_session(struct tpm_chip *chip) +{ + struct tpm_buf buf; + struct tpm2_auth *auth = chip->auth; + int rc; + u32 null_key; + + if (!auth) { + dev_warn_once(&chip->dev, "auth session is not active\n"); + return 0; + } + + rc = tpm2_load_null(chip, &null_key); + if (rc) + goto out; + + auth->session = TPM_HEADER_SIZE; + + rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_START_AUTH_SESS); + if (rc) + goto out; + + /* salt key handle */ + tpm_buf_append_u32(&buf, null_key); + /* bind key handle */ + tpm_buf_append_u32(&buf, TPM2_RH_NULL); + /* nonce caller */ + get_random_bytes(auth->our_nonce, sizeof(auth->our_nonce)); + tpm_buf_append_u16(&buf, sizeof(auth->our_nonce)); + tpm_buf_append(&buf, auth->our_nonce, sizeof(auth->our_nonce)); + + /* append encrypted salt and squirrel away unencrypted in auth */ + tpm_buf_append_salt(&buf, chip); + /* session type (HMAC, audit or policy) */ + tpm_buf_append_u8(&buf, TPM2_SE_HMAC); + + /* symmetric encryption parameters */ + /* symmetric algorithm */ + tpm_buf_append_u16(&buf, TPM_ALG_AES); + /* bits for symmetric algorithm */ + tpm_buf_append_u16(&buf, AES_KEY_BITS); + /* symmetric algorithm mode (must be CFB) */ + tpm_buf_append_u16(&buf, TPM_ALG_CFB); + /* hash algorithm for session */ + tpm_buf_append_u16(&buf, TPM_ALG_SHA256); + + rc = tpm_transmit_cmd(chip, &buf, 0, "start auth session"); + tpm2_flush_context(chip, null_key); + + if (rc == TPM2_RC_SUCCESS) + rc = tpm2_parse_start_auth_session(auth, &buf); + + tpm_buf_destroy(&buf); + + if (rc) + goto out; + + out: + return rc; +} +EXPORT_SYMBOL(tpm2_start_auth_session); + +/* + * A mask containing the object attributes for the kernel held null primary key + * used in HMAC encryption. For more information on specific attributes look up + * to "8.3 TPMA_OBJECT (Object Attributes)". 
+ */ +#define TPM2_OA_NULL_KEY ( \ + TPM2_OA_NO_DA | \ + TPM2_OA_FIXED_TPM | \ + TPM2_OA_FIXED_PARENT | \ + TPM2_OA_SENSITIVE_DATA_ORIGIN | \ + TPM2_OA_USER_WITH_AUTH | \ + TPM2_OA_DECRYPT | \ + TPM2_OA_RESTRICTED) + +/** + * tpm2_parse_create_primary() - parse the data returned from TPM_CC_CREATE_PRIMARY + * + * @chip: The TPM the primary was created under + * @buf: The response buffer from the chip + * @handle: pointer to be filled in with the return handle of the primary + * @hierarchy: The hierarchy the primary was created for + * @name: pointer to be filled in with the primary key name + * + * Return: + * * 0 - OK + * * -errno - A system error + * * TPM_RC - A TPM error + */ +static int tpm2_parse_create_primary(struct tpm_chip *chip, struct tpm_buf *buf, + u32 *handle, u32 hierarchy, u8 *name) +{ + struct tpm_header *head = (struct tpm_header *)buf->data; + off_t offset_r = TPM_HEADER_SIZE, offset_t; + u16 len = TPM_HEADER_SIZE; + u32 total_len = be32_to_cpu(head->length); + u32 val, param_len, keyhandle; + + keyhandle = tpm_buf_read_u32(buf, &offset_r); + if (handle) + *handle = keyhandle; + else + tpm2_flush_context(chip, keyhandle); + + param_len = tpm_buf_read_u32(buf, &offset_r); + /* + * param_len doesn't include the header, but all the other + * lengths and offsets do, so add it to parm len to make + * the comparisons easier + */ + param_len += TPM_HEADER_SIZE; + + if (param_len + 8 > total_len) + return -EINVAL; + len = tpm_buf_read_u16(buf, &offset_r); + offset_t = offset_r; + if (name) { + /* + * now we have the public area, compute the name of + * the object + */ + put_unaligned_be16(TPM_ALG_SHA256, name); + sha256(&buf->data[offset_r], len, name + 2); + } + + /* validate the public key */ + val = tpm_buf_read_u16(buf, &offset_t); + + /* key type (must be what we asked for) */ + if (val != TPM_ALG_ECC) + return -EINVAL; + val = tpm_buf_read_u16(buf, &offset_t); + + /* name algorithm */ + if (val != TPM_ALG_SHA256) + return -EINVAL; + val = tpm_buf_read_u32(buf, &offset_t); + + /* object properties */ + if (val != TPM2_OA_NULL_KEY) + return -EINVAL; + + /* auth policy (empty) */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != 0) + return -EINVAL; + + /* symmetric key parameters */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM_ALG_AES) + return -EINVAL; + + /* symmetric key length */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != AES_KEY_BITS) + return -EINVAL; + + /* symmetric encryption scheme */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM_ALG_CFB) + return -EINVAL; + + /* signing scheme */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM_ALG_NULL) + return -EINVAL; + + /* ECC Curve */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM2_ECC_NIST_P256) + return -EINVAL; + + /* KDF Scheme */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != TPM_ALG_NULL) + return -EINVAL; + + /* extract public key (x and y points) */ + val = tpm_buf_read_u16(buf, &offset_t); + if (val != EC_PT_SZ) + return -EINVAL; + memcpy(chip->null_ec_key_x, &buf->data[offset_t], val); + offset_t += val; + val = tpm_buf_read_u16(buf, &offset_t); + if (val != EC_PT_SZ) + return -EINVAL; + memcpy(chip->null_ec_key_y, &buf->data[offset_t], val); + offset_t += val; + + /* original length of the whole TPM2B */ + offset_r += len; + + /* should have exactly consumed the TPM2B public structure */ + if (offset_t != offset_r) + return -EINVAL; + if (offset_r > param_len) + return -EINVAL; + + /* creation data (skip) */ + len = tpm_buf_read_u16(buf, 
&offset_r); + offset_r += len; + if (offset_r > param_len) + return -EINVAL; + + /* creation digest (must be sha256) */ + len = tpm_buf_read_u16(buf, &offset_r); + offset_r += len; + if (len != SHA256_DIGEST_SIZE || offset_r > param_len) + return -EINVAL; + + /* TPMT_TK_CREATION follows */ + /* tag, must be TPM_ST_CREATION (0x8021) */ + val = tpm_buf_read_u16(buf, &offset_r); + if (val != TPM2_ST_CREATION || offset_r > param_len) + return -EINVAL; + + /* hierarchy */ + val = tpm_buf_read_u32(buf, &offset_r); + if (val != hierarchy || offset_r > param_len) + return -EINVAL; + + /* the ticket digest HMAC (might not be sha256) */ + len = tpm_buf_read_u16(buf, &offset_r); + offset_r += len; + if (offset_r > param_len) + return -EINVAL; + + /* + * finally we have the name, which is a sha256 digest plus a 2 + * byte algorithm type + */ + len = tpm_buf_read_u16(buf, &offset_r); + if (offset_r + len != param_len + 8) + return -EINVAL; + if (len != SHA256_DIGEST_SIZE + 2) + return -EINVAL; + + if (memcmp(chip->null_key_name, &buf->data[offset_r], + SHA256_DIGEST_SIZE + 2) != 0) { + dev_err(&chip->dev, "NULL Seed name comparison failed\n"); + return -EINVAL; + } + + return 0; +} + +/** + * tpm2_create_primary() - create a primary key using a fixed P-256 template + * + * @chip: the TPM chip to create under + * @hierarchy: The hierarchy handle to create under + * @handle: The returned volatile handle on success + * @name: The name of the returned key + * + * For platforms that might not have a persistent primary, this can be + * used to create one quickly on the fly (it uses Elliptic Curve not + * RSA, so even slow TPMs can create one fast). The template uses the + * TCG mandated H one for non-endorsement ECC primaries, i.e. P-256 + * elliptic curve (the only current one all TPM2s are required to + * have) a sha256 name hash and no policy. + * + * Return: + * * 0 - OK + * * -errno - A system error + * * TPM_RC - A TPM error + */ +static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy, + u32 *handle, u8 *name) +{ + int rc; + struct tpm_buf buf; + struct tpm_buf template; + + rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE_PRIMARY); + if (rc) + return rc; + + rc = tpm_buf_init_sized(&template); + if (rc) { + tpm_buf_destroy(&buf); + return rc; + } + + /* + * create the template. 
Note: in order for userspace to + * verify the security of the system, it will have to create + * and certify this NULL primary, meaning all the template + * parameters will have to be identical, so conform exactly to + * the TCG TPM v2.0 Provisioning Guidance for the SRK ECC + * key H template (H has zero size unique points) + */ + + /* key type */ + tpm_buf_append_u16(&template, TPM_ALG_ECC); + + /* name algorithm */ + tpm_buf_append_u16(&template, TPM_ALG_SHA256); + + /* object properties */ + tpm_buf_append_u32(&template, TPM2_OA_NULL_KEY); + + /* sauth policy (empty) */ + tpm_buf_append_u16(&template, 0); + + /* BEGIN parameters: key specific; for ECC*/ + + /* symmetric algorithm */ + tpm_buf_append_u16(&template, TPM_ALG_AES); + + /* bits for symmetric algorithm */ + tpm_buf_append_u16(&template, AES_KEY_BITS); + + /* algorithm mode (must be CFB) */ + tpm_buf_append_u16(&template, TPM_ALG_CFB); + + /* scheme (NULL means any scheme) */ + tpm_buf_append_u16(&template, TPM_ALG_NULL); + + /* ECC Curve ID */ + tpm_buf_append_u16(&template, TPM2_ECC_NIST_P256); + + /* KDF Scheme */ + tpm_buf_append_u16(&template, TPM_ALG_NULL); + + /* unique: key specific; for ECC it is two zero size points */ + tpm_buf_append_u16(&template, 0); + tpm_buf_append_u16(&template, 0); + + /* END parameters */ + + /* primary handle */ + tpm_buf_append_u32(&buf, hierarchy); + tpm_buf_append_empty_auth(&buf, TPM2_RS_PW); + + /* sensitive create size is 4 for two empty buffers */ + tpm_buf_append_u16(&buf, 4); + + /* sensitive create auth data (empty) */ + tpm_buf_append_u16(&buf, 0); + + /* sensitive create sensitive data (empty) */ + tpm_buf_append_u16(&buf, 0); + + /* the public template */ + tpm_buf_append(&buf, template.data, template.length); + tpm_buf_destroy(&template); + + /* outside info (empty) */ + tpm_buf_append_u16(&buf, 0); + + /* creation PCR (none) */ + tpm_buf_append_u32(&buf, 0); + + rc = tpm_transmit_cmd(chip, &buf, 0, + "attempting to create NULL primary"); + + if (rc == TPM2_RC_SUCCESS) + rc = tpm2_parse_create_primary(chip, &buf, handle, hierarchy, + name); + + tpm_buf_destroy(&buf); + + return rc; +} + +static int tpm2_create_null_primary(struct tpm_chip *chip) +{ + u32 null_key; + int rc; + + rc = tpm2_create_primary(chip, TPM2_RH_NULL, &null_key, + chip->null_key_name); + + if (rc == TPM2_RC_SUCCESS) { + unsigned int offset = 0; /* dummy offset for null key context */ + + rc = tpm2_save_context(chip, null_key, chip->null_key_context, + sizeof(chip->null_key_context), &offset); + tpm2_flush_context(chip, null_key); + } + + return rc; +} + +/** + * tpm2_sessions_init() - start of day initialization for the sessions code + * @chip: TPM chip + * + * Derive and context save the null primary and allocate memory in the + * struct tpm_chip for the authorizations. 
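+ *
+ * Called once per chip at start-up; the tpm2_auto_startup() hunk in
+ * tpm2.c above invokes it roughly as:
+ *
+ *	rc = tpm2_sessions_init(chip);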
+ */ +int tpm2_sessions_init(struct tpm_chip *chip) +{ + int rc; + + rc = tpm2_create_null_primary(chip); + if (rc) + dev_err(&chip->dev, "TPM: security failed (NULL seed derivation): %d\n", rc); + + chip->auth = kmalloc(sizeof(*chip->auth), GFP_KERNEL); + if (!chip->auth) + return -ENOMEM; + + return rc; +} +#endif /* CONFIG_TCG_TPM2_HMAC */ diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index 363afdd4d1..4892d491da 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -68,8 +68,8 @@ void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space) kfree(space->session_buf); } -static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, - unsigned int *offset, u32 *handle) +int tpm2_load_context(struct tpm_chip *chip, u8 *buf, + unsigned int *offset, u32 *handle) { struct tpm_buf tbuf; struct tpm2_context *ctx; @@ -105,6 +105,9 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, *handle = 0; tpm_buf_destroy(&tbuf); return -ENOENT; + } else if (tpm2_rc_value(rc) == TPM2_RC_INTEGRITY) { + tpm_buf_destroy(&tbuf); + return -EINVAL; } else if (rc > 0) { dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n", __func__, rc); @@ -119,8 +122,8 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, return 0; } -static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, - unsigned int buf_size, unsigned int *offset) +int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, + unsigned int buf_size, unsigned int *offset) { struct tpm_buf tbuf; unsigned int body_size; diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index 9c924a1440..2d2ae37153 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -51,34 +51,40 @@ static struct tpm_inf_dev tpm_dev; static inline void tpm_data_out(unsigned char data, unsigned char offset) { +#ifdef CONFIG_HAS_IOPORT if (tpm_dev.iotype == TPM_INF_IO_PORT) outb(data, tpm_dev.data_regs + offset); else +#endif writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset); } static inline unsigned char tpm_data_in(unsigned char offset) { +#ifdef CONFIG_HAS_IOPORT if (tpm_dev.iotype == TPM_INF_IO_PORT) return inb(tpm_dev.data_regs + offset); - else - return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset); +#endif + return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset); } static inline void tpm_config_out(unsigned char data, unsigned char offset) { +#ifdef CONFIG_HAS_IOPORT if (tpm_dev.iotype == TPM_INF_IO_PORT) outb(data, tpm_dev.config_port + offset); else +#endif writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset); } static inline unsigned char tpm_config_in(unsigned char offset) { +#ifdef CONFIG_HAS_IOPORT if (tpm_dev.iotype == TPM_INF_IO_PORT) return inb(tpm_dev.config_port + offset); - else - return readb(tpm_dev.mem_base + tpm_dev.index_off + offset); +#endif + return readb(tpm_dev.mem_base + tpm_dev.index_off + offset); } /* TPM header definitions */ diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 0c20fbc089..fdef214b9f 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -1058,11 +1058,6 @@ static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value) clkrun_val &= ~LPC_CLKRUN_EN; iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET); - /* - * Write any random value on port 0x80 which is on LPC, to make - * sure LPC clock is running before sending any TPM command. 
- */ - outb(0xCC, 0x80); } else { data->clkrun_enabled--; if (data->clkrun_enabled) @@ -1073,13 +1068,15 @@ static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value) /* Enable LPC CLKRUN# */ clkrun_val |= LPC_CLKRUN_EN; iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET); - - /* - * Write any random value on port 0x80 which is on LPC, to make - * sure LPC clock is running before sending any TPM command. - */ - outb(0xCC, 0x80); } + +#ifdef CONFIG_HAS_IOPORT + /* + * Write any random value on port 0x80 which is on LPC, to make + * sure LPC clock is running before sending any TPM command. + */ + outb(0xCC, 0x80); +#endif } static const struct tpm_class_ops tpm_tis = { diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index 13e99cf65e..690ad8e9b7 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -210,7 +210,7 @@ static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len, static inline bool is_bsw(void) { #ifdef CONFIG_X86 - return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0); + return (boot_cpu_data.x86_vfm == INTEL_ATOM_AIRMONT) ? 1 : 0; #else return false; #endif diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c index c9eca24bba..61b42c83ce 100644 --- a/drivers/char/tpm/tpm_tis_spi_main.c +++ b/drivers/char/tpm/tpm_tis_spi_main.c @@ -318,6 +318,7 @@ static void tpm_tis_spi_remove(struct spi_device *dev) } static const struct spi_device_id tpm_tis_spi_id[] = { + { "attpm20p", (unsigned long)tpm_tis_spi_probe }, { "st33htpm-spi", (unsigned long)tpm_tis_spi_probe }, { "slb9670", (unsigned long)tpm_tis_spi_probe }, { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe }, diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 035f89f1a2..d9ee2dbc7e 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2173,7 +2173,6 @@ static struct virtio_driver virtio_console = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtcons_probe, .remove = virtcons_remove, @@ -2188,7 +2187,6 @@ static struct virtio_driver virtio_rproc_serial = { .feature_table = rproc_serial_features, .feature_table_size = ARRAY_SIZE(rproc_serial_features), .driver.name = "virtio_rproc_serial", - .driver.owner = THIS_MODULE, .id_table = rproc_serial_id_table, .probe = virtcons_probe, .remove = virtcons_remove, diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 50af5fc7f5..3e9099504f 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -451,8 +451,8 @@ config COMMON_CLK_FIXED_MMIO config COMMON_CLK_K210 bool "Clock driver for the Canaan Kendryte K210 SoC" - depends on OF && RISCV && SOC_CANAAN - default SOC_CANAAN + depends on OF && RISCV && SOC_CANAAN_K210 + default SOC_CANAAN_K210 help Support for the Canaan Kendryte K210 RISC-V SoC clocks. 
@@ -489,6 +489,7 @@ source "drivers/clk/rockchip/Kconfig" source "drivers/clk/samsung/Kconfig" source "drivers/clk/sifive/Kconfig" source "drivers/clk/socfpga/Kconfig" +source "drivers/clk/sophgo/Kconfig" source "drivers/clk/sprd/Kconfig" source "drivers/clk/starfive/Kconfig" source "drivers/clk/sunxi/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 14fa8d4ecc..4abe16c8cc 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -118,6 +118,7 @@ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/ obj-$(CONFIG_CLK_SIFIVE) += sifive/ obj-y += socfpga/ +obj-y += sophgo/ obj-$(CONFIG_PLAT_SPEAR) += spear/ obj-y += sprd/ obj-$(CONFIG_ARCH_STI) += st/ diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c index 7cde328495..bdf5cbc12e 100644 --- a/drivers/clk/clk-en7523.c +++ b/drivers/clk/clk-en7523.c @@ -3,14 +3,16 @@ #include #include #include -#include #include +#include #include #define REG_PCI_CONTROL 0x88 #define REG_PCI_CONTROL_PERSTOUT BIT(29) #define REG_PCI_CONTROL_PERSTOUT1 BIT(26) +#define REG_PCI_CONTROL_REFCLK_EN0 BIT(23) #define REG_PCI_CONTROL_REFCLK_EN1 BIT(22) +#define REG_PCI_CONTROL_PERSTOUT2 BIT(16) #define REG_GSW_CLK_DIV_SEL 0x1b4 #define REG_EMI_CLK_DIV_SEL 0x1b8 #define REG_BUS_CLK_DIV_SEL 0x1bc @@ -18,10 +20,25 @@ #define REG_SPI_CLK_FREQ_SEL 0x1c8 #define REG_NPU_CLK_DIV_SEL 0x1fc #define REG_CRYPTO_CLKSRC 0x200 -#define REG_RESET_CONTROL 0x834 +#define REG_RESET_CONTROL2 0x830 +#define REG_RESET2_CONTROL_PCIE2 BIT(27) +#define REG_RESET_CONTROL1 0x834 #define REG_RESET_CONTROL_PCIEHB BIT(29) #define REG_RESET_CONTROL_PCIE1 BIT(27) #define REG_RESET_CONTROL_PCIE2 BIT(26) +/* EN7581 */ +#define REG_PCIE0_MEM 0x00 +#define REG_PCIE0_MEM_MASK 0x04 +#define REG_PCIE1_MEM 0x08 +#define REG_PCIE1_MEM_MASK 0x0c +#define REG_PCIE2_MEM 0x10 +#define REG_PCIE2_MEM_MASK 0x14 +#define REG_PCIE_RESET_OPEN_DRAIN 0x018c +#define REG_PCIE_RESET_OPEN_DRAIN_MASK GENMASK(2, 0) +#define REG_NP_SCU_PCIC 0x88 +#define REG_NP_SCU_SSTR 0x9c +#define REG_PCIE_XSI0_SEL_MASK GENMASK(14, 13) +#define REG_PCIE_XSI1_SEL_MASK GENMASK(12, 11) struct en_clk_desc { int id; @@ -40,6 +57,7 @@ struct en_clk_desc { u8 div_shift; u16 div_val0; u8 div_step; + u8 div_offset; }; struct en_clk_gate { @@ -47,6 +65,12 @@ struct en_clk_gate { struct clk_hw hw; }; +struct en_clk_soc_data { + const struct clk_ops pcie_ops; + int (*hw_init)(struct platform_device *pdev, void __iomem *base, + void __iomem *np_base); +}; + static const u32 gsw_base[] = { 400000000, 500000000 }; static const u32 emi_base[] = { 333000000, 400000000 }; static const u32 bus_base[] = { 500000000, 540000000 }; @@ -67,6 +91,7 @@ static const struct en_clk_desc en7523_base_clks[] = { .div_bits = 3, .div_shift = 0, .div_step = 1, + .div_offset = 1, }, { .id = EN7523_CLK_EMI, .name = "emi", @@ -80,6 +105,7 @@ static const struct en_clk_desc en7523_base_clks[] = { .div_bits = 3, .div_shift = 0, .div_step = 1, + .div_offset = 1, }, { .id = EN7523_CLK_BUS, .name = "bus", @@ -93,6 +119,7 @@ static const struct en_clk_desc en7523_base_clks[] = { .div_bits = 3, .div_shift = 0, .div_step = 1, + .div_offset = 1, }, { .id = EN7523_CLK_SLIC, .name = "slic", @@ -133,23 +160,19 @@ static const struct en_clk_desc en7523_base_clks[] = { .div_bits = 3, .div_shift = 0, .div_step = 1, + .div_offset = 1, }, { .id = EN7523_CLK_CRYPTO, .name = "crypto", .base_reg = REG_CRYPTO_CLKSRC, .base_bits = 1, - .base_shift = 8, + .base_shift = 0, .base_values = emi_base, .n_base_values = 
ARRAY_SIZE(emi_base), } }; -static const struct of_device_id of_match_clk_en7523[] = { - { .compatible = "airoha,en7523-scu", }, - { /* sentinel */ } -}; - static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i) { const struct en_clk_desc *desc = &en7523_base_clks[i]; @@ -184,7 +207,7 @@ static u32 en7523_get_div(void __iomem *base, int i) if (!val && desc->div_val0) return desc->div_val0; - return (val + 1) * desc->div_step; + return (val + desc->div_offset) * desc->div_step; } static int en7523_pci_is_enabled(struct clk_hw *hw) @@ -212,14 +235,14 @@ static int en7523_pci_prepare(struct clk_hw *hw) usleep_range(1000, 2000); /* Reset to default */ - val = readl(np_base + REG_RESET_CONTROL); + val = readl(np_base + REG_RESET_CONTROL1); mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 | REG_RESET_CONTROL_PCIEHB; - writel(val & ~mask, np_base + REG_RESET_CONTROL); + writel(val & ~mask, np_base + REG_RESET_CONTROL1); usleep_range(1000, 2000); - writel(val | mask, np_base + REG_RESET_CONTROL); + writel(val | mask, np_base + REG_RESET_CONTROL1); msleep(100); - writel(val & ~mask, np_base + REG_RESET_CONTROL); + writel(val & ~mask, np_base + REG_RESET_CONTROL1); usleep_range(5000, 10000); /* Release device */ @@ -247,14 +270,10 @@ static void en7523_pci_unprepare(struct clk_hw *hw) static struct clk_hw *en7523_register_pcie_clk(struct device *dev, void __iomem *np_base) { - static const struct clk_ops pcie_gate_ops = { - .is_enabled = en7523_pci_is_enabled, - .prepare = en7523_pci_prepare, - .unprepare = en7523_pci_unprepare, - }; + const struct en_clk_soc_data *soc_data = device_get_match_data(dev); struct clk_init_data init = { .name = "pcie", - .ops = &pcie_gate_ops, + .ops = &soc_data->pcie_ops, }; struct en_clk_gate *cg; @@ -264,7 +283,10 @@ static struct clk_hw *en7523_register_pcie_clk(struct device *dev, cg->base = np_base; cg->hw.init = &init; - en7523_pci_unprepare(&cg->hw); + + if (init.ops->disable) + init.ops->disable(&cg->hw); + init.ops->unprepare(&cg->hw); if (clk_hw_register(dev, &cg->hw)) return NULL; @@ -272,6 +294,111 @@ static struct clk_hw *en7523_register_pcie_clk(struct device *dev, return &cg->hw; } +static int en7581_pci_is_enabled(struct clk_hw *hw) +{ + struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw); + u32 val, mask; + + mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1; + val = readl(cg->base + REG_PCI_CONTROL); + return (val & mask) == mask; +} + +static int en7581_pci_prepare(struct clk_hw *hw) +{ + struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw); + void __iomem *np_base = cg->base; + u32 val, mask; + + mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 | + REG_RESET_CONTROL_PCIEHB; + val = readl(np_base + REG_RESET_CONTROL1); + writel(val & ~mask, np_base + REG_RESET_CONTROL1); + val = readl(np_base + REG_RESET_CONTROL2); + writel(val & ~REG_RESET2_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2); + usleep_range(5000, 10000); + + return 0; +} + +static int en7581_pci_enable(struct clk_hw *hw) +{ + struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw); + void __iomem *np_base = cg->base; + u32 val, mask; + + mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1 | + REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT2 | + REG_PCI_CONTROL_PERSTOUT; + val = readl(np_base + REG_PCI_CONTROL); + writel(val | mask, np_base + REG_PCI_CONTROL); + msleep(250); + + return 0; +} + +static void en7581_pci_unprepare(struct clk_hw *hw) +{ + struct en_clk_gate *cg = 
container_of(hw, struct en_clk_gate, hw); + void __iomem *np_base = cg->base; + u32 val, mask; + + mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 | + REG_RESET_CONTROL_PCIEHB; + val = readl(np_base + REG_RESET_CONTROL1); + writel(val | mask, np_base + REG_RESET_CONTROL1); + mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2; + writel(val | mask, np_base + REG_RESET_CONTROL1); + val = readl(np_base + REG_RESET_CONTROL2); + writel(val | REG_RESET_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2); + msleep(100); +} + +static void en7581_pci_disable(struct clk_hw *hw) +{ + struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw); + void __iomem *np_base = cg->base; + u32 val, mask; + + mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1 | + REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT2 | + REG_PCI_CONTROL_PERSTOUT; + val = readl(np_base + REG_PCI_CONTROL); + writel(val & ~mask, np_base + REG_PCI_CONTROL); + usleep_range(1000, 2000); +} + +static int en7581_clk_hw_init(struct platform_device *pdev, + void __iomem *base, + void __iomem *np_base) +{ + void __iomem *pb_base; + u32 val; + + pb_base = devm_platform_ioremap_resource(pdev, 2); + if (IS_ERR(pb_base)) + return PTR_ERR(pb_base); + + val = readl(np_base + REG_NP_SCU_SSTR); + val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK); + writel(val, np_base + REG_NP_SCU_SSTR); + val = readl(np_base + REG_NP_SCU_PCIC); + writel(val | 3, np_base + REG_NP_SCU_PCIC); + + writel(0x20000000, pb_base + REG_PCIE0_MEM); + writel(0xfc000000, pb_base + REG_PCIE0_MEM_MASK); + writel(0x24000000, pb_base + REG_PCIE1_MEM); + writel(0xfc000000, pb_base + REG_PCIE1_MEM_MASK); + writel(0x28000000, pb_base + REG_PCIE2_MEM); + writel(0xfc000000, pb_base + REG_PCIE2_MEM_MASK); + + val = readl(base + REG_PCIE_RESET_OPEN_DRAIN); + writel(val | REG_PCIE_RESET_OPEN_DRAIN_MASK, + base + REG_PCIE_RESET_OPEN_DRAIN); + + return 0; +} + static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data, void __iomem *base, void __iomem *np_base) { @@ -304,6 +431,7 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat static int en7523_clk_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; + const struct en_clk_soc_data *soc_data; struct clk_hw_onecell_data *clk_data; void __iomem *base, *np_base; int r; @@ -316,6 +444,13 @@ static int en7523_clk_probe(struct platform_device *pdev) if (IS_ERR(np_base)) return PTR_ERR(np_base); + soc_data = device_get_match_data(&pdev->dev); + if (soc_data->hw_init) { + r = soc_data->hw_init(pdev, base, np_base); + if (r) + return r; + } + clk_data = devm_kzalloc(&pdev->dev, struct_size(clk_data, hws, EN7523_NUM_CLOCKS), GFP_KERNEL); @@ -333,6 +468,31 @@ static int en7523_clk_probe(struct platform_device *pdev) return r; } +static const struct en_clk_soc_data en7523_data = { + .pcie_ops = { + .is_enabled = en7523_pci_is_enabled, + .prepare = en7523_pci_prepare, + .unprepare = en7523_pci_unprepare, + }, +}; + +static const struct en_clk_soc_data en7581_data = { + .pcie_ops = { + .is_enabled = en7581_pci_is_enabled, + .prepare = en7581_pci_prepare, + .enable = en7581_pci_enable, + .unprepare = en7581_pci_unprepare, + .disable = en7581_pci_disable, + }, + .hw_init = en7581_clk_hw_init, +}; + +static const struct of_device_id of_match_clk_en7523[] = { + { .compatible = "airoha,en7523-scu", .data = &en7523_data }, + { .compatible = "airoha,en7581-scu", .data = &en7581_data }, + { /* sentinel */ } +}; + static struct 
platform_driver clk_en7523_drv = { .probe = en7523_clk_probe, .driver = { diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c index ba0ff01bf4..856b008e07 100644 --- a/drivers/clk/clk-gemini.c +++ b/drivers/clk/clk-gemini.c @@ -67,12 +67,10 @@ struct gemini_gate_data { * struct clk_gemini_pci - Gemini PCI clock * @hw: corresponding clock hardware entry * @map: regmap to access the registers - * @rate: current rate */ struct clk_gemini_pci { struct clk_hw hw; struct regmap *map; - unsigned long rate; }; /** diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c index 2a0cea2946..6e68a41a70 100644 --- a/drivers/clk/clk-highbank.c +++ b/drivers/clk/clk-highbank.c @@ -37,7 +37,6 @@ struct hb_clk { struct clk_hw hw; void __iomem *reg; - char *parent_name; }; #define to_hb_clk(p) container_of(p, struct hb_clk, hw) diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c index bacdcbb287..820bb1e9e3 100644 --- a/drivers/clk/clk-loongson2.c +++ b/drivers/clk/clk-loongson2.c @@ -13,317 +13,348 @@ #include #include -#define LOONGSON2_PLL_MULT_SHIFT 32 -#define LOONGSON2_PLL_MULT_WIDTH 10 -#define LOONGSON2_PLL_DIV_SHIFT 26 -#define LOONGSON2_PLL_DIV_WIDTH 6 -#define LOONGSON2_APB_FREQSCALE_SHIFT 20 -#define LOONGSON2_APB_FREQSCALE_WIDTH 3 -#define LOONGSON2_USB_FREQSCALE_SHIFT 16 -#define LOONGSON2_USB_FREQSCALE_WIDTH 3 -#define LOONGSON2_SATA_FREQSCALE_SHIFT 12 -#define LOONGSON2_SATA_FREQSCALE_WIDTH 3 -#define LOONGSON2_BOOT_FREQSCALE_SHIFT 8 -#define LOONGSON2_BOOT_FREQSCALE_WIDTH 3 - -static void __iomem *loongson2_pll_base; - static const struct clk_parent_data pdata[] = { - { .fw_name = "ref_100m",}, + { .fw_name = "ref_100m", }, }; -static struct clk_hw *loongson2_clk_register(struct device *dev, - const char *name, - const char *parent_name, - const struct clk_ops *ops, - unsigned long flags) -{ - int ret; - struct clk_hw *hw; - struct clk_init_data init = { }; - - hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL); - if (!hw) - return ERR_PTR(-ENOMEM); - - init.name = name; - init.ops = ops; - init.flags = flags; - init.num_parents = 1; - - if (!parent_name) - init.parent_data = pdata; - else - init.parent_names = &parent_name; - - hw->init = &init; - - ret = devm_clk_hw_register(dev, hw); - if (ret) - hw = ERR_PTR(ret); - - return hw; -} - -static unsigned long loongson2_calc_pll_rate(int offset, unsigned long rate) -{ - u64 val; - u32 mult, div; - - val = readq(loongson2_pll_base + offset); - - mult = (val >> LOONGSON2_PLL_MULT_SHIFT) & - clk_div_mask(LOONGSON2_PLL_MULT_WIDTH); - div = (val >> LOONGSON2_PLL_DIV_SHIFT) & - clk_div_mask(LOONGSON2_PLL_DIV_WIDTH); - - return div_u64((u64)rate * mult, div); -} +enum loongson2_clk_type { + CLK_TYPE_PLL, + CLK_TYPE_SCALE, + CLK_TYPE_DIVIDER, + CLK_TYPE_GATE, + CLK_TYPE_FIXED, + CLK_TYPE_NONE, +}; -static unsigned long loongson2_node_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - return loongson2_calc_pll_rate(0x0, parent_rate); -} +struct loongson2_clk_provider { + void __iomem *base; + struct device *dev; + struct clk_hw_onecell_data clk_data; + spinlock_t clk_lock; /* protect access to DIV registers */ +}; -static const struct clk_ops loongson2_node_clk_ops = { - .recalc_rate = loongson2_node_recalc_rate, +struct loongson2_clk_data { + struct clk_hw hw; + void __iomem *reg; + u8 div_shift; + u8 div_width; + u8 mult_shift; + u8 mult_width; }; -static unsigned long loongson2_ddr_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - return loongson2_calc_pll_rate(0x10, 
parent_rate); -} +struct loongson2_clk_board_info { + u8 id; + enum loongson2_clk_type type; + const char *name; + const char *parent_name; + unsigned long fixed_rate; + u8 reg_offset; + u8 div_shift; + u8 div_width; + u8 mult_shift; + u8 mult_width; + u8 bit_idx; +}; -static const struct clk_ops loongson2_ddr_clk_ops = { - .recalc_rate = loongson2_ddr_recalc_rate, +#define CLK_DIV(_id, _name, _pname, _offset, _dshift, _dwidth) \ + { \ + .id = _id, \ + .type = CLK_TYPE_DIVIDER, \ + .name = _name, \ + .parent_name = _pname, \ + .reg_offset = _offset, \ + .div_shift = _dshift, \ + .div_width = _dwidth, \ + } + +#define CLK_PLL(_id, _name, _offset, _mshift, _mwidth, \ + _dshift, _dwidth) \ + { \ + .id = _id, \ + .type = CLK_TYPE_PLL, \ + .name = _name, \ + .parent_name = NULL, \ + .reg_offset = _offset, \ + .mult_shift = _mshift, \ + .mult_width = _mwidth, \ + .div_shift = _dshift, \ + .div_width = _dwidth, \ + } + +#define CLK_SCALE(_id, _name, _pname, _offset, \ + _dshift, _dwidth) \ + { \ + .id = _id, \ + .type = CLK_TYPE_SCALE, \ + .name = _name, \ + .parent_name = _pname, \ + .reg_offset = _offset, \ + .div_shift = _dshift, \ + .div_width = _dwidth, \ + } + +#define CLK_GATE(_id, _name, _pname, _offset, _bidx) \ + { \ + .id = _id, \ + .type = CLK_TYPE_GATE, \ + .name = _name, \ + .parent_name = _pname, \ + .reg_offset = _offset, \ + .bit_idx = _bidx, \ + } + +#define CLK_FIXED(_id, _name, _pname, _rate) \ + { \ + .id = _id, \ + .type = CLK_TYPE_FIXED, \ + .name = _name, \ + .parent_name = _pname, \ + .fixed_rate = _rate, \ + } + +static const struct loongson2_clk_board_info ls2k0500_clks[] = { + CLK_PLL(LOONGSON2_NODE_PLL, "pll_node", 0, 16, 8, 8, 6), + CLK_PLL(LOONGSON2_DDR_PLL, "pll_ddr", 0x8, 16, 8, 8, 6), + CLK_PLL(LOONGSON2_DC_PLL, "pll_soc", 0x10, 16, 8, 8, 6), + CLK_PLL(LOONGSON2_PIX0_PLL, "pll_pix0", 0x18, 16, 8, 8, 6), + CLK_PLL(LOONGSON2_PIX1_PLL, "pll_pix1", 0x20, 16, 8, 8, 6), + CLK_DIV(LOONGSON2_NODE_CLK, "clk_node", "pll_node", 0, 24, 6), + CLK_DIV(LOONGSON2_DDR_CLK, "clk_ddr", "pll_ddr", 0x8, 24, 6), + CLK_DIV(LOONGSON2_HDA_CLK, "clk_hda", "pll_ddr", 0xc, 8, 6), + CLK_DIV(LOONGSON2_GPU_CLK, "clk_gpu", "pll_soc", 0x10, 24, 6), + CLK_DIV(LOONGSON2_DC_CLK, "clk_sb", "pll_soc", 0x14, 0, 6), + CLK_DIV(LOONGSON2_GMAC_CLK, "clk_gmac", "pll_soc", 0x14, 8, 6), + CLK_DIV(LOONGSON2_PIX0_CLK, "clk_pix0", "pll_pix0", 0x18, 24, 6), + CLK_DIV(LOONGSON2_PIX1_CLK, "clk_pix1", "pll_pix1", 0x20, 24, 6), + CLK_SCALE(LOONGSON2_BOOT_CLK, "clk_boot", "clk_sb", 0x28, 8, 3), + CLK_SCALE(LOONGSON2_SATA_CLK, "clk_sata", "clk_sb", 0x28, 12, 3), + CLK_SCALE(LOONGSON2_USB_CLK, "clk_usb", "clk_sb", 0x28, 16, 3), + CLK_SCALE(LOONGSON2_APB_CLK, "clk_apb", "clk_sb", 0x28, 20, 3), + { /* Sentinel */ }, }; -static unsigned long loongson2_dc_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - return loongson2_calc_pll_rate(0x20, parent_rate); -} +static const struct loongson2_clk_board_info ls2k1000_clks[] = { + CLK_PLL(LOONGSON2_NODE_PLL, "pll_node", 0, 32, 10, 26, 6), + CLK_PLL(LOONGSON2_DDR_PLL, "pll_ddr", 0x10, 32, 10, 26, 6), + CLK_PLL(LOONGSON2_DC_PLL, "pll_dc", 0x20, 32, 10, 26, 6), + CLK_PLL(LOONGSON2_PIX0_PLL, "pll_pix0", 0x30, 32, 10, 26, 6), + CLK_PLL(LOONGSON2_PIX1_PLL, "pll_pix1", 0x40, 32, 10, 26, 6), + CLK_DIV(LOONGSON2_NODE_CLK, "clk_node", "pll_node", 0x8, 0, 6), + CLK_DIV(LOONGSON2_DDR_CLK, "clk_ddr", "pll_ddr", 0x18, 0, 6), + CLK_DIV(LOONGSON2_GPU_CLK, "clk_gpu", "pll_ddr", 0x18, 22, 6), + /* + * The hda clk divisor in the upper 32bits and the clk-prodiver + * layer code 
doesn't support 64bit io operation thus a conversion + * is required that subtract shift by 32 and add 4byte to the hda + * address + */ + CLK_DIV(LOONGSON2_HDA_CLK, "clk_hda", "pll_ddr", 0x22, 12, 7), + CLK_DIV(LOONGSON2_DC_CLK, "clk_dc", "pll_dc", 0x28, 0, 6), + CLK_DIV(LOONGSON2_GMAC_CLK, "clk_gmac", "pll_dc", 0x28, 22, 6), + CLK_DIV(LOONGSON2_PIX0_CLK, "clk_pix0", "pll_pix0", 0x38, 0, 6), + CLK_DIV(LOONGSON2_PIX1_CLK, "clk_pix1", "pll_pix1", 0x38, 0, 6), + CLK_SCALE(LOONGSON2_BOOT_CLK, "clk_boot", NULL, 0x50, 8, 3), + CLK_SCALE(LOONGSON2_SATA_CLK, "clk_sata", "clk_gmac", 0x50, 12, 3), + CLK_SCALE(LOONGSON2_USB_CLK, "clk_usb", "clk_gmac", 0x50, 16, 3), + CLK_SCALE(LOONGSON2_APB_CLK, "clk_apb", "clk_gmac", 0x50, 20, 3), + { /* Sentinel */ }, +}; -static const struct clk_ops loongson2_dc_clk_ops = { - .recalc_rate = loongson2_dc_recalc_rate, +static const struct loongson2_clk_board_info ls2k2000_clks[] = { + CLK_PLL(LOONGSON2_DC_PLL, "pll_0", 0, 21, 9, 32, 6), + CLK_PLL(LOONGSON2_DDR_PLL, "pll_1", 0x10, 21, 9, 32, 6), + CLK_PLL(LOONGSON2_NODE_PLL, "pll_2", 0x20, 21, 9, 32, 6), + CLK_PLL(LOONGSON2_PIX0_PLL, "pll_pix0", 0x30, 21, 9, 32, 6), + CLK_PLL(LOONGSON2_PIX1_PLL, "pll_pix1", 0x40, 21, 9, 32, 6), + CLK_GATE(LOONGSON2_OUT0_GATE, "out0_gate", "pll_0", 0, 40), + CLK_GATE(LOONGSON2_GMAC_GATE, "gmac_gate", "pll_0", 0, 41), + CLK_GATE(LOONGSON2_RIO_GATE, "rio_gate", "pll_0", 0, 42), + CLK_GATE(LOONGSON2_DC_GATE, "dc_gate", "pll_1", 0x10, 40), + CLK_GATE(LOONGSON2_DDR_GATE, "ddr_gate", "pll_1", 0x10, 41), + CLK_GATE(LOONGSON2_GPU_GATE, "gpu_gate", "pll_1", 0x10, 42), + CLK_GATE(LOONGSON2_HDA_GATE, "hda_gate", "pll_2", 0x20, 40), + CLK_GATE(LOONGSON2_NODE_GATE, "node_gate", "pll_2", 0x20, 41), + CLK_GATE(LOONGSON2_EMMC_GATE, "emmc_gate", "pll_2", 0x20, 42), + CLK_GATE(LOONGSON2_PIX0_GATE, "pix0_gate", "pll_pix0", 0x30, 40), + CLK_GATE(LOONGSON2_PIX1_GATE, "pix1_gate", "pll_pix1", 0x40, 40), + CLK_DIV(LOONGSON2_OUT0_CLK, "clk_out0", "out0_gate", 0, 0, 6), + CLK_DIV(LOONGSON2_GMAC_CLK, "clk_gmac", "gmac_gate", 0, 7, 6), + CLK_DIV(LOONGSON2_RIO_CLK, "clk_rio", "rio_gate", 0, 14, 6), + CLK_DIV(LOONGSON2_DC_CLK, "clk_dc", "dc_gate", 0x10, 0, 6), + CLK_DIV(LOONGSON2_GPU_CLK, "clk_gpu", "gpu_gate", 0x10, 7, 6), + CLK_DIV(LOONGSON2_DDR_CLK, "clk_ddr", "ddr_gate", 0x10, 14, 6), + CLK_DIV(LOONGSON2_HDA_CLK, "clk_hda", "hda_gate", 0x20, 0, 6), + CLK_DIV(LOONGSON2_NODE_CLK, "clk_node", "node_gate", 0x20, 7, 6), + CLK_DIV(LOONGSON2_EMMC_CLK, "clk_emmc", "emmc_gate", 0x20, 14, 6), + CLK_DIV(LOONGSON2_PIX0_CLK, "clk_pix0", "pll_pix0", 0x30, 0, 6), + CLK_DIV(LOONGSON2_PIX1_CLK, "clk_pix1", "pll_pix1", 0x40, 0, 6), + CLK_SCALE(LOONGSON2_SATA_CLK, "clk_sata", "clk_out0", 0x50, 12, 3), + CLK_SCALE(LOONGSON2_USB_CLK, "clk_usb", "clk_out0", 0x50, 16, 3), + CLK_SCALE(LOONGSON2_APB_CLK, "clk_apb", "clk_node", 0x50, 20, 3), + CLK_SCALE(LOONGSON2_BOOT_CLK, "clk_boot", NULL, 0x50, 23, 3), + CLK_SCALE(LOONGSON2_DES_CLK, "clk_des", "clk_node", 0x50, 40, 3), + CLK_SCALE(LOONGSON2_I2S_CLK, "clk_i2s", "clk_node", 0x50, 44, 3), + CLK_FIXED(LOONGSON2_MISC_CLK, "clk_misc", NULL, 50000000), + { /* Sentinel */ }, }; -static unsigned long loongson2_pix0_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +static inline struct loongson2_clk_data *to_loongson2_clk(struct clk_hw *hw) { - return loongson2_calc_pll_rate(0x30, parent_rate); + return container_of(hw, struct loongson2_clk_data, hw); } -static const struct clk_ops loongson2_pix0_clk_ops = { - .recalc_rate = loongson2_pix0_recalc_rate, -}; - -static unsigned long 
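The clk_hda comment above describes a workaround worth making concrete: the divider field lives in the upper half of a 64-bit register, and since the common clk-provider divider code only performs 32-bit I/O, the table entry points 4 bytes further in and drops the shift by 32. A hedged sketch of that conversion; demo_read_field32() is illustrative and assumes a little-endian register pair whose field does not straddle the 32-bit boundary:

#include <linux/bits.h>
#include <linux/io.h>

static u32 demo_read_field32(void __iomem *base, u32 offset,
                             unsigned int shift, unsigned int width)
{
        if (shift >= 32) {              /* field sits in the high 32-bit word */
                offset += 4;            /* ...so read that word directly */
                shift -= 32;
        }

        return (readl(base + offset) >> shift) & GENMASK(width - 1, 0);
}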
loongson2_pix1_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +static inline unsigned long loongson2_rate_part(u64 val, u8 shift, u8 width) { - return loongson2_calc_pll_rate(0x40, parent_rate); + return (val & GENMASK(shift + width - 1, shift)) >> shift; } -static const struct clk_ops loongson2_pix1_clk_ops = { - .recalc_rate = loongson2_pix1_recalc_rate, -}; - -static unsigned long loongson2_calc_rate(unsigned long rate, - int shift, int width) +static unsigned long loongson2_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) { - u64 val; - u32 mult; - - val = readq(loongson2_pll_base + 0x50); + u64 val, mult, div; + struct loongson2_clk_data *clk = to_loongson2_clk(hw); - mult = (val >> shift) & clk_div_mask(width); + val = readq(clk->reg); + mult = loongson2_rate_part(val, clk->mult_shift, clk->mult_width); + div = loongson2_rate_part(val, clk->div_shift, clk->div_width); - return div_u64((u64)rate * (mult + 1), 8); -} - -static unsigned long loongson2_boot_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - return loongson2_calc_rate(parent_rate, - LOONGSON2_BOOT_FREQSCALE_SHIFT, - LOONGSON2_BOOT_FREQSCALE_WIDTH); + return div_u64((u64)parent_rate * mult, div); } -static const struct clk_ops loongson2_boot_clk_ops = { - .recalc_rate = loongson2_boot_recalc_rate, +static const struct clk_ops loongson2_pll_recalc_ops = { + .recalc_rate = loongson2_pll_recalc_rate, }; -static unsigned long loongson2_apb_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +static unsigned long loongson2_freqscale_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) { - return loongson2_calc_rate(parent_rate, - LOONGSON2_APB_FREQSCALE_SHIFT, - LOONGSON2_APB_FREQSCALE_WIDTH); -} + u64 val, mult; + struct loongson2_clk_data *clk = to_loongson2_clk(hw); -static const struct clk_ops loongson2_apb_clk_ops = { - .recalc_rate = loongson2_apb_recalc_rate, -}; + val = readq(clk->reg); + mult = loongson2_rate_part(val, clk->div_shift, clk->div_width) + 1; -static unsigned long loongson2_usb_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - return loongson2_calc_rate(parent_rate, - LOONGSON2_USB_FREQSCALE_SHIFT, - LOONGSON2_USB_FREQSCALE_WIDTH); + return div_u64((u64)parent_rate * mult, 8); } -static const struct clk_ops loongson2_usb_clk_ops = { - .recalc_rate = loongson2_usb_recalc_rate, +static const struct clk_ops loongson2_freqscale_recalc_ops = { + .recalc_rate = loongson2_freqscale_recalc_rate, }; -static unsigned long loongson2_sata_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +static struct clk_hw *loongson2_clk_register(struct loongson2_clk_provider *clp, + const struct loongson2_clk_board_info *cld, + const struct clk_ops *ops) { - return loongson2_calc_rate(parent_rate, - LOONGSON2_SATA_FREQSCALE_SHIFT, - LOONGSON2_SATA_FREQSCALE_WIDTH); -} + int ret; + struct clk_hw *hw; + struct loongson2_clk_data *clk; + struct clk_init_data init = { }; -static const struct clk_ops loongson2_sata_clk_ops = { - .recalc_rate = loongson2_sata_recalc_rate, -}; + clk = devm_kzalloc(clp->dev, sizeof(*clk), GFP_KERNEL); + if (!clk) + return ERR_PTR(-ENOMEM); -static inline int loongson2_check_clk_hws(struct clk_hw *clks[], unsigned int count) -{ - unsigned int i; + init.name = cld->name; + init.ops = ops; + init.flags = 0; + init.num_parents = 1; - for (i = 0; i < count; i++) - if (IS_ERR(clks[i])) { - pr_err("Loongson2 clk %u: register failed with %ld\n", - i, PTR_ERR(clks[i])); - return PTR_ERR(clks[i]); - } + if (!cld->parent_name) + 
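loongson2_pll_recalc_rate() and loongson2_freqscale_recalc_rate() above boil down to two integer ratios: parent * mult / div for a PLL, and parent * (field + 1) / 8 for the frequency-scaled clocks. A small sketch with made-up numbers (none of the values are real Loongson-2 settings):

#include <linux/math64.h>
#include <linux/types.h>

static unsigned long demo_pll_rate(unsigned long parent_rate, u32 mult, u32 div)
{
        /* e.g. 100 MHz reference, mult = 48, div = 2 -> 2.4 GHz */
        return div_u64((u64)parent_rate * mult, div);
}

static unsigned long demo_freqscale_rate(unsigned long parent_rate, u32 field)
{
        /* e.g. field = 3 -> parent * 4 / 8, i.e. half the parent rate */
        return div_u64((u64)parent_rate * (field + 1), 8);
}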
init.parent_data = pdata; + else + init.parent_names = &cld->parent_name; + + clk->reg = clp->base + cld->reg_offset; + clk->div_shift = cld->div_shift; + clk->div_width = cld->div_width; + clk->mult_shift = cld->mult_shift; + clk->mult_width = cld->mult_width; + clk->hw.init = &init; - return 0; + hw = &clk->hw; + ret = devm_clk_hw_register(clp->dev, hw); + if (ret) + clk = ERR_PTR(ret); + + return hw; } static int loongson2_clk_probe(struct platform_device *pdev) { - int ret; - struct clk_hw **hws; - struct clk_hw_onecell_data *clk_hw_data; - spinlock_t loongson2_clk_lock; + int i, clks_num = 0; + struct clk_hw *hw; struct device *dev = &pdev->dev; + struct loongson2_clk_provider *clp; + const struct loongson2_clk_board_info *p, *data; - loongson2_pll_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(loongson2_pll_base)) - return PTR_ERR(loongson2_pll_base); - - clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, LOONGSON2_CLK_END), - GFP_KERNEL); - if (WARN_ON(!clk_hw_data)) - return -ENOMEM; - - clk_hw_data->num = LOONGSON2_CLK_END; - hws = clk_hw_data->hws; - - hws[LOONGSON2_NODE_PLL] = loongson2_clk_register(dev, "node_pll", - NULL, - &loongson2_node_clk_ops, 0); - - hws[LOONGSON2_DDR_PLL] = loongson2_clk_register(dev, "ddr_pll", - NULL, - &loongson2_ddr_clk_ops, 0); + data = device_get_match_data(dev); + if (!data) + return -EINVAL; - hws[LOONGSON2_DC_PLL] = loongson2_clk_register(dev, "dc_pll", - NULL, - &loongson2_dc_clk_ops, 0); + for (p = data; p->name; p++) + clks_num++; - hws[LOONGSON2_PIX0_PLL] = loongson2_clk_register(dev, "pix0_pll", - NULL, - &loongson2_pix0_clk_ops, 0); - - hws[LOONGSON2_PIX1_PLL] = loongson2_clk_register(dev, "pix1_pll", - NULL, - &loongson2_pix1_clk_ops, 0); + clp = devm_kzalloc(dev, struct_size(clp, clk_data.hws, clks_num), + GFP_KERNEL); + if (!clp) + return -ENOMEM; - hws[LOONGSON2_BOOT_CLK] = loongson2_clk_register(dev, "boot", - NULL, - &loongson2_boot_clk_ops, 0); + clp->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(clp->base)) + return PTR_ERR(clp->base); + + spin_lock_init(&clp->clk_lock); + clp->clk_data.num = clks_num + 1; + clp->dev = dev; + + for (i = 0; i < clks_num; i++) { + p = &data[i]; + switch (p->type) { + case CLK_TYPE_PLL: + hw = loongson2_clk_register(clp, p, + &loongson2_pll_recalc_ops); + break; + case CLK_TYPE_SCALE: + hw = loongson2_clk_register(clp, p, + &loongson2_freqscale_recalc_ops); + break; + case CLK_TYPE_DIVIDER: + hw = devm_clk_hw_register_divider(dev, p->name, + p->parent_name, 0, + clp->base + p->reg_offset, + p->div_shift, p->div_width, + CLK_DIVIDER_ONE_BASED, + &clp->clk_lock); + break; + case CLK_TYPE_GATE: + hw = devm_clk_hw_register_gate(dev, p->name, p->parent_name, 0, + clp->base + p->reg_offset, + p->bit_idx, 0, + &clp->clk_lock); + break; + case CLK_TYPE_FIXED: + hw = clk_hw_register_fixed_rate_parent_data(dev, p->name, pdata, + 0, p->fixed_rate); + break; + default: + return dev_err_probe(dev, -EINVAL, "Invalid clk type\n"); + } - hws[LOONGSON2_NODE_CLK] = devm_clk_hw_register_divider(dev, "node", - "node_pll", 0, - loongson2_pll_base + 0x8, 0, - 6, CLK_DIVIDER_ONE_BASED, - &loongson2_clk_lock); + if (IS_ERR(hw)) + return dev_err_probe(dev, PTR_ERR(hw), + "Register clk: %s, type: %u failed!\n", + p->name, p->type); - /* - * The hda clk divisor in the upper 32bits and the clk-prodiver - * layer code doesn't support 64bit io operation thus a conversion - * is required that subtract shift by 32 and add 4byte to the hda - * address - */ - hws[LOONGSON2_HDA_CLK] = 
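The provider allocation above leans on struct_size() to size a structure that ends in a flexible array of clk_hw pointers. A quick illustration of what that helper buys, using a hypothetical demo_provider type:

#include <linux/clk-provider.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_provider {
        int count;
        struct clk_hw *hws[];   /* flexible array member, must be last */
};

static struct demo_provider *demo_alloc(int n)
{
        /*
         * struct_size(p, hws, n) == sizeof(*p) + n * sizeof(p->hws[0]),
         * with integer-overflow checking folded in.
         */
        struct demo_provider *p = kzalloc(struct_size(p, hws, n), GFP_KERNEL);

        if (p)
                p->count = n;
        return p;
}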
devm_clk_hw_register_divider(dev, "hda", - "ddr_pll", 0, - loongson2_pll_base + 0x22, 12, - 7, CLK_DIVIDER_ONE_BASED, - &loongson2_clk_lock); - - hws[LOONGSON2_GPU_CLK] = devm_clk_hw_register_divider(dev, "gpu", - "ddr_pll", 0, - loongson2_pll_base + 0x18, 22, - 6, CLK_DIVIDER_ONE_BASED, - &loongson2_clk_lock); - - hws[LOONGSON2_DDR_CLK] = devm_clk_hw_register_divider(dev, "ddr", - "ddr_pll", 0, - loongson2_pll_base + 0x18, 0, - 6, CLK_DIVIDER_ONE_BASED, - &loongson2_clk_lock); - - hws[LOONGSON2_GMAC_CLK] = devm_clk_hw_register_divider(dev, "gmac", - "dc_pll", 0, - loongson2_pll_base + 0x28, 22, - 6, CLK_DIVIDER_ONE_BASED, - &loongson2_clk_lock); - - hws[LOONGSON2_DC_CLK] = devm_clk_hw_register_divider(dev, "dc", - "dc_pll", 0, - loongson2_pll_base + 0x28, 0, - 6, CLK_DIVIDER_ONE_BASED, - &loongson2_clk_lock); - - hws[LOONGSON2_APB_CLK] = loongson2_clk_register(dev, "apb", - "gmac", - &loongson2_apb_clk_ops, 0); - - hws[LOONGSON2_USB_CLK] = loongson2_clk_register(dev, "usb", - "gmac", - &loongson2_usb_clk_ops, 0); - - hws[LOONGSON2_SATA_CLK] = loongson2_clk_register(dev, "sata", - "gmac", - &loongson2_sata_clk_ops, 0); - - hws[LOONGSON2_PIX0_CLK] = clk_hw_register_divider(NULL, "pix0", - "pix0_pll", 0, - loongson2_pll_base + 0x38, 0, 6, - CLK_DIVIDER_ONE_BASED, - &loongson2_clk_lock); - - hws[LOONGSON2_PIX1_CLK] = clk_hw_register_divider(NULL, "pix1", - "pix1_pll", 0, - loongson2_pll_base + 0x48, 0, 6, - CLK_DIVIDER_ONE_BASED, - &loongson2_clk_lock); - - ret = loongson2_check_clk_hws(hws, LOONGSON2_CLK_END); - if (ret) - return ret; + clp->clk_data.hws[p->id] = hw; + } - return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data); + return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, &clp->clk_data); } static const struct of_device_id loongson2_clk_match_table[] = { - { .compatible = "loongson,ls2k-clk" }, + { .compatible = "loongson,ls2k0500-clk", .data = &ls2k0500_clks }, + { .compatible = "loongson,ls2k-clk", .data = &ls2k1000_clks }, + { .compatible = "loongson,ls2k2000-clk", .data = &ls2k2000_clks }, { } }; MODULE_DEVICE_TABLE(of, loongson2_clk_match_table); @@ -338,4 +369,5 @@ static struct platform_driver loongson2_clk_driver = { module_platform_driver(loongson2_clk_driver); MODULE_DESCRIPTION("Loongson2 clock driver"); +MODULE_AUTHOR("Loongson Technology Corporation Limited"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index 8cbe24789c..d86a02563f 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -2,9 +2,10 @@ /* * System Control and Power Interface (SCMI) Protocol based clock driver * - * Copyright (C) 2018-2022 ARM Ltd. + * Copyright (C) 2018-2024 ARM Ltd. */ +#include #include #include #include @@ -16,6 +17,17 @@ #define NOT_ATOMIC false #define ATOMIC true +enum scmi_clk_feats { + SCMI_CLK_ATOMIC_SUPPORTED, + SCMI_CLK_STATE_CTRL_SUPPORTED, + SCMI_CLK_RATE_CTRL_SUPPORTED, + SCMI_CLK_PARENT_CTRL_SUPPORTED, + SCMI_CLK_DUTY_CYCLE_SUPPORTED, + SCMI_CLK_FEATS_COUNT +}; + +#define SCMI_MAX_CLK_OPS BIT(SCMI_CLK_FEATS_COUNT) + static const struct scmi_clk_proto_ops *scmi_proto_clk_ops; struct scmi_clk { @@ -158,41 +170,44 @@ static int scmi_clk_atomic_is_enabled(struct clk_hw *hw) return !!enabled; } -/* - * We can provide enable/disable/is_enabled atomic callbacks only if the - * underlying SCMI transport for an SCMI instance is configured to handle - * SCMI commands in an atomic manner. 
- * - * When no SCMI atomic transport support is available we instead provide only - * the prepare/unprepare API, as allowed by the clock framework when atomic - * calls are not available. - * - * Two distinct sets of clk_ops are provided since we could have multiple SCMI - * instances with different underlying transport quality, so they cannot be - * shared. - */ -static const struct clk_ops scmi_clk_ops = { - .recalc_rate = scmi_clk_recalc_rate, - .round_rate = scmi_clk_round_rate, - .set_rate = scmi_clk_set_rate, - .prepare = scmi_clk_enable, - .unprepare = scmi_clk_disable, - .set_parent = scmi_clk_set_parent, - .get_parent = scmi_clk_get_parent, - .determine_rate = scmi_clk_determine_rate, -}; +static int scmi_clk_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty) +{ + int ret; + u32 val; + struct scmi_clk *clk = to_scmi_clk(hw); -static const struct clk_ops scmi_atomic_clk_ops = { - .recalc_rate = scmi_clk_recalc_rate, - .round_rate = scmi_clk_round_rate, - .set_rate = scmi_clk_set_rate, - .enable = scmi_clk_atomic_enable, - .disable = scmi_clk_atomic_disable, - .is_enabled = scmi_clk_atomic_is_enabled, - .set_parent = scmi_clk_set_parent, - .get_parent = scmi_clk_get_parent, - .determine_rate = scmi_clk_determine_rate, -}; + ret = scmi_proto_clk_ops->config_oem_get(clk->ph, clk->id, + SCMI_CLOCK_CFG_DUTY_CYCLE, + &val, NULL, false); + if (!ret) { + duty->num = val; + duty->den = 100; + } else { + dev_warn(clk->dev, + "Failed to get duty cycle for clock ID %d\n", clk->id); + } + + return ret; +} + +static int scmi_clk_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty) +{ + int ret; + u32 val; + struct scmi_clk *clk = to_scmi_clk(hw); + + /* SCMI OEM Duty Cycle is expressed as a percentage */ + val = (duty->num * 100) / duty->den; + ret = scmi_proto_clk_ops->config_oem_set(clk->ph, clk->id, + SCMI_CLOCK_CFG_DUTY_CYCLE, + val, false); + if (ret) + dev_warn(clk->dev, + "Failed to set duty cycle(%u/%u) for clock ID %d\n", + duty->num, duty->den, clk->id); + + return ret; +} static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk, const struct clk_ops *scmi_ops) @@ -230,17 +245,153 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk, return ret; } +/** + * scmi_clk_ops_alloc() - Alloc and configure clock operations + * @dev: A device reference for devres + * @feats_key: A bitmap representing the desired clk_ops capabilities + * + * Allocate and configure a proper set of clock operations depending on the + * specifically required SCMI clock features. + * + * Return: A pointer to the allocated and configured clk_ops on success, + * or NULL on allocation failure. + */ +static const struct clk_ops * +scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key) +{ + struct clk_ops *ops; + + ops = devm_kzalloc(dev, sizeof(*ops), GFP_KERNEL); + if (!ops) + return NULL; + /* + * We can provide enable/disable/is_enabled atomic callbacks only if the + * underlying SCMI transport for an SCMI instance is configured to + * handle SCMI commands in an atomic manner. + * + * When no SCMI atomic transport support is available we instead provide + * only the prepare/unprepare API, as allowed by the clock framework + * when atomic calls are not available. 
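The comment above rests on a clock-framework rule worth spelling out: enable/disable (and is_enabled) are called under the enable spinlock and must never sleep, while prepare/unprepare run in sleepable context, so a clock whose backing transport can block may only offer the latter pair. A minimal sketch of the two shapes of clk_ops being chosen between; the demo_ stubs stand in for whatever actually gates the clock:

#include <linux/clk-provider.h>

static int demo_gate_on(struct clk_hw *hw)
{
        return 0;       /* stub: a register poke or a firmware request */
}

static void demo_gate_off(struct clk_hw *hw)
{
}

/* Usable when the gate operation completes without sleeping. */
static const struct clk_ops demo_atomic_ops = {
        .enable = demo_gate_on,         /* runs under the enable spinlock */
        .disable = demo_gate_off,
};

/* Fallback when the gate operation may sleep (mailbox, polling, ...). */
static const struct clk_ops demo_sleeping_ops = {
        .prepare = demo_gate_on,        /* sleepable context only */
        .unprepare = demo_gate_off,
};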
+ */ + if (feats_key & BIT(SCMI_CLK_STATE_CTRL_SUPPORTED)) { + if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED)) { + ops->enable = scmi_clk_atomic_enable; + ops->disable = scmi_clk_atomic_disable; + } else { + ops->prepare = scmi_clk_enable; + ops->unprepare = scmi_clk_disable; + } + } + + if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED)) + ops->is_enabled = scmi_clk_atomic_is_enabled; + + /* Rate ops */ + ops->recalc_rate = scmi_clk_recalc_rate; + ops->round_rate = scmi_clk_round_rate; + ops->determine_rate = scmi_clk_determine_rate; + if (feats_key & BIT(SCMI_CLK_RATE_CTRL_SUPPORTED)) + ops->set_rate = scmi_clk_set_rate; + + /* Parent ops */ + ops->get_parent = scmi_clk_get_parent; + if (feats_key & BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED)) + ops->set_parent = scmi_clk_set_parent; + + /* Duty cycle */ + if (feats_key & BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED)) { + ops->get_duty_cycle = scmi_clk_get_duty_cycle; + ops->set_duty_cycle = scmi_clk_set_duty_cycle; + } + + return ops; +} + +/** + * scmi_clk_ops_select() - Select a proper set of clock operations + * @sclk: A reference to an SCMI clock descriptor + * @atomic_capable: A flag to indicate if atomic mode is supported by the + * transport + * @atomic_threshold_us: Platform atomic threshold value in microseconds: + * clk_ops are atomic when clock enable latency is less + * than this threshold + * @clk_ops_db: A reference to the array used as a database to store all the + * created clock operations combinations. + * @db_size: Maximum number of entries held by @clk_ops_db + * + * After having built a bitmap descriptor to represent the set of features + * needed by this SCMI clock, at first use it to lookup into the set of + * previously allocated clk_ops to check if a suitable combination of clock + * operations was already created; when no match is found allocate a brand new + * set of clk_ops satisfying the required combination of features and save it + * for future references. + * + * In this way only one set of clk_ops is ever created for each different + * combination that is effectively needed by a driver instance. + * + * Return: A pointer to the allocated and configured clk_ops on success, or + * NULL otherwise. + */ +static const struct clk_ops * +scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable, + unsigned int atomic_threshold_us, + const struct clk_ops **clk_ops_db, size_t db_size) +{ + const struct scmi_clock_info *ci = sclk->info; + unsigned int feats_key = 0; + const struct clk_ops *ops; + + /* + * Note that when transport is atomic but SCMI protocol did not + * specify (or support) an enable_latency associated with a + * clock, we default to use atomic operations mode. 
+ */ + if (atomic_capable && ci->enable_latency <= atomic_threshold_us) + feats_key |= BIT(SCMI_CLK_ATOMIC_SUPPORTED); + + if (!ci->state_ctrl_forbidden) + feats_key |= BIT(SCMI_CLK_STATE_CTRL_SUPPORTED); + + if (!ci->rate_ctrl_forbidden) + feats_key |= BIT(SCMI_CLK_RATE_CTRL_SUPPORTED); + + if (!ci->parent_ctrl_forbidden) + feats_key |= BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED); + + if (ci->extended_config) + feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED); + + if (WARN_ON(feats_key >= db_size)) + return NULL; + + /* Lookup previously allocated ops */ + ops = clk_ops_db[feats_key]; + if (ops) + return ops; + + /* Did not find a pre-allocated clock_ops */ + ops = scmi_clk_ops_alloc(sclk->dev, feats_key); + if (!ops) + return NULL; + + /* Store new ops combinations */ + clk_ops_db[feats_key] = ops; + + return ops; +} + static int scmi_clocks_probe(struct scmi_device *sdev) { int idx, count, err; - unsigned int atomic_threshold; - bool is_atomic; + unsigned int atomic_threshold_us; + bool transport_is_atomic; struct clk_hw **hws; struct clk_hw_onecell_data *clk_data; struct device *dev = &sdev->dev; struct device_node *np = dev->of_node; const struct scmi_handle *handle = sdev->handle; struct scmi_protocol_handle *ph; + const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {}; if (!handle) return -ENODEV; @@ -264,7 +415,8 @@ static int scmi_clocks_probe(struct scmi_device *sdev) clk_data->num = count; hws = clk_data->hws; - is_atomic = handle->is_transport_atomic(handle, &atomic_threshold); + transport_is_atomic = handle->is_transport_atomic(handle, + &atomic_threshold_us); for (idx = 0; idx < count; idx++) { struct scmi_clk *sclk; @@ -286,15 +438,17 @@ static int scmi_clocks_probe(struct scmi_device *sdev) sclk->dev = dev; /* - * Note that when transport is atomic but SCMI protocol did not - * specify (or support) an enable_latency associated with a - * clock, we default to use atomic operations mode. + * Note that the scmi_clk_ops_db is on the stack, not global, + * because it cannot be shared between mulitple probe-sequences + * to avoid sharing the devm_ allocated clk_ops between multiple + * SCMI clk driver instances. */ - if (is_atomic && - sclk->info->enable_latency <= atomic_threshold) - scmi_ops = &scmi_atomic_clk_ops; - else - scmi_ops = &scmi_clk_ops; + scmi_ops = scmi_clk_ops_select(sclk, transport_is_atomic, + atomic_threshold_us, + scmi_clk_ops_db, + ARRAY_SIZE(scmi_clk_ops_db)); + if (!scmi_ops) + return -ENOMEM; /* Initialize clock parent data. */ if (sclk->info->num_parents > 0) { @@ -318,8 +472,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev) } else { dev_dbg(dev, "Registered clock:%s%s\n", sclk->info->name, - scmi_ops == &scmi_atomic_clk_ops ? - " (atomic ops)" : ""); + scmi_ops->enable ? 
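scmi_clk_ops_select() above is essentially a memoised constructor: the per-clock feature bits form a key into a small table, so each distinct combination of callbacks is built once per driver instance and reused by every clock that needs it. A stripped-down sketch of the same lookup-or-allocate scheme, with demo_ names standing in for the SCMI types:

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/device.h>
#include <linux/slab.h>

#define DEMO_FEAT_ATOMIC        0
#define DEMO_FEAT_SET_RATE      1
#define DEMO_FEAT_COUNT         2
#define DEMO_MAX_COMBOS         BIT(DEMO_FEAT_COUNT)

struct demo_ops {
        bool atomic;
        bool can_set_rate;
};

static const struct demo_ops *
demo_ops_select(struct device *dev, unsigned long feats,
                const struct demo_ops **db, size_t db_size)
{
        struct demo_ops *ops;

        if (WARN_ON(feats >= db_size))
                return NULL;

        if (db[feats])                  /* combination built earlier: reuse it */
                return db[feats];

        ops = devm_kzalloc(dev, sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return NULL;

        ops->atomic = feats & BIT(DEMO_FEAT_ATOMIC);
        ops->can_set_rate = feats & BIT(DEMO_FEAT_SET_RATE);

        db[feats] = ops;                /* cache for later clocks of this instance */
        return ops;
}

Sizing the table as BIT(DEMO_FEAT_COUNT) gives every possible key a slot, which is what SCMI_MAX_CLK_OPS does; keeping the table per probe call matches the devm lifetime of the ops it stores, as the in-code comment above explains.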
" (atomic ops)" : ""); hws[idx] = &sclk->hw; } } diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 9cd80522ca..2f83fb97c6 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -158,22 +158,60 @@ vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt, va_list ap) { struct clk_lookup_alloc *cla; + struct va_format vaf; + const char *failure; + va_list ap_copy; + size_t max_size; + ssize_t res; cla = kzalloc(sizeof(*cla), GFP_KERNEL); if (!cla) return NULL; + va_copy(ap_copy, ap); + cla->cl.clk_hw = hw; if (con_id) { - strscpy(cla->con_id, con_id, sizeof(cla->con_id)); + res = strscpy(cla->con_id, con_id, sizeof(cla->con_id)); + if (res < 0) { + max_size = sizeof(cla->con_id); + failure = "connection"; + goto fail; + } cla->cl.con_id = cla->con_id; } if (dev_fmt) { - vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap); + res = vsnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap); + if (res >= sizeof(cla->dev_id)) { + max_size = sizeof(cla->dev_id); + failure = "device"; + goto fail; + } cla->cl.dev_id = cla->dev_id; } + va_end(ap_copy); + + return &cla->cl; + +fail: + if (dev_fmt) + vaf.fmt = dev_fmt; + else + vaf.fmt = "null-device"; + vaf.va = &ap_copy; + pr_err("%pV:%s: %s ID is greater than %zu\n", + &vaf, con_id, failure, max_size); + va_end(ap_copy); + + /* + * Don't fail in this case, but as the entry won't ever match just + * fill it with something that also won't match. + */ + strscpy(cla->con_id, "bad", sizeof(cla->con_id)); + strscpy(cla->dev_id, "bad", sizeof(cla->dev_id)); + return &cla->cl; } diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c index ad2d0df43d..ec60ecb517 100644 --- a/drivers/clk/davinci/da8xx-cfgchip.c +++ b/drivers/clk/davinci/da8xx-cfgchip.c @@ -508,7 +508,7 @@ da8xx_cfgchip_register_usb0_clk48(struct device *dev, const char * const parent_names[] = { "usb_refclkin", "pll0_auxclk" }; struct clk *fck_clk; struct da8xx_usb0_clk48 *usb0; - struct clk_init_data init; + struct clk_init_data init = {}; int ret; fck_clk = devm_clk_get(dev, "fck"); @@ -583,7 +583,7 @@ da8xx_cfgchip_register_usb1_clk48(struct device *dev, { const char * const parent_names[] = { "usb0_clk48", "usb_refclkin" }; struct da8xx_usb1_clk48 *usb1; - struct clk_init_data init; + struct clk_init_data init = {}; int ret; usb1 = devm_kzalloc(dev, sizeof(*usb1), GFP_KERNEL); diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig index db3bca5f4e..6da0fba682 100644 --- a/drivers/clk/imx/Kconfig +++ b/drivers/clk/imx/Kconfig @@ -114,6 +114,13 @@ config CLK_IMX93 help Build the driver for i.MX93 CCM Clock Driver +config CLK_IMX95_BLK_CTL + tristate "IMX95 Clock Driver for BLK CTL" + depends on ARCH_MXC || COMPILE_TEST + select MXC_CLK + help + Build the clock driver for i.MX95 BLK CTL + config CLK_IMXRT1050 tristate "IMXRT1050 CCM Clock Driver" depends on SOC_IMXRT || COMPILE_TEST diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile index d4b8e10b19..03f2b2a1ab 100644 --- a/drivers/clk/imx/Makefile +++ b/drivers/clk/imx/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_CLK_IMX8MP) += clk-imx8mp.o clk-imx8mp-audiomix.o obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o obj-$(CONFIG_CLK_IMX93) += clk-imx93.o +obj-$(CONFIG_CLK_IMX95_BLK_CTL) += clk-imx95-blk-ctl.o obj-$(CONFIG_MXC_CLK_SCU) += clk-imx-scu.o clk-imx-lpcg-scu.o clk-imx-acm.o clk-imx-scu-$(CONFIG_CLK_IMX8QXP) += clk-scu.o clk-imx8qxp.o \ diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c index 55ed211a5e..b381d6f784 
100644 --- a/drivers/clk/imx/clk-imx8mp-audiomix.c +++ b/drivers/clk/imx/clk-imx8mp-audiomix.c @@ -7,10 +7,12 @@ #include #include +#include #include #include #include #include +#include #include @@ -18,6 +20,7 @@ #define CLKEN0 0x000 #define CLKEN1 0x004 +#define EARC 0x200 #define SAI1_MCLK_SEL 0x300 #define SAI2_MCLK_SEL 0x304 #define SAI3_MCLK_SEL 0x308 @@ -26,6 +29,11 @@ #define SAI7_MCLK_SEL 0x314 #define PDM_SEL 0x318 #define SAI_PLL_GNRL_CTL 0x400 +#define SAI_PLL_FDIVL_CTL0 0x404 +#define SAI_PLL_FDIVL_CTL1 0x408 +#define SAI_PLL_SSCG_CTL 0x40C +#define SAI_PLL_MNIT_CTL 0x410 +#define IPG_LP_CTRL 0x504 #define SAIn_MCLK1_PARENT(n) \ static const struct clk_parent_data \ @@ -182,26 +190,82 @@ static struct clk_imx8mp_audiomix_sel sels[] = { CLK_SAIn(7) }; +static const u16 audiomix_regs[] = { + CLKEN0, + CLKEN1, + EARC, + SAI1_MCLK_SEL, + SAI2_MCLK_SEL, + SAI3_MCLK_SEL, + SAI5_MCLK_SEL, + SAI6_MCLK_SEL, + SAI7_MCLK_SEL, + PDM_SEL, + SAI_PLL_GNRL_CTL, + SAI_PLL_FDIVL_CTL0, + SAI_PLL_FDIVL_CTL1, + SAI_PLL_SSCG_CTL, + SAI_PLL_MNIT_CTL, + IPG_LP_CTRL, +}; + +struct clk_imx8mp_audiomix_priv { + void __iomem *base; + u32 regs_save[ARRAY_SIZE(audiomix_regs)]; + + /* Must be last */ + struct clk_hw_onecell_data clk_data; +}; + +static void clk_imx8mp_audiomix_save_restore(struct device *dev, bool save) +{ + struct clk_imx8mp_audiomix_priv *priv = dev_get_drvdata(dev); + void __iomem *base = priv->base; + int i; + + if (save) { + for (i = 0; i < ARRAY_SIZE(audiomix_regs); i++) + priv->regs_save[i] = readl(base + audiomix_regs[i]); + } else { + for (i = 0; i < ARRAY_SIZE(audiomix_regs); i++) + writel(priv->regs_save[i], base + audiomix_regs[i]); + } +} + static int clk_imx8mp_audiomix_probe(struct platform_device *pdev) { - struct clk_hw_onecell_data *priv; + struct clk_imx8mp_audiomix_priv *priv; + struct clk_hw_onecell_data *clk_hw_data; struct device *dev = &pdev->dev; void __iomem *base; struct clk_hw *hw; - int i; + int i, ret; priv = devm_kzalloc(dev, - struct_size(priv, hws, IMX8MP_CLK_AUDIOMIX_END), + struct_size(priv, clk_data.hws, IMX8MP_CLK_AUDIOMIX_END), GFP_KERNEL); if (!priv) return -ENOMEM; - priv->num = IMX8MP_CLK_AUDIOMIX_END; + clk_hw_data = &priv->clk_data; + clk_hw_data->num = IMX8MP_CLK_AUDIOMIX_END; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); + priv->base = base; + dev_set_drvdata(dev, priv); + + /* + * pm_runtime_enable needs to be called before clk register. + * That is to make core->rpm_enabled to be true for clock + * usage. 
+ */ + pm_runtime_get_noresume(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + for (i = 0; i < ARRAY_SIZE(sels); i++) { if (sels[i].num_parents == 1) { hw = devm_clk_hw_register_gate_parent_data(dev, @@ -216,10 +280,12 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev) 0, NULL, NULL); } - if (IS_ERR(hw)) - return PTR_ERR(hw); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_clk_register; + } - priv->hws[sels[i].clkid] = hw; + clk_hw_data->hws[sels[i].clkid] = hw; } /* SAI PLL */ @@ -228,39 +294,84 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev) ARRAY_SIZE(clk_imx8mp_audiomix_pll_parents), CLK_SET_RATE_NO_REPARENT, base + SAI_PLL_GNRL_CTL, 0, 2, 0, NULL, NULL); - priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_REF_SEL] = hw; + clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_REF_SEL] = hw; hw = imx_dev_clk_hw_pll14xx(dev, "sai_pll", "sai_pll_ref_sel", base + 0x400, &imx_1443x_pll); - if (IS_ERR(hw)) - return PTR_ERR(hw); - priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL] = hw; + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_clk_register; + } + clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL] = hw; hw = devm_clk_hw_register_mux_parent_data_table(dev, "sai_pll_bypass", clk_imx8mp_audiomix_pll_bypass_sels, ARRAY_SIZE(clk_imx8mp_audiomix_pll_bypass_sels), CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, base + SAI_PLL_GNRL_CTL, 16, 1, 0, NULL, NULL); - if (IS_ERR(hw)) - return PTR_ERR(hw); - priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS] = hw; + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_clk_register; + } + + clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS] = hw; hw = devm_clk_hw_register_gate(dev, "sai_pll_out", "sai_pll_bypass", 0, base + SAI_PLL_GNRL_CTL, 13, 0, NULL); - if (IS_ERR(hw)) - return PTR_ERR(hw); - priv->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT] = hw; + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_clk_register; + } + clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT] = hw; hw = devm_clk_hw_register_fixed_factor(dev, "sai_pll_out_div2", "sai_pll_out", 0, 1, 2); - if (IS_ERR(hw)) - return PTR_ERR(hw); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_clk_register; + } + + ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get, + clk_hw_data); + if (ret) + goto err_clk_register; + + pm_runtime_put_sync(dev); + return 0; + +err_clk_register: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + return ret; +} + +static void clk_imx8mp_audiomix_remove(struct platform_device *pdev) +{ + pm_runtime_disable(&pdev->dev); +} + +static int clk_imx8mp_audiomix_runtime_suspend(struct device *dev) +{ + clk_imx8mp_audiomix_save_restore(dev, true); - return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get, - priv); + return 0; } +static int clk_imx8mp_audiomix_runtime_resume(struct device *dev) +{ + clk_imx8mp_audiomix_save_restore(dev, false); + + return 0; +} + +static const struct dev_pm_ops clk_imx8mp_audiomix_pm_ops = { + RUNTIME_PM_OPS(clk_imx8mp_audiomix_runtime_suspend, + clk_imx8mp_audiomix_runtime_resume, NULL) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + static const struct of_device_id clk_imx8mp_audiomix_of_match[] = { { .compatible = "fsl,imx8mp-audio-blk-ctrl" }, { /* sentinel */ } @@ -269,9 +380,11 @@ MODULE_DEVICE_TABLE(of, clk_imx8mp_audiomix_of_match); static struct platform_driver clk_imx8mp_audiomix_driver = { .probe = clk_imx8mp_audiomix_probe, + .remove_new = clk_imx8mp_audiomix_remove, .driver = { .name = "imx8mp-audio-blk-ctrl", .of_match_table = 
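The audiomix probe above now wraps clock registration in the runtime-PM bring-up order its comment calls out: enable runtime PM (with the device marked active and a temporary reference held) before any clk_hw is registered, so the clk core sees rpm_enabled, and unwind both the reference and the enable on failure. Reduced to a sketch with illustrative demo_ names:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int demo_register_clocks(struct device *dev)
{
        return 0;       /* stand-in for the real clk_hw registrations */
}

static int demo_probe_pm_setup(struct device *dev)
{
        int ret;

        pm_runtime_get_noresume(dev);   /* hold a reference while registering */
        pm_runtime_set_active(dev);     /* hardware is already powered here */
        pm_runtime_enable(dev);

        ret = demo_register_clocks(dev);
        if (ret) {
                pm_runtime_put_sync(dev);
                pm_runtime_disable(dev);
                return ret;
        }

        pm_runtime_put_sync(dev);       /* allow runtime suspend from now on */
        return 0;
}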
clk_imx8mp_audiomix_of_match, + .pm = pm_ptr(&clk_imx8mp_audiomix_pm_ops), }, }; diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c new file mode 100644 index 0000000000..74f595f9e5 --- /dev/null +++ b/drivers/clk/imx/clk-imx95-blk-ctl.c @@ -0,0 +1,438 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2024 NXP + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + CLK_GATE, + CLK_DIVIDER, + CLK_MUX, +}; + +struct imx95_blk_ctl { + struct device *dev; + spinlock_t lock; + struct clk *clk_apb; + + void __iomem *base; + /* clock gate register */ + u32 clk_reg_restore; +}; + +struct imx95_blk_ctl_clk_dev_data { + const char *name; + const char * const *parent_names; + u32 num_parents; + u32 reg; + u32 bit_idx; + u32 bit_width; + u32 clk_type; + u32 flags; + u32 flags2; + u32 type; +}; + +struct imx95_blk_ctl_dev_data { + const struct imx95_blk_ctl_clk_dev_data *clk_dev_data; + u32 num_clks; + bool rpm_enabled; + u32 clk_reg_offset; +}; + +static const struct imx95_blk_ctl_clk_dev_data vpublk_clk_dev_data[] = { + [IMX95_CLK_VPUBLK_WAVE] = { + .name = "vpublk_wave_vpu", + .parent_names = (const char *[]){ "vpu", }, + .num_parents = 1, + .reg = 8, + .bit_idx = 0, + .type = CLK_GATE, + .flags = CLK_SET_RATE_PARENT, + .flags2 = CLK_GATE_SET_TO_DISABLE, + }, + [IMX95_CLK_VPUBLK_JPEG_ENC] = { + .name = "vpublk_jpeg_enc", + .parent_names = (const char *[]){ "vpujpeg", }, + .num_parents = 1, + .reg = 8, + .bit_idx = 1, + .type = CLK_GATE, + .flags = CLK_SET_RATE_PARENT, + .flags2 = CLK_GATE_SET_TO_DISABLE, + }, + [IMX95_CLK_VPUBLK_JPEG_DEC] = { + .name = "vpublk_jpeg_dec", + .parent_names = (const char *[]){ "vpujpeg", }, + .num_parents = 1, + .reg = 8, + .bit_idx = 2, + .type = CLK_GATE, + .flags = CLK_SET_RATE_PARENT, + .flags2 = CLK_GATE_SET_TO_DISABLE, + } +}; + +static const struct imx95_blk_ctl_dev_data vpublk_dev_data = { + .num_clks = ARRAY_SIZE(vpublk_clk_dev_data), + .clk_dev_data = vpublk_clk_dev_data, + .rpm_enabled = true, + .clk_reg_offset = 8, +}; + +static const struct imx95_blk_ctl_clk_dev_data camblk_clk_dev_data[] = { + [IMX95_CLK_CAMBLK_CSI2_FOR0] = { + .name = "camblk_csi2_for0", + .parent_names = (const char *[]){ "camisi", }, + .num_parents = 1, + .reg = 0, + .bit_idx = 0, + .type = CLK_GATE, + .flags = CLK_SET_RATE_PARENT, + .flags2 = CLK_GATE_SET_TO_DISABLE, + }, + [IMX95_CLK_CAMBLK_CSI2_FOR1] = { + .name = "camblk_csi2_for1", + .parent_names = (const char *[]){ "camisi", }, + .num_parents = 1, + .reg = 0, + .bit_idx = 1, + .type = CLK_GATE, + .flags = CLK_SET_RATE_PARENT, + .flags2 = CLK_GATE_SET_TO_DISABLE, + }, + [IMX95_CLK_CAMBLK_ISP_AXI] = { + .name = "camblk_isp_axi", + .parent_names = (const char *[]){ "camaxi", }, + .num_parents = 1, + .reg = 0, + .bit_idx = 4, + .type = CLK_GATE, + .flags = CLK_SET_RATE_PARENT, + .flags2 = CLK_GATE_SET_TO_DISABLE, + }, + [IMX95_CLK_CAMBLK_ISP_PIXEL] = { + .name = "camblk_isp_pixel", + .parent_names = (const char *[]){ "camisi", }, + .num_parents = 1, + .reg = 0, + .bit_idx = 5, + .type = CLK_GATE, + .flags = CLK_SET_RATE_PARENT, + .flags2 = CLK_GATE_SET_TO_DISABLE, + }, + [IMX95_CLK_CAMBLK_ISP] = { + .name = "camblk_isp", + .parent_names = (const char *[]){ "camisi", }, + .num_parents = 1, + .reg = 0, + .bit_idx = 6, + .type = CLK_GATE, + .flags = CLK_SET_RATE_PARENT, + .flags2 = CLK_GATE_SET_TO_DISABLE, + } +}; + +static const struct imx95_blk_ctl_dev_data 
camblk_dev_data = { + .num_clks = ARRAY_SIZE(camblk_clk_dev_data), + .clk_dev_data = camblk_clk_dev_data, + .clk_reg_offset = 0, +}; + +static const struct imx95_blk_ctl_clk_dev_data lvds_clk_dev_data[] = { + [IMX95_CLK_DISPMIX_LVDS_PHY_DIV] = { + .name = "ldb_phy_div", + .parent_names = (const char *[]){ "ldbpll", }, + .num_parents = 1, + .reg = 0, + .bit_idx = 0, + .bit_width = 1, + .type = CLK_DIVIDER, + .flags2 = CLK_DIVIDER_POWER_OF_TWO, + }, + [IMX95_CLK_DISPMIX_LVDS_CH0_GATE] = { + .name = "lvds_ch0_gate", + .parent_names = (const char *[]){ "ldb_phy_div", }, + .num_parents = 1, + .reg = 0, + .bit_idx = 1, + .bit_width = 1, + .type = CLK_GATE, + .flags = CLK_SET_RATE_PARENT, + .flags2 = CLK_GATE_SET_TO_DISABLE, + }, + [IMX95_CLK_DISPMIX_LVDS_CH1_GATE] = { + .name = "lvds_ch1_gate", + .parent_names = (const char *[]){ "ldb_phy_div", }, + .num_parents = 1, + .reg = 0, + .bit_idx = 2, + .bit_width = 1, + .type = CLK_GATE, + .flags = CLK_SET_RATE_PARENT, + .flags2 = CLK_GATE_SET_TO_DISABLE, + }, + [IMX95_CLK_DISPMIX_PIX_DI0_GATE] = { + .name = "lvds_di0_gate", + .parent_names = (const char *[]){ "ldb_pll_div7", }, + .num_parents = 1, + .reg = 0, + .bit_idx = 3, + .bit_width = 1, + .type = CLK_GATE, + .flags = CLK_SET_RATE_PARENT, + .flags2 = CLK_GATE_SET_TO_DISABLE, + }, + [IMX95_CLK_DISPMIX_PIX_DI1_GATE] = { + .name = "lvds_di1_gate", + .parent_names = (const char *[]){ "ldb_pll_div7", }, + .num_parents = 1, + .reg = 0, + .bit_idx = 4, + .bit_width = 1, + .type = CLK_GATE, + .flags = CLK_SET_RATE_PARENT, + .flags2 = CLK_GATE_SET_TO_DISABLE, + }, +}; + +static const struct imx95_blk_ctl_dev_data lvds_csr_dev_data = { + .num_clks = ARRAY_SIZE(lvds_clk_dev_data), + .clk_dev_data = lvds_clk_dev_data, + .clk_reg_offset = 0, +}; + +static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = { + [IMX95_CLK_DISPMIX_ENG0_SEL] = { + .name = "disp_engine0_sel", + .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", }, + .num_parents = 4, + .reg = 0, + .bit_idx = 0, + .bit_width = 2, + .type = CLK_MUX, + .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, + }, + [IMX95_CLK_DISPMIX_ENG1_SEL] = { + .name = "disp_engine1_sel", + .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", }, + .num_parents = 4, + .reg = 0, + .bit_idx = 2, + .bit_width = 2, + .type = CLK_MUX, + .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, + } +}; + +static const struct imx95_blk_ctl_dev_data dispmix_csr_dev_data = { + .num_clks = ARRAY_SIZE(dispmix_csr_clk_dev_data), + .clk_dev_data = dispmix_csr_clk_dev_data, + .clk_reg_offset = 0, +}; + +static int imx95_bc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct imx95_blk_ctl_dev_data *bc_data; + struct imx95_blk_ctl *bc; + struct clk_hw_onecell_data *clk_hw_data; + struct clk_hw **hws; + void __iomem *base; + int i, ret; + + bc = devm_kzalloc(dev, sizeof(*bc), GFP_KERNEL); + if (!bc) + return -ENOMEM; + bc->dev = dev; + dev_set_drvdata(&pdev->dev, bc); + + spin_lock_init(&bc->lock); + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + bc->base = base; + bc->clk_apb = devm_clk_get(dev, NULL); + if (IS_ERR(bc->clk_apb)) + return dev_err_probe(dev, PTR_ERR(bc->clk_apb), "failed to get APB clock\n"); + + ret = clk_prepare_enable(bc->clk_apb); + if (ret) { + dev_err(dev, "failed to enable apb clock: %d\n", ret); + return ret; + } + + bc_data = of_device_get_match_data(dev); + if (!bc_data) + return 
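Two flags used throughout these tables are easy to misread: CLK_DIVIDER_POWER_OF_TWO treats the register field as an exponent (the 1-bit ldb_phy_div field therefore selects /1 or /2), and CLK_GATE_SET_TO_DISABLE inverts the gate so that setting the bit stops the clock. Illustrative helpers, not part of the driver:

#include <linux/bits.h>
#include <linux/types.h>

/* CLK_DIVIDER_POWER_OF_TWO: the field value is the exponent. */
static unsigned long demo_pow2_div_rate(unsigned long parent_rate, u32 field)
{
        return parent_rate >> field;    /* 0 -> /1, 1 -> /2, 2 -> /4, ... */
}

/* CLK_GATE_SET_TO_DISABLE: the bit is a "stop" bit, not an enable bit. */
static bool demo_gate_is_enabled(u32 reg_val, unsigned int bit_idx)
{
        return !(reg_val & BIT(bit_idx));       /* clock runs while the bit is 0 */
}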
devm_of_platform_populate(dev); + + clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, bc_data->num_clks), + GFP_KERNEL); + if (!clk_hw_data) + return -ENOMEM; + + if (bc_data->rpm_enabled) + pm_runtime_enable(&pdev->dev); + + clk_hw_data->num = bc_data->num_clks; + hws = clk_hw_data->hws; + + for (i = 0; i < bc_data->num_clks; i++) { + const struct imx95_blk_ctl_clk_dev_data *data = &bc_data->clk_dev_data[i]; + void __iomem *reg = base + data->reg; + + if (data->type == CLK_MUX) { + hws[i] = clk_hw_register_mux(dev, data->name, data->parent_names, + data->num_parents, data->flags, reg, + data->bit_idx, data->bit_width, + data->flags2, &bc->lock); + } else if (data->type == CLK_DIVIDER) { + hws[i] = clk_hw_register_divider(dev, data->name, data->parent_names[0], + data->flags, reg, data->bit_idx, + data->bit_width, data->flags2, &bc->lock); + } else { + hws[i] = clk_hw_register_gate(dev, data->name, data->parent_names[0], + data->flags, reg, data->bit_idx, + data->flags2, &bc->lock); + } + if (IS_ERR(hws[i])) { + ret = PTR_ERR(hws[i]); + dev_err(dev, "failed to register: %s:%d\n", data->name, ret); + goto cleanup; + } + } + + ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, clk_hw_data); + if (ret) + goto cleanup; + + ret = devm_of_platform_populate(dev); + if (ret) { + of_clk_del_provider(dev->of_node); + goto cleanup; + } + + if (pm_runtime_enabled(bc->dev)) + clk_disable_unprepare(bc->clk_apb); + + return 0; + +cleanup: + for (i = 0; i < bc_data->num_clks; i++) { + if (IS_ERR_OR_NULL(hws[i])) + continue; + clk_hw_unregister(hws[i]); + } + + if (bc_data->rpm_enabled) + pm_runtime_disable(&pdev->dev); + + return ret; +} + +#ifdef CONFIG_PM +static int imx95_bc_runtime_suspend(struct device *dev) +{ + struct imx95_blk_ctl *bc = dev_get_drvdata(dev); + + clk_disable_unprepare(bc->clk_apb); + return 0; +} + +static int imx95_bc_runtime_resume(struct device *dev) +{ + struct imx95_blk_ctl *bc = dev_get_drvdata(dev); + + return clk_prepare_enable(bc->clk_apb); +} +#endif + +#ifdef CONFIG_PM_SLEEP +static int imx95_bc_suspend(struct device *dev) +{ + struct imx95_blk_ctl *bc = dev_get_drvdata(dev); + const struct imx95_blk_ctl_dev_data *bc_data; + int ret; + + bc_data = of_device_get_match_data(dev); + if (!bc_data) + return 0; + + if (bc_data->rpm_enabled) { + ret = pm_runtime_get_sync(bc->dev); + if (ret < 0) { + pm_runtime_put_noidle(bc->dev); + return ret; + } + } + + bc->clk_reg_restore = readl(bc->base + bc_data->clk_reg_offset); + + return 0; +} + +static int imx95_bc_resume(struct device *dev) +{ + struct imx95_blk_ctl *bc = dev_get_drvdata(dev); + const struct imx95_blk_ctl_dev_data *bc_data; + + bc_data = of_device_get_match_data(dev); + if (!bc_data) + return 0; + + writel(bc->clk_reg_restore, bc->base + bc_data->clk_reg_offset); + + if (bc_data->rpm_enabled) + pm_runtime_put(bc->dev); + + return 0; +} +#endif + +static const struct dev_pm_ops imx95_bc_pm_ops = { + SET_RUNTIME_PM_OPS(imx95_bc_runtime_suspend, imx95_bc_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(imx95_bc_suspend, imx95_bc_resume) +}; + +static const struct of_device_id imx95_bc_of_match[] = { + { .compatible = "nxp,imx95-camera-csr", .data = &camblk_dev_data }, + { .compatible = "nxp,imx95-display-master-csr", }, + { .compatible = "nxp,imx95-lvds-csr", .data = &lvds_csr_dev_data }, + { .compatible = "nxp,imx95-display-csr", .data = &dispmix_csr_dev_data }, + { .compatible = "nxp,imx95-vpu-csr", .data = &vpublk_dev_data }, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, 
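imx95_bc_suspend()/imx95_bc_resume() above capture a pattern worth isolating: the gate register is only reachable while the block is runtime-resumed, so system suspend takes a runtime-PM reference that is not dropped until resume has written the saved value back. A minimal sketch using the pm_runtime_resume_and_get() helper (the driver itself uses pm_runtime_get_sync() plus put_noidle() on failure); demo_ names are illustrative:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

struct demo_bc {
        void __iomem *base;
        u32 saved;
};

static int demo_suspend(struct device *dev)
{
        struct demo_bc *bc = dev_get_drvdata(dev);
        int ret;

        ret = pm_runtime_resume_and_get(dev);   /* registers need power to be read */
        if (ret < 0)
                return ret;

        bc->saved = readl(bc->base);
        return 0;
}

static int demo_resume(struct device *dev)
{
        struct demo_bc *bc = dev_get_drvdata(dev);

        writel(bc->saved, bc->base);
        pm_runtime_put(dev);    /* drop the reference held since suspend */
        return 0;
}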
imx95_bc_of_match); + +static struct platform_driver imx95_bc_driver = { + .probe = imx95_bc_probe, + .driver = { + .name = "imx95-blk-ctl", + .of_match_table = imx95_bc_of_match, + .pm = &imx95_bc_pm_ops, + }, +}; +module_platform_driver(imx95_bc_driver); + +MODULE_DESCRIPTION("NXP i.MX95 blk ctl driver"); +MODULE_AUTHOR("Peng Fan "); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig index 29ffd14d26..59a40a49f8 100644 --- a/drivers/clk/meson/Kconfig +++ b/drivers/clk/meson/Kconfig @@ -30,6 +30,10 @@ config COMMON_CLK_MESON_VID_PLL_DIV tristate select COMMON_CLK_MESON_REGMAP +config COMMON_CLK_MESON_VCLK + tristate + select COMMON_CLK_MESON_REGMAP + config COMMON_CLK_MESON_CLKC_UTILS tristate @@ -140,6 +144,7 @@ config COMMON_CLK_G12A select COMMON_CLK_MESON_EE_CLKC select COMMON_CLK_MESON_CPU_DYNDIV select COMMON_CLK_MESON_VID_PLL_DIV + select COMMON_CLK_MESON_VCLK select MFD_SYSCON help Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2 diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile index 9ee4b954c8..9ba43fe7a0 100644 --- a/drivers/clk/meson/Makefile +++ b/drivers/clk/meson/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o obj-$(CONFIG_COMMON_CLK_MESON_REGMAP) += clk-regmap.o obj-$(CONFIG_COMMON_CLK_MESON_SCLK_DIV) += sclk-div.o obj-$(CONFIG_COMMON_CLK_MESON_VID_PLL_DIV) += vid-pll-div.o +obj-$(CONFIG_COMMON_CLK_MESON_VCLK) += vclk.o # Amlogic Clock controllers diff --git a/drivers/clk/meson/a1-peripherals.c b/drivers/clk/meson/a1-peripherals.c index e2a1f12f91..621af1e6e4 100644 --- a/drivers/clk/meson/a1-peripherals.c +++ b/drivers/clk/meson/a1-peripherals.c @@ -2187,6 +2187,7 @@ static struct regmap_config a1_periphs_regmap_cfg = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, + .max_register = DMC_CLK_CTRL, }; static struct meson_clk_hw_data a1_periphs_clks = { diff --git a/drivers/clk/meson/a1-pll.c b/drivers/clk/meson/a1-pll.c index 4325e8a6a3..90b0aeeb04 100644 --- a/drivers/clk/meson/a1-pll.c +++ b/drivers/clk/meson/a1-pll.c @@ -299,6 +299,7 @@ static struct regmap_config a1_pll_regmap_cfg = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, + .max_register = ANACTRL_HIFIPLL_STS, }; static struct meson_clk_hw_data a1_pll_clks = { diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c index d80ab4728f..e4d0f46f47 100644 --- a/drivers/clk/meson/axg-aoclk.c +++ b/drivers/clk/meson/axg-aoclk.c @@ -340,4 +340,4 @@ static struct platform_driver axg_aoclkc_driver = { }; module_platform_driver(axg_aoclkc_driver); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c index ac34829609..e03a5bf899 100644 --- a/drivers/clk/meson/axg-audio.c +++ b/drivers/clk/meson/axg-audio.c @@ -1877,4 +1877,4 @@ module_platform_driver(axg_audio_driver); MODULE_DESCRIPTION("Amlogic AXG/G12A/SM1 Audio Clock driver"); MODULE_AUTHOR("Jerome Brunet "); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c index 5f60f2bcca..52d610110e 100644 --- a/drivers/clk/meson/axg.c +++ b/drivers/clk/meson/axg.c @@ -2185,4 +2185,4 @@ static struct platform_driver axg_driver = { }; module_platform_driver(axg_driver); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/clk-cpu-dyndiv.c b/drivers/clk/meson/clk-cpu-dyndiv.c index 8778c149d2..aa824b030c 100644 --- a/drivers/clk/meson/clk-cpu-dyndiv.c +++ b/drivers/clk/meson/clk-cpu-dyndiv.c 
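The .max_register additions to the Amlogic regmap configs in this patch give each regmap an explicit upper bound, letting it refuse accesses beyond the block and size its debugfs register dump. A minimal hedged sketch (the offset is made up):

#include <linux/regmap.h>

static const struct regmap_config demo_regmap_cfg = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
        .max_register = 0x3fc,  /* highest offset the block decodes */
};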
@@ -69,4 +69,4 @@ EXPORT_SYMBOL_GPL(meson_clk_cpu_dyndiv_ops); MODULE_DESCRIPTION("Amlogic CPU Dynamic Clock divider"); MODULE_AUTHOR("Neil Armstrong "); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c index feae49a8f6..d46c02b51b 100644 --- a/drivers/clk/meson/clk-dualdiv.c +++ b/drivers/clk/meson/clk-dualdiv.c @@ -140,4 +140,4 @@ EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ro_ops); MODULE_DESCRIPTION("Amlogic dual divider driver"); MODULE_AUTHOR("Neil Armstrong "); MODULE_AUTHOR("Jerome Brunet "); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c index 20255e129b..eae9b7dc5a 100644 --- a/drivers/clk/meson/clk-mpll.c +++ b/drivers/clk/meson/clk-mpll.c @@ -177,4 +177,4 @@ EXPORT_SYMBOL_GPL(meson_clk_mpll_ops); MODULE_DESCRIPTION("Amlogic MPLL driver"); MODULE_AUTHOR("Michael Turquette "); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c index a6763439f7..ff3f0b1a3e 100644 --- a/drivers/clk/meson/clk-phase.c +++ b/drivers/clk/meson/clk-phase.c @@ -183,4 +183,4 @@ EXPORT_SYMBOL_GPL(meson_sclk_ws_inv_ops); MODULE_DESCRIPTION("Amlogic phase driver"); MODULE_AUTHOR("Jerome Brunet "); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index 6fa7639a30..07db8b5c30 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -436,8 +436,8 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, ret = meson_clk_pll_enable(hw); if (ret) { - pr_warn("%s: pll did not lock, trying to restore old rate %lu\n", - __func__, old_rate); + pr_warn("%s: pll %s didn't lock, trying to set old rate %lu\n", + __func__, clk_hw_get_name(hw), old_rate); /* * FIXME: Do we really need/want this HACK ? * It looks unsafe. 
what happens if the clock gets into a @@ -486,4 +486,4 @@ EXPORT_SYMBOL_GPL(meson_clk_pll_ro_ops); MODULE_DESCRIPTION("Amlogic PLL driver"); MODULE_AUTHOR("Carlo Caione "); MODULE_AUTHOR("Jerome Brunet "); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c index 8ad8977cf1..ad116d24f7 100644 --- a/drivers/clk/meson/clk-regmap.c +++ b/drivers/clk/meson/clk-regmap.c @@ -183,4 +183,4 @@ EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops); MODULE_DESCRIPTION("Amlogic regmap backed clock driver"); MODULE_AUTHOR("Jerome Brunet "); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c index c6b1d55cd7..58976ed8b9 100644 --- a/drivers/clk/meson/g12a-aoclk.c +++ b/drivers/clk/meson/g12a-aoclk.c @@ -475,4 +475,4 @@ static struct platform_driver g12a_aoclkc_driver = { }; module_platform_driver(g12a_aoclkc_driver); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index 90f4c61030..56e66ecc30 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -22,6 +22,7 @@ #include "clk-regmap.h" #include "clk-cpu-dyndiv.h" #include "vid-pll-div.h" +#include "vclk.h" #include "meson-eeclk.h" #include "g12a.h" @@ -3165,7 +3166,7 @@ static struct clk_regmap g12a_vclk2_sel = { .ops = &clk_regmap_mux_ops, .parent_hws = g12a_vclk_parent_hws, .num_parents = ARRAY_SIZE(g12a_vclk_parent_hws), - .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE, + .flags = CLK_SET_RATE_NO_REPARENT, }, }; @@ -3193,7 +3194,6 @@ static struct clk_regmap g12a_vclk2_input = { .ops = &clk_regmap_gate_ops, .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2_sel.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, }, }; @@ -3215,19 +3215,32 @@ static struct clk_regmap g12a_vclk_div = { }; static struct clk_regmap g12a_vclk2_div = { - .data = &(struct clk_regmap_div_data){ - .offset = HHI_VIID_CLK_DIV, - .shift = 0, - .width = 8, + .data = &(struct meson_vclk_div_data){ + .div = { + .reg_off = HHI_VIID_CLK_DIV, + .shift = 0, + .width = 8, + }, + .enable = { + .reg_off = HHI_VIID_CLK_DIV, + .shift = 16, + .width = 1, + }, + .reset = { + .reg_off = HHI_VIID_CLK_DIV, + .shift = 17, + .width = 1, + }, + .flags = CLK_DIVIDER_ROUND_CLOSEST, }, .hw.init = &(struct clk_init_data){ .name = "vclk2_div", - .ops = &clk_regmap_divider_ops, + .ops = &meson_vclk_div_ops, .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2_input.hw }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, + .flags = CLK_SET_RATE_GATE, }, }; @@ -3246,16 +3259,24 @@ static struct clk_regmap g12a_vclk = { }; static struct clk_regmap g12a_vclk2 = { - .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VIID_CLK_CNTL, - .bit_idx = 19, + .data = &(struct meson_vclk_gate_data){ + .enable = { + .reg_off = HHI_VIID_CLK_CNTL, + .shift = 19, + .width = 1, + }, + .reset = { + .reg_off = HHI_VIID_CLK_CNTL, + .shift = 15, + .width = 1, + }, }, .hw.init = &(struct clk_init_data) { .name = "vclk2", - .ops = &clk_regmap_gate_ops, + .ops = &meson_vclk_gate_ops, .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2_div.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -3339,7 +3360,7 @@ static struct clk_regmap g12a_vclk2_div1 = { .ops = &clk_regmap_gate_ops, .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | 
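The g12a vclk2 hunks here drop CLK_GET_RATE_NOCACHE and CLK_IGNORE_UNUSED and lean on CLK_SET_RATE_PARENT instead (the fixed-factor div2/div4/div6/div12 outputs gain it just below), so a rate request on a /N leaf is forwarded up the vclk2 tree rather than clamped locally. A hedged sketch of one such branch; the names and the 74.25 MHz example are illustrative only:

#include <linux/clk-provider.h>

static const char * const demo_div2_parents[] = { "demo_vclk2" };

/* Asking this /2 branch for 74.25 MHz makes the framework request
 * 148.5 MHz from demo_vclk2, because of CLK_SET_RATE_PARENT. */
static struct clk_fixed_factor demo_div2 = {
        .mult = 1,
        .div = 2,
        .hw.init = &(struct clk_init_data){
                .name = "demo_div2",
                .ops = &clk_fixed_factor_ops,
                .parent_names = demo_div2_parents,
                .num_parents = 1,
                .flags = CLK_SET_RATE_PARENT,
        },
};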
CLK_IGNORE_UNUSED, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -3353,7 +3374,7 @@ static struct clk_regmap g12a_vclk2_div2_en = { .ops = &clk_regmap_gate_ops, .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -3367,7 +3388,7 @@ static struct clk_regmap g12a_vclk2_div4_en = { .ops = &clk_regmap_gate_ops, .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -3381,7 +3402,7 @@ static struct clk_regmap g12a_vclk2_div6_en = { .ops = &clk_regmap_gate_ops, .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -3395,7 +3416,7 @@ static struct clk_regmap g12a_vclk2_div12_en = { .ops = &clk_regmap_gate_ops, .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -3461,6 +3482,7 @@ static struct clk_fixed_factor g12a_vclk2_div2 = { &g12a_vclk2_div2_en.hw }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -3474,6 +3496,7 @@ static struct clk_fixed_factor g12a_vclk2_div4 = { &g12a_vclk2_div4_en.hw }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -3487,6 +3510,7 @@ static struct clk_fixed_factor g12a_vclk2_div6 = { &g12a_vclk2_div6_en.hw }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -3500,6 +3524,7 @@ static struct clk_fixed_factor g12a_vclk2_div12 = { &g12a_vclk2_div12_en.hw }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -3561,7 +3586,7 @@ static struct clk_regmap g12a_cts_encl_sel = { .ops = &clk_regmap_mux_ops, .parent_hws = g12a_cts_parent_hws, .num_parents = ARRAY_SIZE(g12a_cts_parent_hws), - .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE, + .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, }, }; @@ -3717,15 +3742,26 @@ static struct clk_regmap g12a_mipi_dsi_pxclk_sel = { .ops = &clk_regmap_mux_ops, .parent_hws = g12a_mipi_dsi_pxclk_parent_hws, .num_parents = ARRAY_SIZE(g12a_mipi_dsi_pxclk_parent_hws), - .flags = CLK_SET_RATE_NO_REPARENT, + .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, }, }; +/* + * FIXME: Force as bypass by forcing a single /1 table entry, and doensn't on boot value + * when setting a clock whith this node in the clock path, but doesn't garantee the divider + * is at /1 at boot until a rate is set. 
+ */ +static const struct clk_div_table g12a_mipi_dsi_pxclk_div_table[] = { + { .val = 0, .div = 1 }, + { /* sentinel */ }, +}; + static struct clk_regmap g12a_mipi_dsi_pxclk_div = { .data = &(struct clk_regmap_div_data){ .offset = HHI_MIPIDSI_PHY_CLK_CNTL, .shift = 0, .width = 7, + .table = g12a_mipi_dsi_pxclk_div_table, }, .hw.init = &(struct clk_init_data){ .name = "mipi_dsi_pxclk_div", @@ -5578,4 +5614,4 @@ static struct platform_driver g12a_driver = { }; module_platform_driver(g12a_driver); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c index 4aec1740ac..dbda563729 100644 --- a/drivers/clk/meson/gxbb-aoclk.c +++ b/drivers/clk/meson/gxbb-aoclk.c @@ -300,4 +300,4 @@ static struct platform_driver gxbb_aoclkc_driver = { }, }; module_platform_driver(gxbb_aoclkc_driver); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 1b1279d947..29507b8c43 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -3569,4 +3569,4 @@ static struct platform_driver gxbb_driver = { }; module_platform_driver(gxbb_driver); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c index bf466fef26..b8a9d59e67 100644 --- a/drivers/clk/meson/meson-aoclk.c +++ b/drivers/clk/meson/meson-aoclk.c @@ -89,4 +89,4 @@ int meson_aoclkc_probe(struct platform_device *pdev) return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks); } EXPORT_SYMBOL_GPL(meson_aoclkc_probe); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c index 845ca8bfa3..3cbc7f233b 100644 --- a/drivers/clk/meson/meson-eeclk.c +++ b/drivers/clk/meson/meson-eeclk.c @@ -58,4 +58,4 @@ int meson_eeclkc_probe(struct platform_device *pdev) return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks); } EXPORT_SYMBOL_GPL(meson_eeclkc_probe); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/s4-peripherals.c b/drivers/clk/meson/s4-peripherals.c index 6c35de3d53..73340c7e81 100644 --- a/drivers/clk/meson/s4-peripherals.c +++ b/drivers/clk/meson/s4-peripherals.c @@ -2978,7 +2978,7 @@ static struct clk_regmap s4_pwm_j_div = { .name = "pwm_j_div", .ops = &clk_regmap_divider_ops, .parent_hws = (const struct clk_hw *[]) { - &s4_pwm_h_mux.hw + &s4_pwm_j_mux.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, @@ -3751,6 +3751,7 @@ static struct regmap_config clkc_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, + .max_register = CLKCTRL_DEMOD_CLK_CTRL, }; static struct meson_clk_hw_data s4_periphs_clks = { @@ -3799,6 +3800,7 @@ static const struct of_device_id clkc_match_table[] = { }, {} }; +MODULE_DEVICE_TABLE(of, clkc_match_table); static struct platform_driver s4_driver = { .probe = meson_s4_periphs_probe, diff --git a/drivers/clk/meson/s4-pll.c b/drivers/clk/meson/s4-pll.c index 8dfaeccaad..707c107a52 100644 --- a/drivers/clk/meson/s4-pll.c +++ b/drivers/clk/meson/s4-pll.c @@ -38,6 +38,11 @@ static struct clk_regmap s4_fixed_pll_dco = { .shift = 0, .width = 8, }, + .frac = { + .reg_off = ANACTRL_FIXPLL_CTRL1, + .shift = 0, + .width = 17, + }, .n = { .reg_off = ANACTRL_FIXPLL_CTRL0, .shift = 10, @@ -798,6 +803,7 @@ static struct regmap_config clkc_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, + .max_register = ANACTRL_HDMIPLL_CTRL0, }; static 
struct meson_clk_hw_data s4_pll_clks = { @@ -853,6 +859,7 @@ static const struct of_device_id clkc_match_table[] = { }, {} }; +MODULE_DEVICE_TABLE(of, clkc_match_table); static struct platform_driver s4_driver = { .probe = meson_s4_pll_probe, diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c index d12c45c4c2..987f5b0658 100644 --- a/drivers/clk/meson/sclk-div.c +++ b/drivers/clk/meson/sclk-div.c @@ -251,4 +251,4 @@ EXPORT_SYMBOL_GPL(meson_sclk_div_ops); MODULE_DESCRIPTION("Amlogic Sample divider driver"); MODULE_AUTHOR("Jerome Brunet "); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/vclk.c b/drivers/clk/meson/vclk.c new file mode 100644 index 0000000000..e886df55d6 --- /dev/null +++ b/drivers/clk/meson/vclk.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024 Neil Armstrong + */ + +#include +#include "vclk.h" + +/* The VCLK gate has a supplementary reset bit to pulse after ungating */ + +static inline struct meson_vclk_gate_data * +clk_get_meson_vclk_gate_data(struct clk_regmap *clk) +{ + return (struct meson_vclk_gate_data *)clk->data; +} + +static int meson_vclk_gate_enable(struct clk_hw *hw) +{ + struct clk_regmap *clk = to_clk_regmap(hw); + struct meson_vclk_gate_data *vclk = clk_get_meson_vclk_gate_data(clk); + + meson_parm_write(clk->map, &vclk->enable, 1); + + /* Do a reset pulse */ + meson_parm_write(clk->map, &vclk->reset, 1); + meson_parm_write(clk->map, &vclk->reset, 0); + + return 0; +} + +static void meson_vclk_gate_disable(struct clk_hw *hw) +{ + struct clk_regmap *clk = to_clk_regmap(hw); + struct meson_vclk_gate_data *vclk = clk_get_meson_vclk_gate_data(clk); + + meson_parm_write(clk->map, &vclk->enable, 0); +} + +static int meson_vclk_gate_is_enabled(struct clk_hw *hw) +{ + struct clk_regmap *clk = to_clk_regmap(hw); + struct meson_vclk_gate_data *vclk = clk_get_meson_vclk_gate_data(clk); + + return meson_parm_read(clk->map, &vclk->enable); +} + +const struct clk_ops meson_vclk_gate_ops = { + .enable = meson_vclk_gate_enable, + .disable = meson_vclk_gate_disable, + .is_enabled = meson_vclk_gate_is_enabled, +}; +EXPORT_SYMBOL_GPL(meson_vclk_gate_ops); + +/* The VCLK Divider has supplementary reset & enable bits */ + +static inline struct meson_vclk_div_data * +clk_get_meson_vclk_div_data(struct clk_regmap *clk) +{ + return (struct meson_vclk_div_data *)clk->data; +} + +static unsigned long meson_vclk_div_recalc_rate(struct clk_hw *hw, + unsigned long prate) +{ + struct clk_regmap *clk = to_clk_regmap(hw); + struct meson_vclk_div_data *vclk = clk_get_meson_vclk_div_data(clk); + + return divider_recalc_rate(hw, prate, meson_parm_read(clk->map, &vclk->div), + vclk->table, vclk->flags, vclk->div.width); +} + +static int meson_vclk_div_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_regmap *clk = to_clk_regmap(hw); + struct meson_vclk_div_data *vclk = clk_get_meson_vclk_div_data(clk); + + return divider_determine_rate(hw, req, vclk->table, vclk->div.width, + vclk->flags); +} + +static int meson_vclk_div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_regmap *clk = to_clk_regmap(hw); + struct meson_vclk_div_data *vclk = clk_get_meson_vclk_div_data(clk); + int ret; + + ret = divider_get_val(rate, parent_rate, vclk->table, vclk->div.width, + vclk->flags); + if (ret < 0) + return ret; + + meson_parm_write(clk->map, &vclk->div, ret); + + return 0; +}; + +static int meson_vclk_div_enable(struct clk_hw *hw) +{ + struct 
clk_regmap *clk = to_clk_regmap(hw); + struct meson_vclk_div_data *vclk = clk_get_meson_vclk_div_data(clk); + + /* Unreset the divider when ungating */ + meson_parm_write(clk->map, &vclk->reset, 0); + meson_parm_write(clk->map, &vclk->enable, 1); + + return 0; +} + +static void meson_vclk_div_disable(struct clk_hw *hw) +{ + struct clk_regmap *clk = to_clk_regmap(hw); + struct meson_vclk_div_data *vclk = clk_get_meson_vclk_div_data(clk); + + /* Reset the divider when gating */ + meson_parm_write(clk->map, &vclk->enable, 0); + meson_parm_write(clk->map, &vclk->reset, 1); +} + +static int meson_vclk_div_is_enabled(struct clk_hw *hw) +{ + struct clk_regmap *clk = to_clk_regmap(hw); + struct meson_vclk_div_data *vclk = clk_get_meson_vclk_div_data(clk); + + return meson_parm_read(clk->map, &vclk->enable); +} + +const struct clk_ops meson_vclk_div_ops = { + .recalc_rate = meson_vclk_div_recalc_rate, + .determine_rate = meson_vclk_div_determine_rate, + .set_rate = meson_vclk_div_set_rate, + .enable = meson_vclk_div_enable, + .disable = meson_vclk_div_disable, + .is_enabled = meson_vclk_div_is_enabled, +}; +EXPORT_SYMBOL_GPL(meson_vclk_div_ops); + +MODULE_DESCRIPTION("Amlogic vclk clock driver"); +MODULE_AUTHOR("Neil Armstrong "); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/vclk.h b/drivers/clk/meson/vclk.h new file mode 100644 index 0000000000..20b0b181db --- /dev/null +++ b/drivers/clk/meson/vclk.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Neil Armstrong + */ + +#ifndef __VCLK_H +#define __VCLK_H + +#include "clk-regmap.h" +#include "parm.h" + +/** + * struct meson_vclk_gate_data - vclk_gate regmap backed specific data + * + * @enable: vclk enable field + * @reset: vclk reset field + * @flags: hardware-specific flags + * + * Flags: + * Same as clk_gate except CLK_GATE_HIWORD_MASK which is ignored + */ +struct meson_vclk_gate_data { + struct parm enable; + struct parm reset; + u8 flags; +}; + +extern const struct clk_ops meson_vclk_gate_ops; + +/** + * struct meson_vclk_div_data - vclk_div regmap back specific data + * + * @div: divider field + * @enable: vclk divider enable field + * @reset: vclk divider reset field + * @table: array of value/divider pairs, last entry should have div = 0 + * + * Flags: + * Same as clk_divider except CLK_DIVIDER_HIWORD_MASK which is ignored + */ +struct meson_vclk_div_data { + struct parm div; + struct parm enable; + struct parm reset; + const struct clk_div_table *table; + u8 flags; +}; + +extern const struct clk_ops meson_vclk_div_ops; + +#endif /* __VCLK_H */ diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c index daff235bc7..ee129f8679 100644 --- a/drivers/clk/meson/vid-pll-div.c +++ b/drivers/clk/meson/vid-pll-div.c @@ -96,4 +96,4 @@ EXPORT_SYMBOL_GPL(meson_vid_pll_div_ro_ops); MODULE_DESCRIPTION("Amlogic video pll divider driver"); MODULE_AUTHOR("Neil Armstrong "); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c index 22eab91a67..28ec0da88c 100644 --- a/drivers/clk/microchip/clk-mpfs.c +++ b/drivers/clk/microchip/clk-mpfs.c @@ -4,12 +4,10 @@ * * Copyright (C) 2020-2022 Microchip Technology Inc. All rights reserved. 
*/ -#include #include #include #include #include -#include #include #include @@ -361,93 +359,6 @@ static int mpfs_clk_register_periphs(struct device *dev, struct mpfs_periph_hw_c return 0; } -/* - * Peripheral clock resets - */ - -#if IS_ENABLED(CONFIG_RESET_CONTROLLER) - -u32 mpfs_reset_read(struct device *dev) -{ - struct mpfs_clock_data *clock_data = dev_get_drvdata(dev->parent); - - return readl_relaxed(clock_data->base + REG_SUBBLK_RESET_CR); -} -EXPORT_SYMBOL_NS_GPL(mpfs_reset_read, MCHP_CLK_MPFS); - -void mpfs_reset_write(struct device *dev, u32 val) -{ - struct mpfs_clock_data *clock_data = dev_get_drvdata(dev->parent); - - writel_relaxed(val, clock_data->base + REG_SUBBLK_RESET_CR); -} -EXPORT_SYMBOL_NS_GPL(mpfs_reset_write, MCHP_CLK_MPFS); - -static void mpfs_reset_unregister_adev(void *_adev) -{ - struct auxiliary_device *adev = _adev; - - auxiliary_device_delete(adev); - auxiliary_device_uninit(adev); -} - -static void mpfs_reset_adev_release(struct device *dev) -{ - struct auxiliary_device *adev = to_auxiliary_dev(dev); - - kfree(adev); -} - -static struct auxiliary_device *mpfs_reset_adev_alloc(struct mpfs_clock_data *clk_data) -{ - struct auxiliary_device *adev; - int ret; - - adev = kzalloc(sizeof(*adev), GFP_KERNEL); - if (!adev) - return ERR_PTR(-ENOMEM); - - adev->name = "reset-mpfs"; - adev->dev.parent = clk_data->dev; - adev->dev.release = mpfs_reset_adev_release; - adev->id = 666u; - - ret = auxiliary_device_init(adev); - if (ret) { - kfree(adev); - return ERR_PTR(ret); - } - - return adev; -} - -static int mpfs_reset_controller_register(struct mpfs_clock_data *clk_data) -{ - struct auxiliary_device *adev; - int ret; - - adev = mpfs_reset_adev_alloc(clk_data); - if (IS_ERR(adev)) - return PTR_ERR(adev); - - ret = auxiliary_device_add(adev); - if (ret) { - auxiliary_device_uninit(adev); - return ret; - } - - return devm_add_action_or_reset(clk_data->dev, mpfs_reset_unregister_adev, adev); -} - -#else /* !CONFIG_RESET_CONTROLLER */ - -static int mpfs_reset_controller_register(struct mpfs_clock_data *clk_data) -{ - return 0; -} - -#endif /* !CONFIG_RESET_CONTROLLER */ - static int mpfs_clk_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -499,7 +410,7 @@ static int mpfs_clk_probe(struct platform_device *pdev) if (ret) return ret; - return mpfs_reset_controller_register(clk_data); + return mpfs_reset_controller_register(dev, clk_data->base + REG_SUBBLK_RESET_CR); } static const struct of_device_id mpfs_clk_of_match_table[] = { @@ -532,3 +443,4 @@ MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Driver"); MODULE_AUTHOR("Padmarao Begari "); MODULE_AUTHOR("Daire McNamara "); MODULE_AUTHOR("Conor Dooley "); +MODULE_IMPORT_NS(MCHP_CLK_MPFS); diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c index 69ebf65081..81efa88506 100644 --- a/drivers/clk/nxp/clk-lpc18xx-cgu.c +++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c @@ -250,7 +250,6 @@ static struct lpc18xx_cgu_base_clk lpc18xx_cgu_base_clks[] = { struct lpc18xx_pll { struct clk_hw hw; void __iomem *reg; - spinlock_t *lock; u8 flags; }; diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c index e12bb9abf6..e8632db2c5 100644 --- a/drivers/clk/qcom/apss-ipq-pll.c +++ b/drivers/clk/qcom/apss-ipq-pll.c @@ -8,43 +8,14 @@ #include "clk-alpha-pll.h" -/* - * Even though APSS PLL type is of existing one (like Huayra), its offsets - * are different from the one mentioned in the clk-alpha-pll.c, since the - * PLL is specific to APSS, so lets the define the same. 
- */ -static const u8 ipq_pll_offsets[][PLL_OFF_MAX_REGS] = { - [CLK_ALPHA_PLL_TYPE_HUAYRA] = { - [PLL_OFF_L_VAL] = 0x08, - [PLL_OFF_ALPHA_VAL] = 0x10, - [PLL_OFF_USER_CTL] = 0x18, - [PLL_OFF_CONFIG_CTL] = 0x20, - [PLL_OFF_CONFIG_CTL_U] = 0x24, - [PLL_OFF_STATUS] = 0x28, - [PLL_OFF_TEST_CTL] = 0x30, - [PLL_OFF_TEST_CTL_U] = 0x34, - }, - [CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = { - [PLL_OFF_L_VAL] = 0x08, - [PLL_OFF_ALPHA_VAL] = 0x10, - [PLL_OFF_ALPHA_VAL_U] = 0x14, - [PLL_OFF_USER_CTL] = 0x18, - [PLL_OFF_USER_CTL_U] = 0x1c, - [PLL_OFF_CONFIG_CTL] = 0x20, - [PLL_OFF_STATUS] = 0x28, - [PLL_OFF_TEST_CTL] = 0x30, - [PLL_OFF_TEST_CTL_U] = 0x34, - }, -}; - static struct clk_alpha_pll ipq_pll_huayra = { .offset = 0x0, - .regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_HUAYRA], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_APSS], .flags = SUPPORTS_DYNAMIC_UPDATE, .clkr = { .enable_reg = 0x0, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ + .hw.init = &(const struct clk_init_data) { .name = "a53pll", .parent_data = &(const struct clk_parent_data) { .fw_name = "xo", @@ -57,12 +28,7 @@ static struct clk_alpha_pll ipq_pll_huayra = { static struct clk_alpha_pll ipq_pll_stromer = { .offset = 0x0, - /* - * Reuse CLK_ALPHA_PLL_TYPE_STROMER_PLUS register offsets. - * Although this is a bit confusing, but the offset values - * are correct nevertheless. - */ - .regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_STROMER_PLUS], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_STROMER], .flags = SUPPORTS_DYNAMIC_UPDATE, .clkr = { .enable_reg = 0x0, @@ -80,12 +46,16 @@ static struct clk_alpha_pll ipq_pll_stromer = { static struct clk_alpha_pll ipq_pll_stromer_plus = { .offset = 0x0, - .regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_STROMER_PLUS], + /* + * The register offsets of the Stromer Plus PLL used in IPQ5332 + * are the same as the Stromer PLL's offsets. 
+ */ + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_STROMER], .flags = SUPPORTS_DYNAMIC_UPDATE, .clkr = { .enable_reg = 0x0, .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ + .hw.init = &(const struct clk_init_data) { .name = "a53pll", .parent_data = &(const struct clk_parent_data) { .fw_name = "xo", @@ -171,25 +141,25 @@ static const struct apss_pll_data ipq5018_pll_data = { .pll_config = &ipq5018_pll_config, }; -static struct apss_pll_data ipq5332_pll_data = { +static const struct apss_pll_data ipq5332_pll_data = { .pll_type = CLK_ALPHA_PLL_TYPE_STROMER_PLUS, .pll = &ipq_pll_stromer_plus, .pll_config = &ipq5332_pll_config, }; -static struct apss_pll_data ipq8074_pll_data = { +static const struct apss_pll_data ipq8074_pll_data = { .pll_type = CLK_ALPHA_PLL_TYPE_HUAYRA, .pll = &ipq_pll_huayra, .pll_config = &ipq8074_pll_config, }; -static struct apss_pll_data ipq6018_pll_data = { +static const struct apss_pll_data ipq6018_pll_data = { .pll_type = CLK_ALPHA_PLL_TYPE_HUAYRA, .pll = &ipq_pll_huayra, .pll_config = &ipq6018_pll_config, }; -static struct apss_pll_data ipq9574_pll_data = { +static const struct apss_pll_data ipq9574_pll_data = { .pll_type = CLK_ALPHA_PLL_TYPE_HUAYRA, .pll = &ipq_pll_huayra, .pll_config = &ipq9574_pll_config, diff --git a/drivers/clk/qcom/camcc-sc7280.c b/drivers/clk/qcom/camcc-sc7280.c index d89ddb2298..582fb3ba9c 100644 --- a/drivers/clk/qcom/camcc-sc7280.c +++ b/drivers/clk/qcom/camcc-sc7280.c @@ -2260,6 +2260,7 @@ static struct gdsc cam_cc_bps_gdsc = { .name = "cam_cc_bps_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, .flags = HW_CTRL | RETAIN_FF_ENABLE, }; @@ -2269,6 +2270,7 @@ static struct gdsc cam_cc_ife_0_gdsc = { .name = "cam_cc_ife_0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, .flags = RETAIN_FF_ENABLE, }; @@ -2278,6 +2280,7 @@ static struct gdsc cam_cc_ife_1_gdsc = { .name = "cam_cc_ife_1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, .flags = RETAIN_FF_ENABLE, }; @@ -2287,6 +2290,7 @@ static struct gdsc cam_cc_ife_2_gdsc = { .name = "cam_cc_ife_2_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, .flags = RETAIN_FF_ENABLE, }; @@ -2296,6 +2300,7 @@ static struct gdsc cam_cc_ipe_0_gdsc = { .name = "cam_cc_ipe_0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, .flags = HW_CTRL | RETAIN_FF_ENABLE, }; diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 003308a288..c51647e37d 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -83,6 +83,16 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = { [PLL_OFF_TEST_CTL_U] = 0x20, [PLL_OFF_STATUS] = 0x24, }, + [CLK_ALPHA_PLL_TYPE_HUAYRA_APSS] = { + [PLL_OFF_L_VAL] = 0x08, + [PLL_OFF_ALPHA_VAL] = 0x10, + [PLL_OFF_USER_CTL] = 0x18, + [PLL_OFF_CONFIG_CTL] = 0x20, + [PLL_OFF_CONFIG_CTL_U] = 0x24, + [PLL_OFF_STATUS] = 0x28, + [PLL_OFF_TEST_CTL] = 0x30, + [PLL_OFF_TEST_CTL_U] = 0x34, + }, [CLK_ALPHA_PLL_TYPE_BRAMMO] = { [PLL_OFF_L_VAL] = 0x04, [PLL_OFF_ALPHA_VAL] = 0x08, @@ -213,9 +223,9 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = { [PLL_OFF_USER_CTL] = 0x18, [PLL_OFF_USER_CTL_U] = 0x1c, [PLL_OFF_CONFIG_CTL] = 0x20, + [PLL_OFF_STATUS] = 0x28, [PLL_OFF_TEST_CTL] = 0x30, [PLL_OFF_TEST_CTL_U] = 0x34, - [PLL_OFF_STATUS] = 0x28, }, [CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = { [PLL_OFF_L_VAL] = 0x04, @@ -2113,6 +2123,15 @@ void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regma { u32 lval 
= config->l; + /* + * If the bootloader left the PLL enabled it's likely that there are + * RCGs that will lock up if we disable the PLL below. + */ + if (trion_pll_is_enabled(pll, regmap)) { + pr_debug("Lucid Evo PLL is already enabled, skipping configuration\n"); + return; + } + lval |= TRION_PLL_CAL_VAL << LUCID_EVO_PLL_CAL_L_VAL_SHIFT; clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), lval); clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha); diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h index fb6d50263b..c7055b6c42 100644 --- a/drivers/clk/qcom/clk-alpha-pll.h +++ b/drivers/clk/qcom/clk-alpha-pll.h @@ -15,6 +15,7 @@ enum { CLK_ALPHA_PLL_TYPE_DEFAULT, CLK_ALPHA_PLL_TYPE_HUAYRA, + CLK_ALPHA_PLL_TYPE_HUAYRA_APSS, CLK_ALPHA_PLL_TYPE_BRAMMO, CLK_ALPHA_PLL_TYPE_FABIA, CLK_ALPHA_PLL_TYPE_TRION, @@ -73,8 +74,10 @@ struct pll_vco { /** * struct clk_alpha_pll - phase locked loop (PLL) * @offset: base address of registers - * @vco_table: array of VCO settings * @regs: alpha pll register map (see @clk_alpha_pll_regs) + * @vco_table: array of VCO settings + * @num_vco: number of VCO settings in @vco_table + * @flags: bitmask to indicate features supported by the hardware * @clkr: regmap clock handle */ struct clk_alpha_pll { diff --git a/drivers/clk/qcom/clk-cbf-8996.c b/drivers/clk/qcom/clk-cbf-8996.c index fe24b4abea..76bf523431 100644 --- a/drivers/clk/qcom/clk-cbf-8996.c +++ b/drivers/clk/qcom/clk-cbf-8996.c @@ -41,17 +41,6 @@ enum { #define CBF_PLL_OFFSET 0xf000 -static const u8 cbf_pll_regs[PLL_OFF_MAX_REGS] = { - [PLL_OFF_L_VAL] = 0x08, - [PLL_OFF_ALPHA_VAL] = 0x10, - [PLL_OFF_USER_CTL] = 0x18, - [PLL_OFF_CONFIG_CTL] = 0x20, - [PLL_OFF_CONFIG_CTL_U] = 0x24, - [PLL_OFF_TEST_CTL] = 0x30, - [PLL_OFF_TEST_CTL_U] = 0x34, - [PLL_OFF_STATUS] = 0x28, -}; - static struct alpha_pll_config cbfpll_config = { .l = 72, .config_ctl_val = 0x200d4828, @@ -67,7 +56,7 @@ static struct alpha_pll_config cbfpll_config = { static struct clk_alpha_pll cbf_pll = { .offset = CBF_PLL_OFFSET, - .regs = cbf_pll_regs, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_APSS], .flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE, .clkr.hw.init = &(struct clk_init_data){ .name = "cbf_pll", diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h index e6d84c8c79..d7414361e4 100644 --- a/drivers/clk/qcom/clk-rcg.h +++ b/drivers/clk/qcom/clk-rcg.h @@ -17,6 +17,23 @@ struct freq_tbl { u16 n; }; +#define C(s, h, m, n) { (s), (2 * (h) - 1), (m), (n) } +#define FM(f, confs) { (f), ARRAY_SIZE(confs), (confs) } +#define FMS(f, s, h, m, n) { (f), 1, (const struct freq_conf []){ C(s, h, m, n) } } + +struct freq_conf { + u8 src; + u8 pre_div; + u16 m; + u16 n; +}; + +struct freq_multi_tbl { + unsigned long freq; + size_t num_confs; + const struct freq_conf *confs; +}; + /** * struct mn - M/N:D counter * @mnctr_en_bit: bit to enable mn counter @@ -138,6 +155,7 @@ extern const struct clk_ops clk_dyn_rcg_ops; * @safe_src_index: safe src index value * @parent_map: map from software's parent index to hardware's src_sel field * @freq_tbl: frequency table + * @freq_multi_tbl: frequency table for clocks reachable with multiple RCGs conf * @clkr: regmap clock handle * @cfg_off: defines the cfg register offset from the CMD_RCGR + CFG_REG * @parked_cfg: cached value of the CFG register for parked RCGs @@ -149,7 +167,10 @@ struct clk_rcg2 { u8 hid_width; u8 safe_src_index; const struct parent_map *parent_map; - const struct freq_tbl *freq_tbl; + union { + const struct 
freq_tbl *freq_tbl; + const struct freq_multi_tbl *freq_multi_tbl; + }; struct clk_regmap clkr; u8 cfg_off; u32 parked_cfg; @@ -169,6 +190,7 @@ struct clk_rcg2_gfx3d { extern const struct clk_ops clk_rcg2_ops; extern const struct clk_ops clk_rcg2_floor_ops; +extern const struct clk_ops clk_rcg2_fm_ops; extern const struct clk_ops clk_rcg2_mux_closest_ops; extern const struct clk_ops clk_edp_pixel_ops; extern const struct clk_ops clk_byte_ops; diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 5183c74b07..30b19bd39d 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -260,6 +260,115 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f, return 0; } +static const struct freq_conf * +__clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f, + unsigned long req_rate) +{ + unsigned long rate_diff, best_rate_diff = ULONG_MAX; + const struct freq_conf *conf, *best_conf = NULL; + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + const char *name = clk_hw_get_name(hw); + unsigned long parent_rate, rate; + struct clk_hw *p; + int index, i; + + /* Exit early if only one config is defined */ + if (f->num_confs == 1) { + best_conf = f->confs; + goto exit; + } + + /* Search in each provided config the one that is near the wanted rate */ + for (i = 0, conf = f->confs; i < f->num_confs; i++, conf++) { + index = qcom_find_src_index(hw, rcg->parent_map, conf->src); + if (index < 0) + continue; + + p = clk_hw_get_parent_by_index(hw, index); + if (!p) + continue; + + parent_rate = clk_hw_get_rate(p); + rate = calc_rate(parent_rate, conf->n, conf->m, conf->n, conf->pre_div); + + if (rate == req_rate) { + best_conf = conf; + goto exit; + } + + rate_diff = abs_diff(req_rate, rate); + if (rate_diff < best_rate_diff) { + best_rate_diff = rate_diff; + best_conf = conf; + } + } + + /* + * Very unlikely. Warn if we couldn't find a correct config + * due to parent not found in every config. 
+ */ + if (unlikely(!best_conf)) { + WARN(1, "%s: can't find a configuration for rate %lu\n", + name, req_rate); + return ERR_PTR(-EINVAL); + } + +exit: + return best_conf; +} + +static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_multi_tbl *f, + struct clk_rate_request *req) +{ + unsigned long clk_flags, rate = req->rate; + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + const struct freq_conf *conf; + struct clk_hw *p; + int index; + + f = qcom_find_freq_multi(f, rate); + if (!f || !f->confs) + return -EINVAL; + + conf = __clk_rcg2_select_conf(hw, f, rate); + if (IS_ERR(conf)) + return PTR_ERR(conf); + index = qcom_find_src_index(hw, rcg->parent_map, conf->src); + if (index < 0) + return index; + + clk_flags = clk_hw_get_flags(hw); + p = clk_hw_get_parent_by_index(hw, index); + if (!p) + return -EINVAL; + + if (clk_flags & CLK_SET_RATE_PARENT) { + rate = f->freq; + if (conf->pre_div) { + if (!rate) + rate = req->rate; + rate /= 2; + rate *= conf->pre_div + 1; + } + + if (conf->n) { + u64 tmp = rate; + + tmp = tmp * conf->n; + do_div(tmp, conf->m); + rate = tmp; + } + } else { + rate = clk_hw_get_rate(p); + } + + req->best_parent_hw = p; + req->best_parent_rate = rate; + req->rate = f->freq; + + return 0; +} + static int clk_rcg2_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { @@ -276,6 +385,14 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw, return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR); } +static int clk_rcg2_fm_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + + return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req); +} + static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f, u32 *_cfg) { @@ -371,6 +488,30 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate, return clk_rcg2_configure(rcg, f); } +static int __clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate) +{ + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + const struct freq_multi_tbl *f; + const struct freq_conf *conf; + struct freq_tbl f_tbl = {}; + + f = qcom_find_freq_multi(rcg->freq_multi_tbl, rate); + if (!f || !f->confs) + return -EINVAL; + + conf = __clk_rcg2_select_conf(hw, f, rate); + if (IS_ERR(conf)) + return PTR_ERR(conf); + + f_tbl.freq = f->freq; + f_tbl.src = conf->src; + f_tbl.pre_div = conf->pre_div; + f_tbl.m = conf->m; + f_tbl.n = conf->n; + + return clk_rcg2_configure(rcg, &f_tbl); +} + static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { @@ -383,6 +524,12 @@ static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate, return __clk_rcg2_set_rate(hw, rate, FLOOR); } +static int clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + return __clk_rcg2_fm_set_rate(hw, rate); +} + static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate, u8 index) { @@ -395,6 +542,12 @@ static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw, return __clk_rcg2_set_rate(hw, rate, FLOOR); } +static int clk_rcg2_fm_set_rate_and_parent(struct clk_hw *hw, + unsigned long rate, unsigned long parent_rate, u8 index) +{ + return __clk_rcg2_fm_set_rate(hw, rate); +} + static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); @@ -505,6 +658,19 @@ const struct clk_ops clk_rcg2_floor_ops = { }; EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops); +const struct 
clk_ops clk_rcg2_fm_ops = { + .is_enabled = clk_rcg2_is_enabled, + .get_parent = clk_rcg2_get_parent, + .set_parent = clk_rcg2_set_parent, + .recalc_rate = clk_rcg2_recalc_rate, + .determine_rate = clk_rcg2_fm_determine_rate, + .set_rate = clk_rcg2_fm_set_rate, + .set_rate_and_parent = clk_rcg2_fm_set_rate_and_parent, + .get_duty_cycle = clk_rcg2_get_duty_cycle, + .set_duty_cycle = clk_rcg2_set_duty_cycle, +}; +EXPORT_SYMBOL_GPL(clk_rcg2_fm_ops); + const struct clk_ops clk_rcg2_mux_closest_ops = { .determine_rate = __clk_mux_determine_rate_closest, .get_parent = clk_rcg2_get_parent, @@ -1138,7 +1304,39 @@ clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) return clk_rcg2_recalc_rate(hw, parent_rate); } +static int clk_rcg2_shared_init(struct clk_hw *hw) +{ + /* + * This does a few things: + * + * 1. Sets rcg->parked_cfg to reflect the value at probe so that the + * proper parent is reported from clk_rcg2_shared_get_parent(). + * + * 2. Clears the force enable bit of the RCG because we rely on child + * clks (branches) to turn the RCG on/off with a hardware feedback + * mechanism and only set the force enable bit in the RCG when we + * want to make sure the clk stays on for parent switches or + * parking. + * + * 3. Parks shared RCGs on the safe source at registration because we + * can't be certain that the parent clk will stay on during boot, + * especially if the parent is shared. If this RCG is enabled at + * boot, and the parent is turned off, the RCG will get stuck on. A + * GDSC can wedge if it is turned on and the RCG is stuck on because + * the GDSC's controller will hang waiting for the clk status to + * toggle on when it never does. + * + * The safest option here is to "park" the RCG at init so that the clk + * can never get stuck on or off. This ensures the GDSC can't get + * wedged.
+ */ + clk_rcg2_shared_disable(hw); + + return 0; +} + const struct clk_ops clk_rcg2_shared_ops = { + .init = clk_rcg2_shared_init, .enable = clk_rcg2_shared_enable, .disable = clk_rcg2_shared_disable, .get_parent = clk_rcg2_shared_get_parent, diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c index 745026ef4d..9da034f8f2 100644 --- a/drivers/clk/qcom/clk-rpm.c +++ b/drivers/clk/qcom/clk-rpm.c @@ -98,7 +98,6 @@ struct clk_rpm { }; struct rpm_cc { - struct qcom_rpm *rpm; struct clk_rpm **clks; size_t num_clks; u32 xo_buffer_value; diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index 75f09e6e05..48f81e3a5e 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -41,6 +41,24 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate) } EXPORT_SYMBOL_GPL(qcom_find_freq); +const struct freq_multi_tbl *qcom_find_freq_multi(const struct freq_multi_tbl *f, + unsigned long rate) +{ + if (!f) + return NULL; + + if (!f->freq) + return f; + + for (; f->freq; f++) + if (rate <= f->freq) + return f; + + /* Default to our fastest rate */ + return f - 1; +} +EXPORT_SYMBOL_GPL(qcom_find_freq_multi); + const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f, unsigned long rate) { diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h index 9c8f7b798d..2d4a8a837e 100644 --- a/drivers/clk/qcom/common.h +++ b/drivers/clk/qcom/common.h @@ -45,6 +45,8 @@ extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate); extern const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f, unsigned long rate); +extern const struct freq_multi_tbl *qcom_find_freq_multi(const struct freq_multi_tbl *f, + unsigned long rate); extern void qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count); extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c index 7bc679871f..d2be56c589 100644 --- a/drivers/clk/qcom/gcc-ipq8074.c +++ b/drivers/clk/qcom/gcc-ipq8074.c @@ -1677,15 +1677,23 @@ static struct clk_regmap_div nss_port4_tx_div_clk_src = { }, }; -static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = { - F(19200000, P_XO, 1, 0, 0), - F(25000000, P_UNIPHY1_RX, 12.5, 0, 0), - F(25000000, P_UNIPHY0_RX, 5, 0, 0), - F(78125000, P_UNIPHY1_RX, 4, 0, 0), - F(125000000, P_UNIPHY1_RX, 2.5, 0, 0), - F(125000000, P_UNIPHY0_RX, 1, 0, 0), - F(156250000, P_UNIPHY1_RX, 2, 0, 0), - F(312500000, P_UNIPHY1_RX, 1, 0, 0), +static const struct freq_conf ftbl_nss_port5_rx_clk_src_25[] = { + C(P_UNIPHY1_RX, 12.5, 0, 0), + C(P_UNIPHY0_RX, 5, 0, 0), +}; + +static const struct freq_conf ftbl_nss_port5_rx_clk_src_125[] = { + C(P_UNIPHY1_RX, 2.5, 0, 0), + C(P_UNIPHY0_RX, 1, 0, 0), +}; + +static const struct freq_multi_tbl ftbl_nss_port5_rx_clk_src[] = { + FMS(19200000, P_XO, 1, 0, 0), + FM(25000000, ftbl_nss_port5_rx_clk_src_25), + FMS(78125000, P_UNIPHY1_RX, 4, 0, 0), + FM(125000000, ftbl_nss_port5_rx_clk_src_125), + FMS(156250000, P_UNIPHY1_RX, 2, 0, 0), + FMS(312500000, P_UNIPHY1_RX, 1, 0, 0), { } }; @@ -1712,14 +1720,14 @@ gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map[] = { static struct clk_rcg2 nss_port5_rx_clk_src = { .cmd_rcgr = 0x68060, - .freq_tbl = ftbl_nss_port5_rx_clk_src, + .freq_multi_tbl = ftbl_nss_port5_rx_clk_src, .hid_width = 5, .parent_map = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map, .clkr.hw.init = &(struct clk_init_data){ .name = "nss_port5_rx_clk_src", 
.parent_data = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias, .num_parents = ARRAY_SIZE(gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias), - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_fm_ops, }, }; @@ -1739,15 +1747,23 @@ static struct clk_regmap_div nss_port5_rx_div_clk_src = { }, }; -static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = { - F(19200000, P_XO, 1, 0, 0), - F(25000000, P_UNIPHY1_TX, 12.5, 0, 0), - F(25000000, P_UNIPHY0_TX, 5, 0, 0), - F(78125000, P_UNIPHY1_TX, 4, 0, 0), - F(125000000, P_UNIPHY1_TX, 2.5, 0, 0), - F(125000000, P_UNIPHY0_TX, 1, 0, 0), - F(156250000, P_UNIPHY1_TX, 2, 0, 0), - F(312500000, P_UNIPHY1_TX, 1, 0, 0), +static const struct freq_conf ftbl_nss_port5_tx_clk_src_25[] = { + C(P_UNIPHY1_TX, 12.5, 0, 0), + C(P_UNIPHY0_TX, 5, 0, 0), +}; + +static const struct freq_conf ftbl_nss_port5_tx_clk_src_125[] = { + C(P_UNIPHY1_TX, 2.5, 0, 0), + C(P_UNIPHY0_TX, 1, 0, 0), +}; + +static const struct freq_multi_tbl ftbl_nss_port5_tx_clk_src[] = { + FMS(19200000, P_XO, 1, 0, 0), + FM(25000000, ftbl_nss_port5_tx_clk_src_25), + FMS(78125000, P_UNIPHY1_TX, 4, 0, 0), + FM(125000000, ftbl_nss_port5_tx_clk_src_125), + FMS(156250000, P_UNIPHY1_TX, 2, 0, 0), + FMS(312500000, P_UNIPHY1_TX, 1, 0, 0), { } }; @@ -1774,14 +1790,14 @@ gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map[] = { static struct clk_rcg2 nss_port5_tx_clk_src = { .cmd_rcgr = 0x68068, - .freq_tbl = ftbl_nss_port5_tx_clk_src, + .freq_multi_tbl = ftbl_nss_port5_tx_clk_src, .hid_width = 5, .parent_map = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map, .clkr.hw.init = &(struct clk_init_data){ .name = "nss_port5_tx_clk_src", .parent_data = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias, .num_parents = ARRAY_SIZE(gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias), - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_fm_ops, }, }; @@ -1801,15 +1817,23 @@ static struct clk_regmap_div nss_port5_tx_div_clk_src = { }, }; -static const struct freq_tbl ftbl_nss_port6_rx_clk_src[] = { - F(19200000, P_XO, 1, 0, 0), - F(25000000, P_UNIPHY2_RX, 5, 0, 0), - F(25000000, P_UNIPHY2_RX, 12.5, 0, 0), - F(78125000, P_UNIPHY2_RX, 4, 0, 0), - F(125000000, P_UNIPHY2_RX, 1, 0, 0), - F(125000000, P_UNIPHY2_RX, 2.5, 0, 0), - F(156250000, P_UNIPHY2_RX, 2, 0, 0), - F(312500000, P_UNIPHY2_RX, 1, 0, 0), +static const struct freq_conf ftbl_nss_port6_rx_clk_src_25[] = { + C(P_UNIPHY2_RX, 5, 0, 0), + C(P_UNIPHY2_RX, 12.5, 0, 0), +}; + +static const struct freq_conf ftbl_nss_port6_rx_clk_src_125[] = { + C(P_UNIPHY2_RX, 1, 0, 0), + C(P_UNIPHY2_RX, 2.5, 0, 0), +}; + +static const struct freq_multi_tbl ftbl_nss_port6_rx_clk_src[] = { + FMS(19200000, P_XO, 1, 0, 0), + FM(25000000, ftbl_nss_port6_rx_clk_src_25), + FMS(78125000, P_UNIPHY2_RX, 4, 0, 0), + FM(125000000, ftbl_nss_port6_rx_clk_src_125), + FMS(156250000, P_UNIPHY2_RX, 2, 0, 0), + FMS(312500000, P_UNIPHY2_RX, 1, 0, 0), { } }; @@ -1831,14 +1855,14 @@ static const struct parent_map gcc_xo_uniphy2_rx_tx_ubi32_bias_map[] = { static struct clk_rcg2 nss_port6_rx_clk_src = { .cmd_rcgr = 0x68070, - .freq_tbl = ftbl_nss_port6_rx_clk_src, + .freq_multi_tbl = ftbl_nss_port6_rx_clk_src, .hid_width = 5, .parent_map = gcc_xo_uniphy2_rx_tx_ubi32_bias_map, .clkr.hw.init = &(struct clk_init_data){ .name = "nss_port6_rx_clk_src", .parent_data = gcc_xo_uniphy2_rx_tx_ubi32_bias, .num_parents = ARRAY_SIZE(gcc_xo_uniphy2_rx_tx_ubi32_bias), - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_fm_ops, }, }; @@ -1858,15 +1882,23 @@ static struct clk_regmap_div nss_port6_rx_div_clk_src = { }, }; -static const struct freq_tbl 
ftbl_nss_port6_tx_clk_src[] = { - F(19200000, P_XO, 1, 0, 0), - F(25000000, P_UNIPHY2_TX, 5, 0, 0), - F(25000000, P_UNIPHY2_TX, 12.5, 0, 0), - F(78125000, P_UNIPHY2_TX, 4, 0, 0), - F(125000000, P_UNIPHY2_TX, 1, 0, 0), - F(125000000, P_UNIPHY2_TX, 2.5, 0, 0), - F(156250000, P_UNIPHY2_TX, 2, 0, 0), - F(312500000, P_UNIPHY2_TX, 1, 0, 0), +static const struct freq_conf ftbl_nss_port6_tx_clk_src_25[] = { + C(P_UNIPHY2_TX, 5, 0, 0), + C(P_UNIPHY2_TX, 12.5, 0, 0), +}; + +static const struct freq_conf ftbl_nss_port6_tx_clk_src_125[] = { + C(P_UNIPHY2_TX, 1, 0, 0), + C(P_UNIPHY2_TX, 2.5, 0, 0), +}; + +static const struct freq_multi_tbl ftbl_nss_port6_tx_clk_src[] = { + FMS(19200000, P_XO, 1, 0, 0), + FM(25000000, ftbl_nss_port6_tx_clk_src_25), + FMS(78125000, P_UNIPHY1_RX, 4, 0, 0), + FM(125000000, ftbl_nss_port6_tx_clk_src_125), + FMS(156250000, P_UNIPHY1_RX, 2, 0, 0), + FMS(312500000, P_UNIPHY1_RX, 1, 0, 0), { } }; @@ -1888,14 +1920,14 @@ static const struct parent_map gcc_xo_uniphy2_tx_rx_ubi32_bias_map[] = { static struct clk_rcg2 nss_port6_tx_clk_src = { .cmd_rcgr = 0x68078, - .freq_tbl = ftbl_nss_port6_tx_clk_src, + .freq_multi_tbl = ftbl_nss_port6_tx_clk_src, .hid_width = 5, .parent_map = gcc_xo_uniphy2_tx_rx_ubi32_bias_map, .clkr.hw.init = &(struct clk_init_data){ .name = "nss_port6_tx_clk_src", .parent_data = gcc_xo_uniphy2_tx_rx_ubi32_bias, .num_parents = ARRAY_SIZE(gcc_xo_uniphy2_tx_rx_ubi32_bias), - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_fm_ops, }, }; diff --git a/drivers/clk/qcom/gcc-msm8917.c b/drivers/clk/qcom/gcc-msm8917.c index f2dd132e2f..f2b8729e41 100644 --- a/drivers/clk/qcom/gcc-msm8917.c +++ b/drivers/clk/qcom/gcc-msm8917.c @@ -3278,6 +3278,7 @@ static const struct of_device_id gcc_msm8917_match_table[] = { { .compatible = "qcom,gcc-qm215", .data = &gcc_qm215_desc }, {}, }; +MODULE_DEVICE_TABLE(of, gcc_msm8917_match_table); static struct platform_driver gcc_msm8917_driver = { .probe = gcc_msm8917_probe, diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c index 68359534ff..7563bff581 100644 --- a/drivers/clk/qcom/gcc-msm8953.c +++ b/drivers/clk/qcom/gcc-msm8953.c @@ -4227,6 +4227,7 @@ static const struct of_device_id gcc_msm8953_match_table[] = { { .compatible = "qcom,gcc-msm8953" }, {}, }; +MODULE_DEVICE_TABLE(of, gcc_msm8953_match_table); static struct platform_driver gcc_msm8953_driver = { .probe = gcc_msm8953_probe, diff --git a/drivers/clk/qcom/gcc-sa8775p.c b/drivers/clk/qcom/gcc-sa8775p.c index 5bcbfbf52c..9bbc0836fa 100644 --- a/drivers/clk/qcom/gcc-sa8775p.c +++ b/drivers/clk/qcom/gcc-sa8775p.c @@ -4305,74 +4305,114 @@ static struct clk_branch gcc_video_axi1_clk = { static struct gdsc pcie_0_gdsc = { .gdscr = 0xa9004, + .collapse_ctrl = 0x4b104, + .collapse_mask = BIT(0), + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "pcie_0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc pcie_1_gdsc = { .gdscr = 0x77004, + .collapse_ctrl = 0x4b104, + .collapse_mask = BIT(1), + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "pcie_1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc ufs_card_gdsc = { .gdscr = 0x81004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ufs_card_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc ufs_phy_gdsc = { 
.gdscr = 0x83004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ufs_phy_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc usb20_prim_gdsc = { .gdscr = 0x1c004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "usb20_prim_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc usb30_prim_gdsc = { .gdscr = 0x1b004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "usb30_prim_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc usb30_sec_gdsc = { .gdscr = 0x2f004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "usb30_sec_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc emac0_gdsc = { .gdscr = 0xb6004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "emac0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc emac1_gdsc = { .gdscr = 0xb4004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "emac1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct clk_regmap *gcc_sa8775p_clocks[] = { diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c index f45a831890..67ea9cf530 100644 --- a/drivers/clk/qcom/gcc-sc7280.c +++ b/drivers/clk/qcom/gcc-sc7280.c @@ -3463,6 +3463,9 @@ static int gcc_sc7280_probe(struct platform_device *pdev) qcom_branch_set_clk_en(regmap, 0x71004);/* GCC_GPU_CFG_AHB_CLK */ regmap_update_bits(regmap, 0x7100C, BIT(13), BIT(13)); + /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */ + qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true); + ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, ARRAY_SIZE(gcc_dfs_clocks)); if (ret) diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index a47ef9dfa8..1f748141d1 100644 --- a/drivers/clk/qcom/gcc-sm8150.c +++ b/drivers/clk/qcom/gcc-sm8150.c @@ -207,28 +207,6 @@ static const struct clk_parent_data gcc_parents_7[] = { { .hw = &gpll0_out_even.clkr.hw }, }; -static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = { - F(19200000, P_BI_TCXO, 1, 0, 0), - F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0), - F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), - { } -}; - -static struct clk_rcg2 gcc_cpuss_ahb_clk_src = { - .cmd_rcgr = 0x48014, - .mnd_width = 0, - .hid_width = 5, - .parent_map = gcc_parent_map_0, - .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_cpuss_ahb_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, - }, -}; - static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = { F(19200000, P_BI_TCXO, 1, 0, 0), F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), @@ -1361,24 +1339,6 @@ static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = { }, }; -static struct clk_branch gcc_cpuss_ahb_clk = { - .halt_reg = 0x48000, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0x52004, - .enable_mask = BIT(21), - .hw.init = &(struct clk_init_data){ - .name = "gcc_cpuss_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ - &gcc_cpuss_ahb_clk_src.clkr.hw }, - .num_parents = 1, - /* 
required for cpuss */ - .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_cpuss_dvm_bus_clk = { .halt_reg = 0x48190, .halt_check = BRANCH_HALT, @@ -2685,24 +2645,6 @@ static struct clk_branch gcc_sdcc4_apps_clk = { }, }; -static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = { - .halt_reg = 0x4819c, - .halt_check = BRANCH_HALT_VOTED, - .clkr = { - .enable_reg = 0x52004, - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "gcc_sys_noc_cpuss_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ - &gcc_cpuss_ahb_clk_src.clkr.hw }, - .num_parents = 1, - /* required for cpuss */ - .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_tsif_ahb_clk = { .halt_reg = 0x36004, .halt_check = BRANCH_HALT, @@ -3550,8 +3492,6 @@ static struct clk_regmap *gcc_sm8150_clocks[] = { [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr, [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr, [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr, - [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr, - [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr, [GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr, [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr, [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr, @@ -3669,7 +3609,6 @@ static struct clk_regmap *gcc_sm8150_clocks[] = { [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr, [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr, [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr, - [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr, [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr, [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr, [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr, diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c index 1404017be9..a263f0c412 100644 --- a/drivers/clk/qcom/gcc-x1e80100.c +++ b/drivers/clk/qcom/gcc-x1e80100.c @@ -2812,7 +2812,7 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = { static struct clk_branch gcc_pcie_0_pipe_clk = { .halt_reg = 0xa0044, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52010, .enable_mask = BIT(25), @@ -2901,7 +2901,7 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = { static struct clk_branch gcc_pcie_1_pipe_clk = { .halt_reg = 0x2c044, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52020, .enable_mask = BIT(30), @@ -2990,7 +2990,7 @@ static struct clk_branch gcc_pcie_2_mstr_axi_clk = { static struct clk_branch gcc_pcie_2_pipe_clk = { .halt_reg = 0x13044, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52020, .enable_mask = BIT(23), @@ -3110,7 +3110,7 @@ static struct clk_branch gcc_pcie_3_phy_rchng_clk = { static struct clk_branch gcc_pcie_3_pipe_clk = { .halt_reg = 0x58050, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52020, .enable_mask = BIT(3), @@ -3235,7 +3235,7 @@ static struct clk_branch gcc_pcie_4_phy_rchng_clk = { static struct clk_branch gcc_pcie_4_pipe_clk = { .halt_reg = 0x6b044, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52008, .enable_mask = BIT(4), @@ -3360,7 +3360,7 @@ static struct clk_branch gcc_pcie_5_phy_rchng_clk = { static struct clk_branch gcc_pcie_5_pipe_clk = { .halt_reg = 0x2f044, - .halt_check = 
BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52018, .enable_mask = BIT(17), @@ -3498,7 +3498,7 @@ static struct clk_branch gcc_pcie_6a_phy_rchng_clk = { static struct clk_branch gcc_pcie_6a_pipe_clk = { .halt_reg = 0x31050, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52018, .enable_mask = BIT(26), @@ -3636,7 +3636,7 @@ static struct clk_branch gcc_pcie_6b_phy_rchng_clk = { static struct clk_branch gcc_pcie_6b_pipe_clk = { .halt_reg = 0x8d050, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52000, .enable_mask = BIT(30), @@ -5109,7 +5109,7 @@ static struct clk_branch gcc_usb3_mp_phy_com_aux_clk = { static struct clk_branch gcc_usb3_mp_phy_pipe_0_clk = { .halt_reg = 0x17290, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x17290, .enable_mask = BIT(0), @@ -5122,7 +5122,7 @@ static struct clk_branch gcc_usb3_mp_phy_pipe_0_clk = { static struct clk_branch gcc_usb3_mp_phy_pipe_1_clk = { .halt_reg = 0x17298, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x17298, .enable_mask = BIT(0), @@ -5186,7 +5186,7 @@ static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = { static struct clk_branch gcc_usb3_prim_phy_pipe_clk = { .halt_reg = 0x39068, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0x39068, .hwcg_bit = 1, .clkr = { @@ -5257,7 +5257,7 @@ static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = { static struct clk_branch gcc_usb3_sec_phy_pipe_clk = { .halt_reg = 0xa1068, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0xa1068, .hwcg_bit = 1, .clkr = { @@ -5269,6 +5269,7 @@ static struct clk_branch gcc_usb3_sec_phy_pipe_clk = { &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw, }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -5327,7 +5328,7 @@ static struct clk_regmap_mux gcc_usb3_tert_phy_pipe_clk_src = { static struct clk_branch gcc_usb3_tert_phy_pipe_clk = { .halt_reg = 0xa2068, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0xa2068, .hwcg_bit = 1, .clkr = { @@ -5339,6 +5340,7 @@ static struct clk_branch gcc_usb3_tert_phy_pipe_clk = { &gcc_usb3_tert_phy_pipe_clk_src.clkr.hw, }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -5405,7 +5407,7 @@ static struct clk_branch gcc_usb4_0_master_clk = { static struct clk_branch gcc_usb4_0_phy_p2rr2p_pipe_clk = { .halt_reg = 0x9f0d8, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x9f0d8, .enable_mask = BIT(0), @@ -5418,7 +5420,7 @@ static struct clk_branch gcc_usb4_0_phy_p2rr2p_pipe_clk = { static struct clk_branch gcc_usb4_0_phy_pcie_pipe_clk = { .halt_reg = 0x9f048, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52010, .enable_mask = BIT(19), @@ -5457,7 +5459,7 @@ static struct clk_branch gcc_usb4_0_phy_rx1_clk = { static struct clk_branch gcc_usb4_0_phy_usb_pipe_clk = { .halt_reg = 0x9f0a4, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0x9f0a4, .hwcg_bit = 1, .clkr = { @@ -5582,7 +5584,7 @@ static struct clk_branch gcc_usb4_1_master_clk = { static struct clk_branch gcc_usb4_1_phy_p2rr2p_pipe_clk = { .halt_reg = 0x2b0d8, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x2b0d8, .enable_mask = BIT(0), @@ -5595,7 
+5597,7 @@ static struct clk_branch gcc_usb4_1_phy_p2rr2p_pipe_clk = { static struct clk_branch gcc_usb4_1_phy_pcie_pipe_clk = { .halt_reg = 0x2b048, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52028, .enable_mask = BIT(0), @@ -5634,7 +5636,7 @@ static struct clk_branch gcc_usb4_1_phy_rx1_clk = { static struct clk_branch gcc_usb4_1_phy_usb_pipe_clk = { .halt_reg = 0x2b0a4, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0x2b0a4, .hwcg_bit = 1, .clkr = { @@ -5759,7 +5761,7 @@ static struct clk_branch gcc_usb4_2_master_clk = { static struct clk_branch gcc_usb4_2_phy_p2rr2p_pipe_clk = { .halt_reg = 0x110d8, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x110d8, .enable_mask = BIT(0), @@ -5772,7 +5774,7 @@ static struct clk_branch gcc_usb4_2_phy_p2rr2p_pipe_clk = { static struct clk_branch gcc_usb4_2_phy_pcie_pipe_clk = { .halt_reg = 0x11048, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52028, .enable_mask = BIT(1), @@ -5811,7 +5813,7 @@ static struct clk_branch gcc_usb4_2_phy_rx1_clk = { static struct clk_branch gcc_usb4_2_phy_usb_pipe_clk = { .halt_reg = 0x110a4, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0x110a4, .hwcg_bit = 1, .clkr = { diff --git a/drivers/clk/qcom/gpucc-sa8775p.c b/drivers/clk/qcom/gpucc-sa8775p.c index 1167c42da3..3deabf8333 100644 --- a/drivers/clk/qcom/gpucc-sa8775p.c +++ b/drivers/clk/qcom/gpucc-sa8775p.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
* Copyright (c) 2023, Linaro Limited */ @@ -161,7 +161,7 @@ static struct clk_rcg2 gpu_cc_ff_clk_src = { .name = "gpu_cc_ff_clk_src", .parent_data = gpu_cc_parent_data_0, .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0), - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -181,7 +181,7 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = { .parent_data = gpu_cc_parent_data_1, .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -200,7 +200,7 @@ static struct clk_rcg2 gpu_cc_hub_clk_src = { .name = "gpu_cc_hub_clk_src", .parent_data = gpu_cc_parent_data_2, .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2), - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -280,7 +280,7 @@ static struct clk_branch gpu_cc_ahb_clk = { &gpu_cc_hub_ahb_div_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -294,8 +294,7 @@ static struct clk_branch gpu_cc_cb_clk = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data){ .name = "gpu_cc_cb_clk", - .flags = CLK_IS_CRITICAL, - .ops = &clk_branch2_ops, + .ops = &clk_branch2_aon_ops, }, }, }; @@ -312,7 +311,7 @@ static struct clk_branch gpu_cc_crc_ahb_clk = { &gpu_cc_hub_ahb_div_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -330,7 +329,7 @@ static struct clk_branch gpu_cc_cx_ff_clk = { &gpu_cc_ff_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -348,7 +347,7 @@ static struct clk_branch gpu_cc_cx_gmu_clk = { &gpu_cc_gmu_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_aon_ops, }, }, @@ -362,7 +361,6 @@ static struct clk_branch gpu_cc_cx_snoc_dvm_clk = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data){ .name = "gpu_cc_cx_snoc_dvm_clk", - .flags = CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, @@ -380,7 +378,7 @@ static struct clk_branch gpu_cc_cxo_aon_clk = { &gpu_cc_xo_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -398,7 +396,7 @@ static struct clk_branch gpu_cc_cxo_clk = { &gpu_cc_xo_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -416,7 +414,7 @@ static struct clk_branch gpu_cc_demet_clk = { &gpu_cc_demet_div_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_aon_ops, }, }, @@ -430,7 +428,6 @@ static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data){ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk", - .flags = CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, @@ -448,7 +445,7 @@ static struct clk_branch gpu_cc_hub_aon_clk = { &gpu_cc_hub_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_aon_ops, }, }, @@ -466,7 +463,7 @@ static struct clk_branch gpu_cc_hub_cx_int_clk = { &gpu_cc_hub_cx_int_div_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = 
CLK_SET_RATE_PARENT, .ops = &clk_branch2_aon_ops, }, }, @@ -480,7 +477,6 @@ static struct clk_branch gpu_cc_memnoc_gfx_clk = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data){ .name = "gpu_cc_memnoc_gfx_clk", - .flags = CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, @@ -494,7 +490,6 @@ static struct clk_branch gpu_cc_sleep_clk = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data){ .name = "gpu_cc_sleep_clk", - .flags = CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, @@ -528,16 +523,22 @@ static struct clk_regmap *gpu_cc_sa8775p_clocks[] = { static struct gdsc cx_gdsc = { .gdscr = 0x9108, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .gds_hw_ctrl = 0x953c, .pd = { .name = "cx_gdsc", }, .pwrsts = PWRSTS_OFF_ON, - .flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE, }; static struct gdsc gx_gdsc = { .gdscr = 0x905c, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "gx_gdsc", .power_on = gdsc_gx_do_nothing_enable, diff --git a/drivers/clk/qcom/gpucc-sm8350.c b/drivers/clk/qcom/gpucc-sm8350.c index 38505d1388..8d9dcff40d 100644 --- a/drivers/clk/qcom/gpucc-sm8350.c +++ b/drivers/clk/qcom/gpucc-sm8350.c @@ -2,6 +2,7 @@ /* * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. * Copyright (c) 2022, Linaro Limited + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -147,7 +148,7 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = { .parent_data = gpu_cc_parent_data_0, .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -169,7 +170,7 @@ static struct clk_rcg2 gpu_cc_hub_clk_src = { .parent_data = gpu_cc_parent_data_1, .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c index dac27e31ef..b0b0cb074b 100644 --- a/drivers/clk/qcom/hfpll.c +++ b/drivers/clk/qcom/hfpll.c @@ -14,7 +14,7 @@ #include "clk-regmap.h" #include "clk-hfpll.h" -static const struct hfpll_data hdata = { +static const struct hfpll_data qcs404 = { .mode_reg = 0x00, .l_reg = 0x04, .m_reg = 0x08, @@ -84,10 +84,12 @@ static const struct hfpll_data msm8976_cci = { }; static const struct of_device_id qcom_hfpll_match_table[] = { - { .compatible = "qcom,hfpll", .data = &hdata }, { .compatible = "qcom,msm8976-hfpll-a53", .data = &msm8976_a53 }, { .compatible = "qcom,msm8976-hfpll-a72", .data = &msm8976_a72 }, { .compatible = "qcom,msm8976-hfpll-cci", .data = &msm8976_cci }, + { .compatible = "qcom,qcs404-hfpll", .data = &qcs404 }, + /* Deprecated in bindings */ + { .compatible = "qcom,hfpll", .data = &qcs404 }, { } }; MODULE_DEVICE_TABLE(of, qcom_hfpll_match_table); diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c index 23b0b11f00..e7cfa8d220 100644 --- a/drivers/clk/qcom/kpss-xcc.c +++ b/drivers/clk/qcom/kpss-xcc.c @@ -58,9 +58,7 @@ static int kpss_xcc_driver_probe(struct platform_device *pdev) if (IS_ERR(hw)) return PTR_ERR(hw); - of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get, hw); - - return 0; + return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get, hw); } static struct platform_driver kpss_xcc_driver = { diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c index f45c2c4580..4b1815147f 100644 --- 
a/drivers/clk/renesas/clk-r8a73a4.c +++ b/drivers/clk/renesas/clk-r8a73a4.c @@ -30,8 +30,6 @@ struct r8a73a4_cpg { #define CPG_PLL2HCR 0xe4 #define CPG_PLL2SCR 0xf4 -#define CLK_ENABLE_ON_INIT BIT(0) - struct div4_clk { const char *name; unsigned int reg; diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c index 3ee3f57e4e..22e9be7240 100644 --- a/drivers/clk/renesas/clk-r8a7740.c +++ b/drivers/clk/renesas/clk-r8a7740.c @@ -26,28 +26,25 @@ struct r8a7740_cpg { #define CPG_USBCKCR 0x8c #define CPG_FRQCRC 0xe0 -#define CLK_ENABLE_ON_INIT BIT(0) - struct div4_clk { const char *name; unsigned int reg; unsigned int shift; - int flags; }; static struct div4_clk div4_clks[] = { - { "i", CPG_FRQCRA, 20, CLK_ENABLE_ON_INIT }, - { "zg", CPG_FRQCRA, 16, CLK_ENABLE_ON_INIT }, - { "b", CPG_FRQCRA, 8, CLK_ENABLE_ON_INIT }, - { "m1", CPG_FRQCRA, 4, CLK_ENABLE_ON_INIT }, - { "hp", CPG_FRQCRB, 4, 0 }, - { "hpp", CPG_FRQCRC, 20, 0 }, - { "usbp", CPG_FRQCRC, 16, 0 }, - { "s", CPG_FRQCRC, 12, 0 }, - { "zb", CPG_FRQCRC, 8, 0 }, - { "m3", CPG_FRQCRC, 4, 0 }, - { "cp", CPG_FRQCRC, 0, 0 }, - { NULL, 0, 0, 0 }, + { "i", CPG_FRQCRA, 20 }, + { "zg", CPG_FRQCRA, 16 }, + { "b", CPG_FRQCRA, 8 }, + { "m1", CPG_FRQCRA, 4 }, + { "hp", CPG_FRQCRB, 4 }, + { "hpp", CPG_FRQCRC, 20 }, + { "usbp", CPG_FRQCRC, 16 }, + { "s", CPG_FRQCRC, 12 }, + { "zb", CPG_FRQCRC, 8 }, + { "m3", CPG_FRQCRC, 4 }, + { "cp", CPG_FRQCRC, 0 }, + { NULL, 0, 0 }, }; static const struct clk_div_table div4_div_table[] = { diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c index 8c51090f13..47fc99ccd2 100644 --- a/drivers/clk/renesas/clk-sh73a0.c +++ b/drivers/clk/renesas/clk-sh73a0.c @@ -34,8 +34,6 @@ struct sh73a0_cpg { #define CPG_DSI0PHYCR 0x6c #define CPG_DSI1PHYCR 0x70 -#define CLK_ENABLE_ON_INIT BIT(0) - struct div4_clk { const char *name; const char *parent; diff --git a/drivers/clk/renesas/r8a779h0-cpg-mssr.c b/drivers/clk/renesas/r8a779h0-cpg-mssr.c index 71f67a1c86..079b55b30b 100644 --- a/drivers/clk/renesas/r8a779h0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779h0-cpg-mssr.c @@ -184,14 +184,35 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] = { DEF_MOD("i2c1", 519, R8A779H0_CLK_S0D6_PER), DEF_MOD("i2c2", 520, R8A779H0_CLK_S0D6_PER), DEF_MOD("i2c3", 521, R8A779H0_CLK_S0D6_PER), + DEF_MOD("irqc", 611, R8A779H0_CLK_CL16M), + DEF_MOD("msi0", 618, R8A779H0_CLK_MSO), + DEF_MOD("msi1", 619, R8A779H0_CLK_MSO), + DEF_MOD("msi2", 620, R8A779H0_CLK_MSO), + DEF_MOD("msi3", 621, R8A779H0_CLK_MSO), + DEF_MOD("msi4", 622, R8A779H0_CLK_MSO), + DEF_MOD("msi5", 623, R8A779H0_CLK_MSO), DEF_MOD("rpc-if", 629, R8A779H0_CLK_RPCD2), + DEF_MOD("scif0", 702, R8A779H0_CLK_SASYNCPERD4), + DEF_MOD("scif1", 703, R8A779H0_CLK_SASYNCPERD4), + DEF_MOD("scif3", 704, R8A779H0_CLK_SASYNCPERD4), + DEF_MOD("scif4", 705, R8A779H0_CLK_SASYNCPERD4), DEF_MOD("sdhi0", 706, R8A779H0_CLK_SD0), DEF_MOD("sydm1", 709, R8A779H0_CLK_S0D6_PER), DEF_MOD("sydm2", 710, R8A779H0_CLK_S0D6_PER), + DEF_MOD("tmu0", 713, R8A779H0_CLK_SASYNCRT), + DEF_MOD("tmu1", 714, R8A779H0_CLK_SASYNCPERD2), + DEF_MOD("tmu2", 715, R8A779H0_CLK_SASYNCPERD2), + DEF_MOD("tmu3", 716, R8A779H0_CLK_SASYNCPERD2), + DEF_MOD("tmu4", 717, R8A779H0_CLK_SASYNCPERD2), DEF_MOD("wdt1:wdt0", 907, R8A779H0_CLK_R), + DEF_MOD("cmt0", 910, R8A779H0_CLK_R), + DEF_MOD("cmt1", 911, R8A779H0_CLK_R), + DEF_MOD("cmt2", 912, R8A779H0_CLK_R), + DEF_MOD("cmt3", 913, R8A779H0_CLK_R), DEF_MOD("pfc0", 915, R8A779H0_CLK_CP), DEF_MOD("pfc1", 916, R8A779H0_CLK_CP), DEF_MOD("pfc2", 
917, R8A779H0_CLK_CP), + DEF_MOD("tsc2:tsc1", 919, R8A779H0_CLK_CL16M), }; /* diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c index 26b71547fd..16acc95f3c 100644 --- a/drivers/clk/renesas/r9a07g043-cpg.c +++ b/drivers/clk/renesas/r9a07g043-cpg.c @@ -149,7 +149,7 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = { #endif }; -static struct rzg2l_mod_clk r9a07g043_mod_clks[] = { +static const struct rzg2l_mod_clk r9a07g043_mod_clks[] = { #ifdef CONFIG_ARM64 DEF_MOD("gic", R9A07G043_GIC600_GICCLK, R9A07G043_CLK_P1, 0x514, 0), @@ -286,7 +286,7 @@ static struct rzg2l_mod_clk r9a07g043_mod_clks[] = { #endif }; -static struct rzg2l_reset r9a07g043_resets[] = { +static const struct rzg2l_reset r9a07g043_resets[] = { #ifdef CONFIG_ARM64 DEF_RST(R9A07G043_GIC600_GICRESET_N, 0x814, 0), DEF_RST(R9A07G043_GIC600_DBG_GICRESET_N, 0x814, 1), diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c index 48404cafea..f6df3f7a31 100644 --- a/drivers/clk/renesas/r9a07g044-cpg.c +++ b/drivers/clk/renesas/r9a07g044-cpg.c @@ -368,7 +368,7 @@ static const struct { #endif }; -static struct rzg2l_reset r9a07g044_resets[] = { +static const struct rzg2l_reset r9a07g044_resets[] = { DEF_RST(R9A07G044_GIC600_GICRESET_N, 0x814, 0), DEF_RST(R9A07G044_GIC600_DBG_GICRESET_N, 0x814, 1), DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0), diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c index c3e6da2de1..b068733b14 100644 --- a/drivers/clk/renesas/r9a08g045-cpg.c +++ b/drivers/clk/renesas/r9a08g045-cpg.c @@ -240,6 +240,43 @@ static const unsigned int r9a08g045_crit_mod_clks[] __initconst = { MOD_CLK_BASE + R9A08G045_DMAC_ACLK, }; +static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = { + /* Keep always-on domain on the first position for proper domains registration. 
*/ + DEF_PD("always-on", R9A08G045_PD_ALWAYS_ON, + DEF_REG_CONF(0, 0), + RZG2L_PD_F_ALWAYS_ON), + DEF_PD("gic", R9A08G045_PD_GIC, + DEF_REG_CONF(CPG_BUS_ACPU_MSTOP, BIT(3)), + RZG2L_PD_F_ALWAYS_ON), + DEF_PD("ia55", R9A08G045_PD_IA55, + DEF_REG_CONF(CPG_BUS_PERI_CPU_MSTOP, BIT(13)), + RZG2L_PD_F_ALWAYS_ON), + DEF_PD("dmac", R9A08G045_PD_DMAC, + DEF_REG_CONF(CPG_BUS_REG1_MSTOP, GENMASK(3, 0)), + RZG2L_PD_F_ALWAYS_ON), + DEF_PD("wdt0", R9A08G045_PD_WDT0, + DEF_REG_CONF(CPG_BUS_REG0_MSTOP, BIT(0)), + RZG2L_PD_F_NONE), + DEF_PD("sdhi0", R9A08G045_PD_SDHI0, + DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(0)), + RZG2L_PD_F_NONE), + DEF_PD("sdhi1", R9A08G045_PD_SDHI1, + DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(1)), + RZG2L_PD_F_NONE), + DEF_PD("sdhi2", R9A08G045_PD_SDHI2, + DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)), + RZG2L_PD_F_NONE), + DEF_PD("eth0", R9A08G045_PD_ETHER0, + DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(2)), + RZG2L_PD_F_NONE), + DEF_PD("eth1", R9A08G045_PD_ETHER1, + DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(3)), + RZG2L_PD_F_NONE), + DEF_PD("scif0", R9A08G045_PD_SCIF0, + DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(1)), + RZG2L_PD_F_NONE), +}; + const struct rzg2l_cpg_info r9a08g045_cpg_info = { /* Core Clocks */ .core_clks = r9a08g045_core_clks, @@ -260,5 +297,9 @@ const struct rzg2l_cpg_info r9a08g045_cpg_info = { .resets = r9a08g045_resets, .num_resets = R9A08G045_VBAT_BRESETN + 1, /* Last reset ID + 1 */ + /* Power domains */ + .pm_domains = r9a08g045_pm_domains, + .num_pm_domains = ARRAY_SIZE(r9a08g045_pm_domains), + .has_clk_mon_regs = true, }; diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c index 3d2daa4ba2..04b78064d4 100644 --- a/drivers/clk/renesas/rzg2l-cpg.c +++ b/drivers/clk/renesas/rzg2l-cpg.c @@ -139,7 +139,6 @@ struct rzg2l_pll5_mux_dsi_div_param { * @num_resets: Number of Module Resets in info->resets[] * @last_dt_core_clk: ID of the last Core Clock exported to DT * @info: Pointer to platform data - * @genpd: PM domain * @mux_dsi_div_params: pll5 mux and dsi div parameters */ struct rzg2l_cpg_priv { @@ -156,8 +155,6 @@ struct rzg2l_cpg_priv { const struct rzg2l_cpg_info *info; - struct generic_pm_domain genpd; - struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params; }; @@ -1559,9 +1556,34 @@ static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv, return true; } +/** + * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure + * @onecell_data: cell data + * @domains: generic PM domains + */ +struct rzg2l_cpg_pm_domains { + struct genpd_onecell_data onecell_data; + struct generic_pm_domain *domains[]; +}; + +/** + * struct rzg2l_cpg_pd - RZ/G2L power domain data structure + * @genpd: generic PM domain + * @priv: pointer to CPG private data structure + * @conf: CPG PM domain configuration info + * @id: RZ/G2L power domain ID + */ +struct rzg2l_cpg_pd { + struct generic_pm_domain genpd; + struct rzg2l_cpg_priv *priv; + struct rzg2l_cpg_pm_domain_conf conf; + u16 id; +}; + static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev) { - struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd); + struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd); + struct rzg2l_cpg_priv *priv = pd->priv; struct device_node *np = dev->of_node; struct of_phandle_args clkspec; bool once = true; @@ -1617,31 +1639,180 @@ static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device } static void rzg2l_cpg_genpd_remove(void *data) +{ + struct genpd_onecell_data 
*celldata = data; + + for (unsigned int i = 0; i < celldata->num_domains; i++) + pm_genpd_remove(celldata->domains[i]); +} + +static void rzg2l_cpg_genpd_remove_simple(void *data) { pm_genpd_remove(data); } +static int rzg2l_cpg_power_on(struct generic_pm_domain *domain) +{ + struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd); + struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop; + struct rzg2l_cpg_priv *priv = pd->priv; + + /* Set MSTOP. */ + if (mstop.mask) + writel(mstop.mask << 16, priv->base + mstop.off); + + return 0; +} + +static int rzg2l_cpg_power_off(struct generic_pm_domain *domain) +{ + struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd); + struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop; + struct rzg2l_cpg_priv *priv = pd->priv; + + /* Set MSTOP. */ + if (mstop.mask) + writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off); + + return 0; +} + +static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd, bool always_on) +{ + struct dev_power_governor *governor; + + pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; + pd->genpd.attach_dev = rzg2l_cpg_attach_dev; + pd->genpd.detach_dev = rzg2l_cpg_detach_dev; + if (always_on) { + pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON; + governor = &pm_domain_always_on_gov; + } else { + pd->genpd.power_on = rzg2l_cpg_power_on; + pd->genpd.power_off = rzg2l_cpg_power_off; + governor = &simple_qos_governor; + } + + return pm_genpd_init(&pd->genpd, governor, !always_on); +} + static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv) { struct device *dev = priv->dev; struct device_node *np = dev->of_node; - struct generic_pm_domain *genpd = &priv->genpd; + struct rzg2l_cpg_pd *pd; + int ret; + + pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + + pd->genpd.name = np->name; + pd->priv = priv; + ret = rzg2l_cpg_pd_setup(pd, true); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd); + if (ret) + return ret; + + return of_genpd_add_provider_simple(np, &pd->genpd); +} + +static struct generic_pm_domain * +rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data) +{ + struct generic_pm_domain *domain = ERR_PTR(-ENOENT); + struct genpd_onecell_data *genpd = data; + + if (spec->args_count != 1) + return ERR_PTR(-EINVAL); + + for (unsigned int i = 0; i < genpd->num_domains; i++) { + struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd, + genpd); + + if (pd->id == spec->args[0]) { + domain = &pd->genpd; + break; + } + } + + return domain; +} + +static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv) +{ + const struct rzg2l_cpg_info *info = priv->info; + struct device *dev = priv->dev; + struct device_node *np = dev->of_node; + struct rzg2l_cpg_pm_domains *domains; + struct generic_pm_domain *parent; + u32 ncells; int ret; - genpd->name = np->name; - genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON | - GENPD_FLAG_ACTIVE_WAKEUP; - genpd->attach_dev = rzg2l_cpg_attach_dev; - genpd->detach_dev = rzg2l_cpg_detach_dev; - ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false); + ret = of_property_read_u32(np, "#power-domain-cells", &ncells); + if (ret) + return ret; + + /* For backward compatibility. 
*/ + if (!ncells) + return rzg2l_cpg_add_clk_domain(priv); + + domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains), + GFP_KERNEL); + if (!domains) + return -ENOMEM; + + domains->onecell_data.domains = domains->domains; + domains->onecell_data.num_domains = info->num_pm_domains; + domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate; + + ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data); if (ret) return ret; - ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd); + for (unsigned int i = 0; i < info->num_pm_domains; i++) { + bool always_on = !!(info->pm_domains[i].flags & RZG2L_PD_F_ALWAYS_ON); + struct rzg2l_cpg_pd *pd; + + pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + + pd->genpd.name = info->pm_domains[i].name; + pd->conf = info->pm_domains[i].conf; + pd->id = info->pm_domains[i].id; + pd->priv = priv; + + ret = rzg2l_cpg_pd_setup(pd, always_on); + if (ret) + return ret; + + if (always_on) { + ret = rzg2l_cpg_power_on(&pd->genpd); + if (ret) + return ret; + } + + domains->domains[i] = &pd->genpd; + /* Parent should be on the very first entry of info->pm_domains[]. */ + if (!i) { + parent = &pd->genpd; + continue; + } + + ret = pm_genpd_add_subdomain(parent, &pd->genpd); + if (ret) + return ret; + } + + ret = of_genpd_add_provider_onecell(np, &domains->onecell_data); if (ret) return ret; - return of_genpd_add_provider_simple(np, genpd); + return 0; } static int __init rzg2l_cpg_probe(struct platform_device *pdev) @@ -1697,7 +1868,7 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev) if (error) return error; - error = rzg2l_cpg_add_clk_domain(priv); + error = rzg2l_cpg_add_pm_domains(priv); if (error) return error; diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h index 6e38c8fc88..ecfe7e7ea8 100644 --- a/drivers/clk/renesas/rzg2l-cpg.h +++ b/drivers/clk/renesas/rzg2l-cpg.h @@ -27,6 +27,18 @@ #define CPG_PL6_ETH_SSEL (0x418) #define CPG_PL5_SDIV (0x420) #define CPG_RST_MON (0x680) +#define CPG_BUS_ACPU_MSTOP (0xB60) +#define CPG_BUS_MCPU1_MSTOP (0xB64) +#define CPG_BUS_MCPU2_MSTOP (0xB68) +#define CPG_BUS_PERI_COM_MSTOP (0xB6C) +#define CPG_BUS_PERI_CPU_MSTOP (0xB70) +#define CPG_BUS_PERI_DDR_MSTOP (0xB74) +#define CPG_BUS_REG0_MSTOP (0xB7C) +#define CPG_BUS_REG1_MSTOP (0xB80) +#define CPG_BUS_TZCDDR_MSTOP (0xB84) +#define CPG_MHU_MSTOP (0xB88) +#define CPG_BUS_MCPU3_MSTOP (0xB90) +#define CPG_BUS_PERI_CPU2_MSTOP (0xB94) #define CPG_OTHERFUNC1_REG (0xBE8) #define CPG_SIPLL5_STBY_RESETB BIT(0) @@ -234,6 +246,55 @@ struct rzg2l_reset { #define DEF_RST(_id, _off, _bit) \ DEF_RST_MON(_id, _off, _bit, -1) +/** + * struct rzg2l_cpg_reg_conf - RZ/G2L register configuration data structure + * @off: register offset + * @mask: register mask + */ +struct rzg2l_cpg_reg_conf { + u16 off; + u16 mask; +}; + +#define DEF_REG_CONF(_off, _mask) ((struct rzg2l_cpg_reg_conf) { .off = (_off), .mask = (_mask) }) + +/** + * struct rzg2l_cpg_pm_domain_conf - PM domain configuration data structure + * @mstop: MSTOP register configuration + */ +struct rzg2l_cpg_pm_domain_conf { + struct rzg2l_cpg_reg_conf mstop; +}; + +/** + * struct rzg2l_cpg_pm_domain_init_data - PM domain init data + * @name: PM domain name + * @conf: PM domain configuration + * @flags: RZG2L PM domain flags (see RZG2L_PD_F_*) + * @id: PM domain ID (similar to the ones defined in + * include/dt-bindings/clock/-cpg.h) + */ +struct rzg2l_cpg_pm_domain_init_data { + const char * const name; + 
struct rzg2l_cpg_pm_domain_conf conf; + u32 flags; + u16 id; +}; + +#define DEF_PD(_name, _id, _mstop_conf, _flags) \ + { \ + .name = (_name), \ + .id = (_id), \ + .conf = { \ + .mstop = (_mstop_conf), \ + }, \ + .flags = (_flags), \ + } + +/* Power domain flags. */ +#define RZG2L_PD_F_ALWAYS_ON BIT(0) +#define RZG2L_PD_F_NONE (0) + /** * struct rzg2l_cpg_info - SoC-specific CPG Description * @@ -252,6 +313,8 @@ struct rzg2l_reset { * @crit_mod_clks: Array with Module Clock IDs of critical clocks that * should not be disabled without a knowledgeable driver * @num_crit_mod_clks: Number of entries in crit_mod_clks[] + * @pm_domains: PM domains init data array + * @num_pm_domains: Number of PM domains * @has_clk_mon_regs: Flag indicating whether the SoC has CLK_MON registers */ struct rzg2l_cpg_info { @@ -278,6 +341,10 @@ struct rzg2l_cpg_info { const unsigned int *crit_mod_clks; unsigned int num_crit_mod_clks; + /* Power domain. */ + const struct rzg2l_cpg_pm_domain_init_data *pm_domains; + unsigned int num_pm_domains; + bool has_clk_mon_regs; }; diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c index 975454a3dd..9101207868 100644 --- a/drivers/clk/rockchip/clk-mmc-phase.c +++ b/drivers/clk/rockchip/clk-mmc-phase.c @@ -14,7 +14,6 @@ struct rockchip_mmc_clock { struct clk_hw hw; void __iomem *reg; - int id; int shift; int cached_phase; struct notifier_block clk_rate_change_nb; diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c index 8cb21d10be..53d10b1c62 100644 --- a/drivers/clk/rockchip/clk-rk3568.c +++ b/drivers/clk/rockchip/clk-rk3568.c @@ -64,6 +64,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = { RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0), RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0), RK3036_PLL_RATE(800000000, 3, 200, 2, 1, 1, 0), + RK3036_PLL_RATE(724000000, 3, 181, 2, 1, 1, 0), RK3036_PLL_RATE(700000000, 3, 350, 4, 1, 1, 0), RK3036_PLL_RATE(696000000, 1, 116, 4, 1, 1, 0), RK3036_PLL_RATE(600000000, 1, 100, 4, 1, 1, 0), @@ -215,6 +216,7 @@ static const struct rockchip_cpuclk_reg_data rk3568_cpuclk_data = { PNAME(mux_pll_p) = { "xin24m" }; PNAME(mux_usb480m_p) = { "xin24m", "usb480m_phy", "clk_rtc_32k" }; +PNAME(mux_usb480m_phy_p) = { "clk_usbphy0_480m", "clk_usbphy1_480m"}; PNAME(mux_armclk_p) = { "apll", "gpll" }; PNAME(clk_i2s0_8ch_tx_p) = { "clk_i2s0_8ch_tx_src", "clk_i2s0_8ch_tx_frac", "i2s0_mclkin", "xin_osc0_half" }; PNAME(clk_i2s0_8ch_rx_p) = { "clk_i2s0_8ch_rx_src", "clk_i2s0_8ch_rx_frac", "i2s0_mclkin", "xin_osc0_half" }; @@ -485,6 +487,9 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = { MUX(USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT, RK3568_MODE_CON0, 14, 2, MFLAGS), + MUX(USB480M_PHY, "usb480m_phy", mux_usb480m_phy_p, CLK_SET_RATE_PARENT, + RK3568_MISC_CON2, 15, 1, MFLAGS), + /* PD_CORE */ COMPOSITE(0, "sclk_core_src", apll_gpll_npll_p, CLK_IGNORE_UNUSED, RK3568_CLKSEL_CON(2), 8, 2, MFLAGS, 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY, diff --git a/drivers/clk/rockchip/rst-rk3588.c b/drivers/clk/rockchip/rst-rk3588.c index e855bb8d54..c4ebc01f1c 100644 --- a/drivers/clk/rockchip/rst-rk3588.c +++ b/drivers/clk/rockchip/rst-rk3588.c @@ -577,6 +577,7 @@ static const int rk3588_register_offset[] = { /* SOFTRST_CON59 */ RK3588_CRU_RESET_OFFSET(SRST_A_HDCP1_BIU, 59, 6), + RK3588_CRU_RESET_OFFSET(SRST_A_HDMIRX_BIU, 59, 7), RK3588_CRU_RESET_OFFSET(SRST_A_VO1_BIU, 59, 8), RK3588_CRU_RESET_OFFSET(SRST_H_VOP1_BIU, 59, 9), RK3588_CRU_RESET_OFFSET(SRST_H_VOP1_S_BIU, 59, 
10), diff --git a/drivers/clk/samsung/clk-exynos-arm64.c b/drivers/clk/samsung/clk-exynos-arm64.c index 6fb7194df7..bf7de21f32 100644 --- a/drivers/clk/samsung/clk-exynos-arm64.c +++ b/drivers/clk/samsung/clk-exynos-arm64.c @@ -17,10 +17,17 @@ #include "clk-exynos-arm64.h" +/* PLL register bits */ +#define PLL_CON1_MANUAL BIT(1) + /* Gate register bits */ #define GATE_MANUAL BIT(20) #define GATE_ENABLE_HWACG BIT(28) +/* PLL_CONx_PLL register offsets range */ +#define PLL_CON_OFF_START 0x100 +#define PLL_CON_OFF_END 0x600 + /* Gate register offsets range */ #define GATE_OFF_START 0x2000 #define GATE_OFF_END 0x2fff @@ -38,17 +45,36 @@ struct exynos_arm64_cmu_data { struct samsung_clk_provider *ctx; }; +/* Check if the register offset is a GATE register */ +static bool is_gate_reg(unsigned long off) +{ + return off >= GATE_OFF_START && off <= GATE_OFF_END; +} + +/* Check if the register offset is a PLL_CONx register */ +static bool is_pll_conx_reg(unsigned long off) +{ + return off >= PLL_CON_OFF_START && off <= PLL_CON_OFF_END; +} + +/* Check if the register offset is a PLL_CON1 register */ +static bool is_pll_con1_reg(unsigned long off) +{ + return is_pll_conx_reg(off) && (off & 0xf) == 0x4 && !(off & 0x10); +} + /** * exynos_arm64_init_clocks - Set clocks initial configuration - * @np: CMU device tree node with "reg" property (CMU addr) - * @reg_offs: Register offsets array for clocks to init - * @reg_offs_len: Number of register offsets in reg_offs array + * @np: CMU device tree node with "reg" property (CMU addr) + * @cmu: CMU data * - * Set manual control mode for all gate clocks. + * Set manual control mode for all gate and PLL clocks. */ static void __init exynos_arm64_init_clocks(struct device_node *np, - const unsigned long *reg_offs, size_t reg_offs_len) + const struct samsung_cmu_info *cmu) { + const unsigned long *reg_offs = cmu->clk_regs; + size_t reg_offs_len = cmu->nr_clk_regs; void __iomem *reg_base; size_t i; @@ -60,14 +86,14 @@ static void __init exynos_arm64_init_clocks(struct device_node *np, void __iomem *reg = reg_base + reg_offs[i]; u32 val; - /* Modify only gate clock registers */ - if (reg_offs[i] < GATE_OFF_START || reg_offs[i] > GATE_OFF_END) - continue; - - val = readl(reg); - val |= GATE_MANUAL; - val &= ~GATE_ENABLE_HWACG; - writel(val, reg); + if (cmu->manual_plls && is_pll_con1_reg(reg_offs[i])) { + writel(PLL_CON1_MANUAL, reg); + } else if (is_gate_reg(reg_offs[i])) { + val = readl(reg); + val |= GATE_MANUAL; + val &= ~GATE_ENABLE_HWACG; + writel(val, reg); + } } iounmap(reg_base); @@ -177,7 +203,7 @@ void __init exynos_arm64_register_cmu(struct device *dev, pr_err("%s: could not enable bus clock %s; err = %d\n", __func__, cmu->clk_name, err); - exynos_arm64_init_clocks(np, cmu->clk_regs, cmu->nr_clk_regs); + exynos_arm64_init_clocks(np, cmu); samsung_cmu_register_one(np, cmu); } @@ -224,7 +250,7 @@ int __init exynos_arm64_register_cmu_pm(struct platform_device *pdev, __func__, cmu->clk_name, ret); if (set_manual) - exynos_arm64_init_clocks(np, cmu->clk_regs, cmu->nr_clk_regs); + exynos_arm64_init_clocks(np, cmu); reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg_base)) diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index a026ccca73..28945b6b0e 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -1040,19 +1040,20 @@ static unsigned long __init exynos4_get_xom(void) static void __init exynos4_clk_register_finpll(struct samsung_clk_provider *ctx) { struct 
samsung_fixed_rate_clock fclk; - struct clk *clk; - unsigned long finpll_f = 24000000; + unsigned long finpll_f; + unsigned int parent; char *parent_name; unsigned int xom = exynos4_get_xom(); parent_name = xom & 1 ? "xusbxti" : "xxti"; - clk = clk_get(NULL, parent_name); - if (IS_ERR(clk)) { + parent = xom & 1 ? CLK_XUSBXTI : CLK_XXTI; + + finpll_f = clk_hw_get_rate(ctx->clk_data.hws[parent]); + if (!finpll_f) { pr_err("%s: failed to lookup parent clock %s, assuming " "fin_pll clock frequency is 24MHz\n", __func__, parent_name); - } else { - finpll_f = clk_get_rate(clk); + finpll_f = 24000000; } fclk.id = CLK_FIN_PLL; diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c index 82cfa22c07..6215471c4a 100644 --- a/drivers/clk/samsung/clk-exynos850.c +++ b/drivers/clk/samsung/clk-exynos850.c @@ -14,13 +14,16 @@ #include #include "clk.h" +#include "clk-cpu.h" #include "clk-exynos-arm64.h" /* NOTE: Must be equal to the last clock ID increased by one */ -#define CLKS_NR_TOP (CLK_DOUT_G3D_SWITCH + 1) +#define CLKS_NR_TOP (CLK_DOUT_CPUCL1_SWITCH + 1) #define CLKS_NR_APM (CLK_GOUT_SYSREG_APM_PCLK + 1) #define CLKS_NR_AUD (CLK_GOUT_AUD_CMU_AUD_PCLK + 1) #define CLKS_NR_CMGP (CLK_GOUT_SYSREG_CMGP_PCLK + 1) +#define CLKS_NR_CPUCL0 (CLK_CLUSTER0_SCLK + 1) +#define CLKS_NR_CPUCL1 (CLK_CLUSTER1_SCLK + 1) #define CLKS_NR_G3D (CLK_GOUT_G3D_SYSREG_PCLK + 1) #define CLKS_NR_HSI (CLK_GOUT_HSI_CMU_HSI_PCLK + 1) #define CLKS_NR_IS (CLK_GOUT_IS_SYSREG_PCLK + 1) @@ -47,6 +50,10 @@ #define CLK_CON_MUX_MUX_CLKCMU_CORE_CCI 0x1018 #define CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD 0x101c #define CLK_CON_MUX_MUX_CLKCMU_CORE_SSS 0x1020 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG 0x1024 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x1028 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_DBG 0x102c +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x1030 #define CLK_CON_MUX_MUX_CLKCMU_DPU 0x1034 #define CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH 0x1038 #define CLK_CON_MUX_MUX_CLKCMU_HSI_BUS 0x103c @@ -69,6 +76,10 @@ #define CLK_CON_DIV_CLKCMU_CORE_CCI 0x1824 #define CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD 0x1828 #define CLK_CON_DIV_CLKCMU_CORE_SSS 0x182c +#define CLK_CON_DIV_CLKCMU_CPUCL0_DBG 0x1830 +#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1834 +#define CLK_CON_DIV_CLKCMU_CPUCL1_DBG 0x1838 +#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x183c #define CLK_CON_DIV_CLKCMU_DPU 0x1840 #define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1844 #define CLK_CON_DIV_CLKCMU_HSI_BUS 0x1848 @@ -97,6 +108,10 @@ #define CLK_CON_GAT_GATE_CLKCMU_CORE_CCI 0x2020 #define CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD 0x2024 #define CLK_CON_GAT_GATE_CLKCMU_CORE_SSS 0x2028 +#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG 0x202c +#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x2030 +#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_DBG 0x2034 +#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2038 #define CLK_CON_GAT_GATE_CLKCMU_DPU 0x203c #define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x2040 #define CLK_CON_GAT_GATE_CLKCMU_HSI_BUS 0x2044 @@ -130,6 +145,10 @@ static const unsigned long top_clk_regs[] __initconst = { CLK_CON_MUX_MUX_CLKCMU_CORE_CCI, CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD, CLK_CON_MUX_MUX_CLKCMU_CORE_SSS, + CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG, + CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_CPUCL1_DBG, + CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH, CLK_CON_MUX_MUX_CLKCMU_DPU, CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH, CLK_CON_MUX_MUX_CLKCMU_HSI_BUS, @@ -152,6 +171,10 @@ static const unsigned long top_clk_regs[] __initconst = { CLK_CON_DIV_CLKCMU_CORE_CCI, 
CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD, CLK_CON_DIV_CLKCMU_CORE_SSS, + CLK_CON_DIV_CLKCMU_CPUCL0_DBG, + CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, + CLK_CON_DIV_CLKCMU_CPUCL1_DBG, + CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, CLK_CON_DIV_CLKCMU_DPU, CLK_CON_DIV_CLKCMU_G3D_SWITCH, CLK_CON_DIV_CLKCMU_HSI_BUS, @@ -180,6 +203,10 @@ static const unsigned long top_clk_regs[] __initconst = { CLK_CON_GAT_GATE_CLKCMU_CORE_CCI, CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD, CLK_CON_GAT_GATE_CLKCMU_CORE_SSS, + CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG, + CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH, + CLK_CON_GAT_GATE_CLKCMU_CPUCL1_DBG, + CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH, CLK_CON_GAT_GATE_CLKCMU_DPU, CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH, CLK_CON_GAT_GATE_CLKCMU_HSI_BUS, @@ -234,6 +261,14 @@ PNAME(mout_core_mmc_embd_p) = { "oscclk", "dout_shared0_div2", "oscclk", "oscclk" }; PNAME(mout_core_sss_p) = { "dout_shared0_div3", "dout_shared1_div3", "dout_shared0_div4", "dout_shared1_div4" }; +/* List of parent clocks for Muxes in CMU_TOP: for CMU_CPUCL0 */ +PNAME(mout_cpucl0_switch_p) = { "fout_shared0_pll", "fout_shared1_pll", + "dout_shared0_div2", "dout_shared1_div2" }; +PNAME(mout_cpucl0_dbg_p) = { "dout_shared0_div4", "dout_shared1_div4" }; +/* List of parent clocks for Muxes in CMU_TOP: for CMU_CPUCL1 */ +PNAME(mout_cpucl1_switch_p) = { "fout_shared0_pll", "fout_shared1_pll", + "dout_shared0_div2", "dout_shared1_div2" }; +PNAME(mout_cpucl1_dbg_p) = { "dout_shared0_div4", "dout_shared1_div4" }; /* List of parent clocks for Muxes in CMU_TOP: for CMU_G3D */ PNAME(mout_g3d_switch_p) = { "dout_shared0_div2", "dout_shared1_div2", "dout_shared0_div3", "dout_shared1_div3" }; @@ -300,6 +335,18 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = { MUX(CLK_MOUT_CORE_SSS, "mout_core_sss", mout_core_sss_p, CLK_CON_MUX_MUX_CLKCMU_CORE_SSS, 0, 2), + /* CPUCL0 */ + MUX(CLK_MOUT_CPUCL0_DBG, "mout_cpucl0_dbg", mout_cpucl0_dbg_p, + CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG, 0, 1), + MUX(CLK_MOUT_CPUCL0_SWITCH, "mout_cpucl0_switch", mout_cpucl0_switch_p, + CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH, 0, 2), + + /* CPUCL1 */ + MUX(CLK_MOUT_CPUCL1_DBG, "mout_cpucl1_dbg", mout_cpucl1_dbg_p, + CLK_CON_MUX_MUX_CLKCMU_CPUCL1_DBG, 0, 1), + MUX(CLK_MOUT_CPUCL1_SWITCH, "mout_cpucl1_switch", mout_cpucl1_switch_p, + CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH, 0, 2), + /* DPU */ MUX(CLK_MOUT_DPU, "mout_dpu", mout_dpu_p, CLK_CON_MUX_MUX_CLKCMU_DPU, 0, 2), @@ -378,6 +425,18 @@ static const struct samsung_div_clock top_div_clks[] __initconst = { DIV(CLK_DOUT_CORE_SSS, "dout_core_sss", "gout_core_sss", CLK_CON_DIV_CLKCMU_CORE_SSS, 0, 4), + /* CPUCL0 */ + DIV(CLK_DOUT_CPUCL0_DBG, "dout_cpucl0_dbg", "gout_cpucl0_dbg", + CLK_CON_DIV_CLKCMU_CPUCL0_DBG, 0, 3), + DIV(CLK_DOUT_CPUCL0_SWITCH, "dout_cpucl0_switch", "gout_cpucl0_switch", + CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3), + + /* CPUCL1 */ + DIV(CLK_DOUT_CPUCL1_DBG, "dout_cpucl1_dbg", "gout_cpucl1_dbg", + CLK_CON_DIV_CLKCMU_CPUCL1_DBG, 0, 3), + DIV(CLK_DOUT_CPUCL1_SWITCH, "dout_cpucl1_switch", "gout_cpucl1_switch", + CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3), + /* DPU */ DIV(CLK_DOUT_DPU, "dout_dpu", "gout_dpu", CLK_CON_DIV_CLKCMU_DPU, 0, 4), @@ -442,6 +501,18 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = { GATE(CLK_GOUT_AUD, "gout_aud", "mout_aud", CLK_CON_GAT_GATE_CLKCMU_AUD, 21, 0, 0), + /* CPUCL0 */ + GATE(CLK_GOUT_CPUCL0_DBG, "gout_cpucl0_dbg", "mout_cpucl0_dbg", + CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG, 21, 0, 0), + GATE(CLK_GOUT_CPUCL0_SWITCH, "gout_cpucl0_switch", "mout_cpucl0_switch", + 
CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH, 21, 0, 0), + + /* CPUCL1 */ + GATE(CLK_GOUT_CPUCL1_DBG, "gout_cpucl1_dbg", "mout_cpucl1_dbg", + CLK_CON_GAT_GATE_CLKCMU_CPUCL1_DBG, 21, 0, 0), + GATE(CLK_GOUT_CPUCL1_SWITCH, "gout_cpucl1_switch", "mout_cpucl1_switch", + CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH, 21, 0, 0), + /* DPU */ GATE(CLK_GOUT_DPU, "gout_dpu", "mout_dpu", CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0), @@ -1030,6 +1101,373 @@ static const struct samsung_cmu_info cmgp_cmu_info __initconst = { .clk_name = "gout_clkcmu_cmgp_bus", }; +/* ---- CMU_CPUCL0 ---------------------------------------------------------- */ + +/* Register Offset definitions for CMU_CPUCL0 (0x10900000) */ +#define PLL_LOCKTIME_PLL_CPUCL0 0x0000 +#define PLL_CON0_PLL_CPUCL0 0x0100 +#define PLL_CON1_PLL_CPUCL0 0x0104 +#define PLL_CON3_PLL_CPUCL0 0x010c +#define PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER 0x0600 +#define PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER 0x0610 +#define CLK_CON_MUX_MUX_CLK_CPUCL0_PLL 0x100c +#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK 0x1800 +#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK 0x1808 +#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLKDBG 0x180c +#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK 0x1810 +#define CLK_CON_DIV_DIV_CLK_CPUCL0_CMUREF 0x1814 +#define CLK_CON_DIV_DIV_CLK_CPUCL0_CPU 0x1818 +#define CLK_CON_DIV_DIV_CLK_CPUCL0_PCLK 0x181c +#define CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_ATCLK 0x2000 +#define CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PCLK 0x2004 +#define CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PERIPHCLK 0x2008 +#define CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_SCLK 0x200c +#define CLK_CON_GAT_CLK_CPUCL0_CMU_CPUCL0_PCLK 0x2010 +#define CLK_CON_GAT_GATE_CLK_CPUCL0_CPU 0x2020 + +static const unsigned long cpucl0_clk_regs[] __initconst = { + PLL_LOCKTIME_PLL_CPUCL0, + PLL_CON0_PLL_CPUCL0, + PLL_CON1_PLL_CPUCL0, + PLL_CON3_PLL_CPUCL0, + PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER, + PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER, + CLK_CON_MUX_MUX_CLK_CPUCL0_PLL, + CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK, + CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK, + CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLKDBG, + CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK, + CLK_CON_DIV_DIV_CLK_CPUCL0_CMUREF, + CLK_CON_DIV_DIV_CLK_CPUCL0_CPU, + CLK_CON_DIV_DIV_CLK_CPUCL0_PCLK, + CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_ATCLK, + CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PCLK, + CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PERIPHCLK, + CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_SCLK, + CLK_CON_GAT_CLK_CPUCL0_CMU_CPUCL0_PCLK, + CLK_CON_GAT_GATE_CLK_CPUCL0_CPU, +}; + +/* List of parent clocks for Muxes in CMU_CPUCL0 */ +PNAME(mout_pll_cpucl0_p) = { "oscclk", "fout_cpucl0_pll" }; +PNAME(mout_cpucl0_switch_user_p) = { "oscclk", "dout_cpucl0_switch" }; +PNAME(mout_cpucl0_dbg_user_p) = { "oscclk", "dout_cpucl0_dbg" }; +PNAME(mout_cpucl0_pll_p) = { "mout_pll_cpucl0", + "mout_cpucl0_switch_user" }; + +static const struct samsung_pll_rate_table cpu_pll_rates[] __initconst = { + PLL_35XX_RATE(26 * MHZ, 2210000000U, 255, 3, 0), + PLL_35XX_RATE(26 * MHZ, 2106000000U, 243, 3, 0), + PLL_35XX_RATE(26 * MHZ, 2002000000U, 231, 3, 0), + PLL_35XX_RATE(26 * MHZ, 1846000000U, 213, 3, 0), + PLL_35XX_RATE(26 * MHZ, 1742000000U, 201, 3, 0), + PLL_35XX_RATE(26 * MHZ, 1586000000U, 183, 3, 0), + PLL_35XX_RATE(26 * MHZ, 1456000000U, 168, 3, 0), + PLL_35XX_RATE(26 * MHZ, 1300000000U, 150, 3, 0), + PLL_35XX_RATE(26 * MHZ, 1157000000U, 267, 3, 1), + PLL_35XX_RATE(26 * MHZ, 1053000000U, 243, 3, 1), + PLL_35XX_RATE(26 * MHZ, 949000000U, 219, 3, 1), + PLL_35XX_RATE(26 * MHZ, 806000000U, 186, 3, 1), + PLL_35XX_RATE(26 * MHZ, 650000000U, 150, 3, 1), + PLL_35XX_RATE(26 * MHZ, 546000000U, 
252, 3, 2), + PLL_35XX_RATE(26 * MHZ, 442000000U, 204, 3, 2), + PLL_35XX_RATE(26 * MHZ, 351000000U, 162, 3, 2), + PLL_35XX_RATE(26 * MHZ, 247000000U, 114, 3, 2), + PLL_35XX_RATE(26 * MHZ, 182000000U, 168, 3, 3), + PLL_35XX_RATE(26 * MHZ, 130000000U, 120, 3, 3), +}; + +static const struct samsung_pll_clock cpucl0_pll_clks[] __initconst = { + PLL(pll_0822x, CLK_FOUT_CPUCL0_PLL, "fout_cpucl0_pll", "oscclk", + PLL_LOCKTIME_PLL_CPUCL0, PLL_CON3_PLL_CPUCL0, cpu_pll_rates), +}; + +static const struct samsung_mux_clock cpucl0_mux_clks[] __initconst = { + MUX_F(CLK_MOUT_PLL_CPUCL0, "mout_pll_cpucl0", mout_pll_cpucl0_p, + PLL_CON0_PLL_CPUCL0, 4, 1, + CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0), + MUX_F(CLK_MOUT_CPUCL0_SWITCH_USER, "mout_cpucl0_switch_user", + mout_cpucl0_switch_user_p, + PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER, 4, 1, + CLK_SET_RATE_PARENT, 0), + MUX(CLK_MOUT_CPUCL0_DBG_USER, "mout_cpucl0_dbg_user", + mout_cpucl0_dbg_user_p, + PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER, 4, 1), + MUX_F(CLK_MOUT_CPUCL0_PLL, "mout_cpucl0_pll", mout_cpucl0_pll_p, + CLK_CON_MUX_MUX_CLK_CPUCL0_PLL, 0, 1, CLK_SET_RATE_PARENT, 0), +}; + +static const struct samsung_div_clock cpucl0_div_clks[] __initconst = { + DIV_F(CLK_DOUT_CPUCL0_CPU, "dout_cpucl0_cpu", "mout_cpucl0_pll", + CLK_CON_DIV_DIV_CLK_CPUCL0_CPU, 0, 1, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), + DIV_F(CLK_DOUT_CPUCL0_CMUREF, "dout_cpucl0_cmuref", "dout_cpucl0_cpu", + CLK_CON_DIV_DIV_CLK_CPUCL0_CMUREF, 0, 3, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), + DIV_F(CLK_DOUT_CPUCL0_PCLK, "dout_cpucl0_pclk", "dout_cpucl0_cpu", + CLK_CON_DIV_DIV_CLK_CPUCL0_PCLK, 0, 4, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), + + /* EMBEDDED_CMU_CPUCL0 */ + DIV_F(CLK_DOUT_CLUSTER0_ACLK, "dout_cluster0_aclk", "gout_cluster0_cpu", + CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK, 0, 4, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), + DIV_F(CLK_DOUT_CLUSTER0_ATCLK, "dout_cluster0_atclk", + "gout_cluster0_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK, 0, 4, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), + DIV_F(CLK_DOUT_CLUSTER0_PCLKDBG, "dout_cluster0_pclkdbg", + "gout_cluster0_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLKDBG, 0, 4, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), + DIV_F(CLK_DOUT_CLUSTER0_PERIPHCLK, "dout_cluster0_periphclk", + "gout_cluster0_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK, 0, 4, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), +}; + +static const struct samsung_gate_clock cpucl0_gate_clks[] __initconst = { + GATE(CLK_GOUT_CPUCL0_CMU_CPUCL0_PCLK, "gout_cpucl0_cmu_cpucl0_pclk", + "dout_cpucl0_pclk", + CLK_CON_GAT_CLK_CPUCL0_CMU_CPUCL0_PCLK, 21, CLK_IGNORE_UNUSED, 0), + + /* EMBEDDED_CMU_CPUCL0 */ + GATE(CLK_GOUT_CLUSTER0_CPU, "gout_cluster0_cpu", "dout_cpucl0_cpu", + CLK_CON_GAT_GATE_CLK_CPUCL0_CPU, 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CLUSTER0_SCLK, "gout_cluster0_sclk", "gout_cluster0_cpu", + CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_SCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CLUSTER0_ATCLK, "gout_cluster0_atclk", + "dout_cluster0_atclk", + CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_ATCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CLUSTER0_PERIPHCLK, "gout_cluster0_periphclk", + "dout_cluster0_periphclk", + CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PERIPHCLK, 21, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CLUSTER0_PCLK, "gout_cluster0_pclk", + "dout_cluster0_pclkdbg", + CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PCLK, 21, CLK_IGNORE_UNUSED, 0), +}; + +/* + * Each parameter is going to be written into the corresponding DIV register. 
So + * the actual divider value for each parameter will be 1/(param+1). All these + * parameters must be in the range of 0..15, as the divider range for all of + * these DIV clocks is 1..16. The default values for these dividers is + * (1, 3, 3, 1). + */ +#define E850_CPU_DIV0(aclk, atclk, pclkdbg, periphclk) \ + (((aclk) << 16) | ((atclk) << 12) | ((pclkdbg) << 8) | \ + ((periphclk) << 4)) + +static const struct exynos_cpuclk_cfg_data exynos850_cluster_clk_d[] __initconst += { + { 2210000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 2106000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 2002000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 1846000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 1742000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 1586000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 1456000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 1300000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 1157000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 1053000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 949000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 806000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 650000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 546000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 442000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 351000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 247000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 182000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 130000, E850_CPU_DIV0(1, 3, 3, 1) }, + { 0 } +}; + +static const struct samsung_cpu_clock cpucl0_cpu_clks[] __initconst = { + CPU_CLK(CLK_CLUSTER0_SCLK, "cluster0_clk", CLK_MOUT_PLL_CPUCL0, + CLK_MOUT_CPUCL0_SWITCH_USER, 0, 0x0, CPUCLK_LAYOUT_E850_CL0, + exynos850_cluster_clk_d), +}; + +static const struct samsung_cmu_info cpucl0_cmu_info __initconst = { + .pll_clks = cpucl0_pll_clks, + .nr_pll_clks = ARRAY_SIZE(cpucl0_pll_clks), + .mux_clks = cpucl0_mux_clks, + .nr_mux_clks = ARRAY_SIZE(cpucl0_mux_clks), + .div_clks = cpucl0_div_clks, + .nr_div_clks = ARRAY_SIZE(cpucl0_div_clks), + .gate_clks = cpucl0_gate_clks, + .nr_gate_clks = ARRAY_SIZE(cpucl0_gate_clks), + .cpu_clks = cpucl0_cpu_clks, + .nr_cpu_clks = ARRAY_SIZE(cpucl0_cpu_clks), + .nr_clk_ids = CLKS_NR_CPUCL0, + .clk_regs = cpucl0_clk_regs, + .nr_clk_regs = ARRAY_SIZE(cpucl0_clk_regs), + .clk_name = "dout_cpucl0_switch", + .manual_plls = true, +}; + +static void __init exynos850_cmu_cpucl0_init(struct device_node *np) +{ + exynos_arm64_register_cmu(NULL, np, &cpucl0_cmu_info); +} + +/* Register CMU_CPUCL0 early, as CPU clocks should be available ASAP */ +CLK_OF_DECLARE(exynos850_cmu_cpucl0, "samsung,exynos850-cmu-cpucl0", + exynos850_cmu_cpucl0_init); + +/* ---- CMU_CPUCL1 ---------------------------------------------------------- */ + +/* Register Offset definitions for CMU_CPUCL1 (0x10800000) */ +#define PLL_LOCKTIME_PLL_CPUCL1 0x0000 +#define PLL_CON0_PLL_CPUCL1 0x0100 +#define PLL_CON1_PLL_CPUCL1 0x0104 +#define PLL_CON3_PLL_CPUCL1 0x010c +#define PLL_CON0_MUX_CLKCMU_CPUCL1_DBG_USER 0x0600 +#define PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER 0x0610 +#define CLK_CON_MUX_MUX_CLK_CPUCL1_PLL 0x1000 +#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK 0x1800 +#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK 0x1808 +#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLKDBG 0x180c +#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK 0x1810 +#define CLK_CON_DIV_DIV_CLK_CPUCL1_CMUREF 0x1814 +#define CLK_CON_DIV_DIV_CLK_CPUCL1_CPU 0x1818 +#define CLK_CON_DIV_DIV_CLK_CPUCL1_PCLK 0x181c +#define CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_ATCLK 0x2000 +#define CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PCLK 0x2004 +#define CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PERIPHCLK 0x2008 +#define CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_SCLK 0x200c +#define CLK_CON_GAT_CLK_CPUCL1_CMU_CPUCL1_PCLK 0x2010 +#define 
CLK_CON_GAT_GATE_CLK_CPUCL1_CPU 0x2020 + +static const unsigned long cpucl1_clk_regs[] __initconst = { + PLL_LOCKTIME_PLL_CPUCL1, + PLL_CON0_PLL_CPUCL1, + PLL_CON1_PLL_CPUCL1, + PLL_CON3_PLL_CPUCL1, + PLL_CON0_MUX_CLKCMU_CPUCL1_DBG_USER, + PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER, + CLK_CON_MUX_MUX_CLK_CPUCL1_PLL, + CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK, + CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK, + CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLKDBG, + CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK, + CLK_CON_DIV_DIV_CLK_CPUCL1_CMUREF, + CLK_CON_DIV_DIV_CLK_CPUCL1_CPU, + CLK_CON_DIV_DIV_CLK_CPUCL1_PCLK, + CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_ATCLK, + CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PCLK, + CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PERIPHCLK, + CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_SCLK, + CLK_CON_GAT_CLK_CPUCL1_CMU_CPUCL1_PCLK, + CLK_CON_GAT_GATE_CLK_CPUCL1_CPU, +}; + +/* List of parent clocks for Muxes in CMU_CPUCL0 */ +PNAME(mout_pll_cpucl1_p) = { "oscclk", "fout_cpucl1_pll" }; +PNAME(mout_cpucl1_switch_user_p) = { "oscclk", "dout_cpucl1_switch" }; +PNAME(mout_cpucl1_dbg_user_p) = { "oscclk", "dout_cpucl1_dbg" }; +PNAME(mout_cpucl1_pll_p) = { "mout_pll_cpucl1", + "mout_cpucl1_switch_user" }; + +static const struct samsung_pll_clock cpucl1_pll_clks[] __initconst = { + PLL(pll_0822x, CLK_FOUT_CPUCL1_PLL, "fout_cpucl1_pll", "oscclk", + PLL_LOCKTIME_PLL_CPUCL1, PLL_CON3_PLL_CPUCL1, cpu_pll_rates), +}; + +static const struct samsung_mux_clock cpucl1_mux_clks[] __initconst = { + MUX_F(CLK_MOUT_PLL_CPUCL1, "mout_pll_cpucl1", mout_pll_cpucl1_p, + PLL_CON0_PLL_CPUCL1, 4, 1, + CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0), + MUX_F(CLK_MOUT_CPUCL1_SWITCH_USER, "mout_cpucl1_switch_user", + mout_cpucl1_switch_user_p, + PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER, 4, 1, + CLK_SET_RATE_PARENT, 0), + MUX(CLK_MOUT_CPUCL1_DBG_USER, "mout_cpucl1_dbg_user", + mout_cpucl1_dbg_user_p, + PLL_CON0_MUX_CLKCMU_CPUCL1_DBG_USER, 4, 1), + MUX_F(CLK_MOUT_CPUCL1_PLL, "mout_cpucl1_pll", mout_cpucl1_pll_p, + CLK_CON_MUX_MUX_CLK_CPUCL1_PLL, 0, 1, CLK_SET_RATE_PARENT, 0), +}; + +static const struct samsung_div_clock cpucl1_div_clks[] __initconst = { + DIV_F(CLK_DOUT_CPUCL1_CPU, "dout_cpucl1_cpu", "mout_cpucl1_pll", + CLK_CON_DIV_DIV_CLK_CPUCL1_CPU, 0, 1, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), + DIV_F(CLK_DOUT_CPUCL1_CMUREF, "dout_cpucl1_cmuref", "dout_cpucl1_cpu", + CLK_CON_DIV_DIV_CLK_CPUCL1_CMUREF, 0, 3, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), + DIV_F(CLK_DOUT_CPUCL1_PCLK, "dout_cpucl1_pclk", "dout_cpucl1_cpu", + CLK_CON_DIV_DIV_CLK_CPUCL1_PCLK, 0, 4, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), + + /* EMBEDDED_CMU_CPUCL1 */ + DIV_F(CLK_DOUT_CLUSTER1_ACLK, "dout_cluster1_aclk", "gout_cluster1_cpu", + CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK, 0, 4, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), + DIV_F(CLK_DOUT_CLUSTER1_ATCLK, "dout_cluster1_atclk", + "gout_cluster1_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK, 0, 4, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), + DIV_F(CLK_DOUT_CLUSTER1_PCLKDBG, "dout_cluster1_pclkdbg", + "gout_cluster1_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLKDBG, 0, 4, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), + DIV_F(CLK_DOUT_CLUSTER1_PERIPHCLK, "dout_cluster1_periphclk", + "gout_cluster1_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK, 0, 4, + CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), +}; + +static const struct samsung_gate_clock cpucl1_gate_clks[] __initconst = { + GATE(CLK_GOUT_CPUCL1_CMU_CPUCL1_PCLK, "gout_cpucl1_cmu_cpucl1_pclk", + "dout_cpucl1_pclk", + CLK_CON_GAT_CLK_CPUCL1_CMU_CPUCL1_PCLK, 21, CLK_IGNORE_UNUSED, 0), + + /* 
EMBEDDED_CMU_CPUCL1 */ + GATE(CLK_GOUT_CLUSTER1_CPU, "gout_cluster1_cpu", "dout_cpucl1_cpu", + CLK_CON_GAT_GATE_CLK_CPUCL1_CPU, 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CLUSTER1_SCLK, "gout_cluster1_sclk", "gout_cluster1_cpu", + CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_SCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CLUSTER1_ATCLK, "gout_cluster1_atclk", + "dout_cluster1_atclk", + CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_ATCLK, 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CLUSTER1_PERIPHCLK, "gout_cluster1_periphclk", + "dout_cluster1_periphclk", + CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PERIPHCLK, 21, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_CLUSTER1_PCLK, "gout_cluster1_pclk", + "dout_cluster1_pclkdbg", + CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PCLK, 21, CLK_IGNORE_UNUSED, 0), +}; + +static const struct samsung_cpu_clock cpucl1_cpu_clks[] __initconst = { + CPU_CLK(CLK_CLUSTER1_SCLK, "cluster1_clk", CLK_MOUT_PLL_CPUCL1, + CLK_MOUT_CPUCL1_SWITCH_USER, 0, 0x0, CPUCLK_LAYOUT_E850_CL1, + exynos850_cluster_clk_d), +}; + +static const struct samsung_cmu_info cpucl1_cmu_info __initconst = { + .pll_clks = cpucl1_pll_clks, + .nr_pll_clks = ARRAY_SIZE(cpucl1_pll_clks), + .mux_clks = cpucl1_mux_clks, + .nr_mux_clks = ARRAY_SIZE(cpucl1_mux_clks), + .div_clks = cpucl1_div_clks, + .nr_div_clks = ARRAY_SIZE(cpucl1_div_clks), + .gate_clks = cpucl1_gate_clks, + .nr_gate_clks = ARRAY_SIZE(cpucl1_gate_clks), + .cpu_clks = cpucl1_cpu_clks, + .nr_cpu_clks = ARRAY_SIZE(cpucl1_cpu_clks), + .nr_clk_ids = CLKS_NR_CPUCL1, + .clk_regs = cpucl1_clk_regs, + .nr_clk_regs = ARRAY_SIZE(cpucl1_clk_regs), + .clk_name = "dout_cpucl1_switch", + .manual_plls = true, +}; + +static void __init exynos850_cmu_cpucl1_init(struct device_node *np) +{ + exynos_arm64_register_cmu(NULL, np, &cpucl1_cmu_info); +} + +/* Register CMU_CPUCL1 early, as CPU clocks should be available ASAP */ +CLK_OF_DECLARE(exynos850_cmu_cpucl1, "samsung,exynos850-cmu-cpucl1", + exynos850_cmu_cpucl1_init); + /* ---- CMU_G3D ------------------------------------------------------------- */ /* Register Offset definitions for CMU_G3D (0x11400000) */ diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c index bd3c1b0271..ba9570f7a5 100644 --- a/drivers/clk/samsung/clk-gs101.c +++ b/drivers/clk/samsung/clk-gs101.c @@ -15,10 +15,13 @@ #include "clk.h" #include "clk-exynos-arm64.h" +#include "clk-pll.h" /* NOTE: Must be equal to the last clock ID increased by one */ #define CLKS_NR_TOP (CLK_GOUT_CMU_TPU_UART + 1) #define CLKS_NR_APM (CLK_APM_PLL_DIV16_APM + 1) +#define CLKS_NR_HSI0 (CLK_GOUT_HSI0_XIU_P_HSI0_ACLK + 1) +#define CLKS_NR_HSI2 (CLK_GOUT_HSI2_XIU_P_HSI2_ACLK + 1) #define CLKS_NR_MISC (CLK_GOUT_MISC_XIU_D_MISC_ACLK + 1) #define CLKS_NR_PERIC0 (CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK + 1) #define CLKS_NR_PERIC1 (CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK + 1) @@ -1893,16 +1896,16 @@ static const struct samsung_gate_clock apm_gate_clks[] __initconst = { CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_G_SWD_IPCLKPORT_PCLK, 21, 0, 0), GATE(CLK_GOUT_APM_UASC_P_APM_ACLK, "gout_apm_uasc_p_apm_aclk", "gout_apm_func", - CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_ACLK, 21, 0, 0), + CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_ACLK, 21, CLK_IS_CRITICAL, 0), GATE(CLK_GOUT_APM_UASC_P_APM_PCLK, "gout_apm_uasc_p_apm_pclk", "gout_apm_func", - CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_PCLK, 21, 0, 0), + CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_PCLK, 21, CLK_IS_CRITICAL, 0), GATE(CLK_GOUT_APM_WDT_APM_PCLK, "gout_apm_wdt_apm_pclk", "gout_apm_func", 
CLK_CON_GAT_GOUT_BLK_APM_UID_WDT_APM_IPCLKPORT_PCLK, 21, 0, 0), GATE(CLK_GOUT_APM_XIU_DP_APM_ACLK, "gout_apm_xiu_dp_apm_aclk", "gout_apm_func", - CLK_CON_GAT_GOUT_BLK_APM_UID_XIU_DP_APM_IPCLKPORT_ACLK, 21, 0, 0), + CLK_CON_GAT_GOUT_BLK_APM_UID_XIU_DP_APM_IPCLKPORT_ACLK, 21, CLK_IS_CRITICAL, 0), }; static const struct samsung_cmu_info apm_cmu_info __initconst = { @@ -1919,6 +1922,958 @@ static const struct samsung_cmu_info apm_cmu_info __initconst = { .nr_clk_regs = ARRAY_SIZE(apm_clk_regs), }; +/* ---- CMU_HSI0 ------------------------------------------------------------ */ + +/* Register Offset definitions for CMU_HSI0 (0x11000000) */ +#define PLL_LOCKTIME_PLL_USB 0x0004 +#define PLL_CON0_PLL_USB 0x0140 +#define PLL_CON1_PLL_USB 0x0144 +#define PLL_CON2_PLL_USB 0x0148 +#define PLL_CON3_PLL_USB 0x014c +#define PLL_CON4_PLL_USB 0x0150 +#define PLL_CON0_MUX_CLKCMU_HSI0_ALT_USER 0x0600 +#define PLL_CON1_MUX_CLKCMU_HSI0_ALT_USER 0x0604 +#define PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER 0x0610 +#define PLL_CON1_MUX_CLKCMU_HSI0_BUS_USER 0x0614 +#define PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER 0x0620 +#define PLL_CON1_MUX_CLKCMU_HSI0_DPGTC_USER 0x0624 +#define PLL_CON0_MUX_CLKCMU_HSI0_TCXO_USER 0x0630 +#define PLL_CON1_MUX_CLKCMU_HSI0_TCXO_USER 0x0634 +#define PLL_CON0_MUX_CLKCMU_HSI0_USB20_USER 0x0640 +#define PLL_CON1_MUX_CLKCMU_HSI0_USB20_USER 0x0644 +#define PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER 0x0650 +#define PLL_CON1_MUX_CLKCMU_HSI0_USB31DRD_USER 0x0654 +#define PLL_CON0_MUX_CLKCMU_HSI0_USPDPDBG_USER 0x0660 +#define PLL_CON1_MUX_CLKCMU_HSI0_USPDPDBG_USER 0x0664 +#define HSI0_CMU_HSI0_CONTROLLER_OPTION 0x0800 +#define CLKOUT_CON_BLK_HSI0_CMU_HSI0_CLKOUT0 0x0810 +#define CLK_CON_MUX_MUX_CLK_HSI0_BUS 0x1000 +#define CLK_CON_MUX_MUX_CLK_HSI0_USB20_REF 0x1004 +#define CLK_CON_MUX_MUX_CLK_HSI0_USB31DRD 0x1008 +#define CLK_CON_DIV_DIV_CLK_HSI0_USB31DRD 0x1800 +#define CLK_CON_GAT_CLK_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK 0x2000 +#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_SUSPEND_CLK_26 0x2004 +#define CLK_CON_GAT_CLK_HSI0_ALT 0x2008 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK 0x200c +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK 0x2010 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK 0x2014 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_ETR_MIU_IPCLKPORT_I_ACLK 0x2018 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_ETR_MIU_IPCLKPORT_I_PCLK 0x201c +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_GPC_HSI0_IPCLKPORT_PCLK 0x2020 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_G_ETR_HSI0_IPCLKPORT_I_CLK 0x2024 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_AOCHSI0_IPCLKPORT_I_CLK 0x2028 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK 0x202c +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK 0x2030 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_AXI_D_HSI0AOC_IPCLKPORT_I_CLK 0x2034 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_AOC_IPCLKPORT_ACLK 0x2038 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_AOC_IPCLKPORT_PCLK 0x203c +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS0_IPCLKPORT_ACLK 0x2040 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS0_IPCLKPORT_PCLK 0x2044 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK 0x2048 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SSMT_USB_IPCLKPORT_ACLK 0x204c +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SSMT_USB_IPCLKPORT_PCLK 0x2050 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2 0x2054 +#define 
CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK 0x2058 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_CTRL_IPCLKPORT_ACLK 0x205c +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_CTRL_IPCLKPORT_PCLK 0x2060 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_LINK_IPCLKPORT_ACLK 0x2064 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_LINK_IPCLKPORT_PCLK 0x2068 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL 0x206c +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY 0x2070 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB20_PHY_REFCLK_26 0x2074 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40 0x2078 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL 0x207c +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK 0x2080 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK 0x2084 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_USBDPPHY_I_ACLK 0x2088 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_USBDPPHY_UDBG_I_APB_PCLK 0x208c +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D0_HSI0_IPCLKPORT_ACLK 0x2090 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D1_HSI0_IPCLKPORT_ACLK 0x2094 +#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_P_HSI0_IPCLKPORT_ACLK 0x2098 +#define DMYQCH_CON_USB31DRD_QCH 0x3000 +#define DMYQCH_CON_USB31DRD_QCH_REF 0x3004 +#define PCH_CON_LHM_AXI_G_ETR_HSI0_PCH 0x3008 +#define PCH_CON_LHM_AXI_P_AOCHSI0_PCH 0x300c +#define PCH_CON_LHM_AXI_P_HSI0_PCH 0x3010 +#define PCH_CON_LHS_ACEL_D_HSI0_PCH 0x3014 +#define PCH_CON_LHS_AXI_D_HSI0AOC_PCH 0x3018 +#define QCH_CON_DP_LINK_QCH_GTC_CLK 0x301c +#define QCH_CON_DP_LINK_QCH_PCLK 0x3020 +#define QCH_CON_D_TZPC_HSI0_QCH 0x3024 +#define QCH_CON_ETR_MIU_QCH_ACLK 0x3028 +#define QCH_CON_ETR_MIU_QCH_PCLK 0x302c +#define QCH_CON_GPC_HSI0_QCH 0x3030 +#define QCH_CON_HSI0_CMU_HSI0_QCH 0x3034 +#define QCH_CON_LHM_AXI_G_ETR_HSI0_QCH 0x3038 +#define QCH_CON_LHM_AXI_P_AOCHSI0_QCH 0x303c +#define QCH_CON_LHM_AXI_P_HSI0_QCH 0x3040 +#define QCH_CON_LHS_ACEL_D_HSI0_QCH 0x3044 +#define QCH_CON_LHS_AXI_D_HSI0AOC_QCH 0x3048 +#define QCH_CON_PPMU_HSI0_AOC_QCH 0x304c +#define QCH_CON_PPMU_HSI0_BUS0_QCH 0x3050 +#define QCH_CON_SSMT_USB_QCH 0x3054 +#define QCH_CON_SYSMMU_USB_QCH 0x3058 +#define QCH_CON_SYSREG_HSI0_QCH 0x305c +#define QCH_CON_UASC_HSI0_CTRL_QCH 0x3060 +#define QCH_CON_UASC_HSI0_LINK_QCH 0x3064 +#define QCH_CON_USB31DRD_QCH_APB 0x3068 +#define QCH_CON_USB31DRD_QCH_DBG 0x306c +#define QCH_CON_USB31DRD_QCH_PCS 0x3070 +#define QCH_CON_USB31DRD_QCH_SLV_CTRL 0x3074 +#define QCH_CON_USB31DRD_QCH_SLV_LINK 0x3078 +#define QUEUE_CTRL_REG_BLK_HSI0_CMU_HSI0 0x3c00 + +static const unsigned long hsi0_clk_regs[] __initconst = { + PLL_LOCKTIME_PLL_USB, + PLL_CON0_PLL_USB, + PLL_CON1_PLL_USB, + PLL_CON2_PLL_USB, + PLL_CON3_PLL_USB, + PLL_CON4_PLL_USB, + PLL_CON0_MUX_CLKCMU_HSI0_ALT_USER, + PLL_CON1_MUX_CLKCMU_HSI0_ALT_USER, + PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER, + PLL_CON1_MUX_CLKCMU_HSI0_BUS_USER, + PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER, + PLL_CON1_MUX_CLKCMU_HSI0_DPGTC_USER, + PLL_CON0_MUX_CLKCMU_HSI0_TCXO_USER, + PLL_CON1_MUX_CLKCMU_HSI0_TCXO_USER, + PLL_CON0_MUX_CLKCMU_HSI0_USB20_USER, + PLL_CON1_MUX_CLKCMU_HSI0_USB20_USER, + PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER, + PLL_CON1_MUX_CLKCMU_HSI0_USB31DRD_USER, + PLL_CON0_MUX_CLKCMU_HSI0_USPDPDBG_USER, + PLL_CON1_MUX_CLKCMU_HSI0_USPDPDBG_USER, + HSI0_CMU_HSI0_CONTROLLER_OPTION, + CLKOUT_CON_BLK_HSI0_CMU_HSI0_CLKOUT0, + 
CLK_CON_MUX_MUX_CLK_HSI0_BUS, + CLK_CON_MUX_MUX_CLK_HSI0_USB20_REF, + CLK_CON_MUX_MUX_CLK_HSI0_USB31DRD, + CLK_CON_DIV_DIV_CLK_HSI0_USB31DRD, + CLK_CON_GAT_CLK_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK, + CLK_CON_GAT_CLK_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_SUSPEND_CLK_26, + CLK_CON_GAT_CLK_HSI0_ALT, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_ETR_MIU_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_ETR_MIU_IPCLKPORT_I_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_GPC_HSI0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_G_ETR_HSI0_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_AOCHSI0_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_AXI_D_HSI0AOC_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_AOC_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_AOC_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS0_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_SSMT_USB_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_SSMT_USB_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_CTRL_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_CTRL_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_LINK_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_LINK_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB20_PHY_REFCLK_26, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_USBDPPHY_I_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_USBDPPHY_UDBG_I_APB_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D0_HSI0_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D1_HSI0_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_P_HSI0_IPCLKPORT_ACLK, + DMYQCH_CON_USB31DRD_QCH, + DMYQCH_CON_USB31DRD_QCH_REF, + PCH_CON_LHM_AXI_G_ETR_HSI0_PCH, + PCH_CON_LHM_AXI_P_AOCHSI0_PCH, + PCH_CON_LHM_AXI_P_HSI0_PCH, + PCH_CON_LHS_ACEL_D_HSI0_PCH, + PCH_CON_LHS_AXI_D_HSI0AOC_PCH, + QCH_CON_DP_LINK_QCH_GTC_CLK, + QCH_CON_DP_LINK_QCH_PCLK, + QCH_CON_D_TZPC_HSI0_QCH, + QCH_CON_ETR_MIU_QCH_ACLK, + QCH_CON_ETR_MIU_QCH_PCLK, + QCH_CON_GPC_HSI0_QCH, + QCH_CON_HSI0_CMU_HSI0_QCH, + QCH_CON_LHM_AXI_G_ETR_HSI0_QCH, + QCH_CON_LHM_AXI_P_AOCHSI0_QCH, + QCH_CON_LHM_AXI_P_HSI0_QCH, + QCH_CON_LHS_ACEL_D_HSI0_QCH, + QCH_CON_LHS_AXI_D_HSI0AOC_QCH, + QCH_CON_PPMU_HSI0_AOC_QCH, + QCH_CON_PPMU_HSI0_BUS0_QCH, + QCH_CON_SSMT_USB_QCH, + QCH_CON_SYSMMU_USB_QCH, + QCH_CON_SYSREG_HSI0_QCH, + QCH_CON_UASC_HSI0_CTRL_QCH, + QCH_CON_UASC_HSI0_LINK_QCH, + QCH_CON_USB31DRD_QCH_APB, + QCH_CON_USB31DRD_QCH_DBG, + QCH_CON_USB31DRD_QCH_PCS, + QCH_CON_USB31DRD_QCH_SLV_CTRL, + QCH_CON_USB31DRD_QCH_SLV_LINK, + 
QUEUE_CTRL_REG_BLK_HSI0_CMU_HSI0, +}; + +/* List of parent clocks for Muxes in CMU_HSI0 */ +PNAME(mout_pll_usb_p) = { "oscclk", "fout_usb_pll" }; +PNAME(mout_hsi0_alt_user_p) = { "oscclk", + "gout_hsi0_clk_hsi0_alt" }; +PNAME(mout_hsi0_bus_user_p) = { "oscclk", "dout_cmu_hsi0_bus" }; +PNAME(mout_hsi0_dpgtc_user_p) = { "oscclk", "dout_cmu_hsi0_dpgtc" }; +PNAME(mout_hsi0_tcxo_user_p) = { "oscclk", "tcxo_hsi1_hsi0" }; +PNAME(mout_hsi0_usb20_user_p) = { "oscclk", "usb20phy_phy_clock" }; +PNAME(mout_hsi0_usb31drd_user_p) = { "oscclk", + "dout_cmu_hsi0_usb31drd" }; +PNAME(mout_hsi0_usbdpdbg_user_p) = { "oscclk", + "dout_cmu_hsi0_usbdpdbg" }; +PNAME(mout_hsi0_bus_p) = { "mout_hsi0_bus_user", + "mout_hsi0_alt_user" }; +PNAME(mout_hsi0_usb20_ref_p) = { "fout_usb_pll", + "mout_hsi0_tcxo_user" }; +PNAME(mout_hsi0_usb31drd_p) = { "fout_usb_pll", + "mout_hsi0_usb31drd_user", + "dout_hsi0_usb31drd", + "fout_usb_pll" }; + +static const struct samsung_pll_rate_table cmu_hsi0_usb_pll_rates[] __initconst = { + PLL_35XX_RATE(24576000, 19200000, 150, 6, 5), + { /* sentinel */ } +}; + +static const struct samsung_pll_clock cmu_hsi0_pll_clks[] __initconst = { + PLL(pll_0518x, CLK_FOUT_USB_PLL, "fout_usb_pll", "oscclk", + PLL_LOCKTIME_PLL_USB, PLL_CON3_PLL_USB, + cmu_hsi0_usb_pll_rates), +}; + +static const struct samsung_mux_clock hsi0_mux_clks[] __initconst = { + MUX(CLK_MOUT_PLL_USB, + "mout_pll_usb", mout_pll_usb_p, + PLL_CON0_PLL_USB, 4, 1), + MUX(CLK_MOUT_HSI0_ALT_USER, + "mout_hsi0_alt_user", mout_hsi0_alt_user_p, + PLL_CON0_MUX_CLKCMU_HSI0_ALT_USER, 4, 1), + MUX(CLK_MOUT_HSI0_BUS_USER, + "mout_hsi0_bus_user", mout_hsi0_bus_user_p, + PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER, 4, 1), + MUX(CLK_MOUT_HSI0_DPGTC_USER, + "mout_hsi0_dpgtc_user", mout_hsi0_dpgtc_user_p, + PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER, 4, 1), + MUX(CLK_MOUT_HSI0_TCXO_USER, + "mout_hsi0_tcxo_user", mout_hsi0_tcxo_user_p, + PLL_CON0_MUX_CLKCMU_HSI0_TCXO_USER, 4, 1), + MUX(CLK_MOUT_HSI0_USB20_USER, + "mout_hsi0_usb20_user", mout_hsi0_usb20_user_p, + PLL_CON0_MUX_CLKCMU_HSI0_USB20_USER, 4, 1), + MUX(CLK_MOUT_HSI0_USB31DRD_USER, + "mout_hsi0_usb31drd_user", mout_hsi0_usb31drd_user_p, + PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER, 4, 1), + MUX(CLK_MOUT_HSI0_USBDPDBG_USER, + "mout_hsi0_usbdpdbg_user", mout_hsi0_usbdpdbg_user_p, + PLL_CON0_MUX_CLKCMU_HSI0_USPDPDBG_USER, 4, 1), + MUX(CLK_MOUT_HSI0_BUS, + "mout_hsi0_bus", mout_hsi0_bus_p, + CLK_CON_MUX_MUX_CLK_HSI0_BUS, 0, 1), + MUX(CLK_MOUT_HSI0_USB20_REF, + "mout_hsi0_usb20_ref", mout_hsi0_usb20_ref_p, + CLK_CON_MUX_MUX_CLK_HSI0_USB20_REF, 0, 1), + MUX(CLK_MOUT_HSI0_USB31DRD, + "mout_hsi0_usb31drd", mout_hsi0_usb31drd_p, + CLK_CON_MUX_MUX_CLK_HSI0_USB31DRD, 0, 2), +}; + +static const struct samsung_div_clock hsi0_div_clks[] __initconst = { + DIV(CLK_DOUT_HSI0_USB31DRD, + "dout_hsi0_usb31drd", "mout_hsi0_usb20_user", + CLK_CON_DIV_DIV_CLK_HSI0_USB31DRD, 0, 3), +}; + +static const struct samsung_gate_clock hsi0_gate_clks[] __initconst = { + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI0_PCLK, + "gout_hsi0_hsi0_pclk", "mout_hsi0_bus", + CLK_CON_GAT_CLK_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_SUSPEND_CLK_26, + "gout_hsi0_usb31drd_i_usb31drd_suspend_clk_26", + "mout_hsi0_usb20_ref", + CLK_CON_GAT_CLK_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_SUSPEND_CLK_26, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_CLK_HSI0_ALT, + "gout_hsi0_clk_hsi0_alt", "ioclk_clk_hsi0_alt", + CLK_CON_GAT_CLK_HSI0_ALT, 21, 0, 0), + 
GATE(CLK_GOUT_HSI0_DP_LINK_I_DP_GTC_CLK, + "gout_hsi0_dp_link_i_dp_gtc_clk", "mout_hsi0_dpgtc_user", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_DP_LINK_I_PCLK, + "gout_hsi0_dp_link_i_pclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_HSI0_D_TZPC_HSI0_PCLK, + "gout_hsi0_d_tzpc_hsi0_pclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_ETR_MIU_I_ACLK, + "gout_hsi0_etr_miu_i_aclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_ETR_MIU_IPCLKPORT_I_ACLK, 21, 0, 0), + GATE(CLK_GOUT_HSI0_ETR_MIU_I_PCLK, + "gout_hsi0_etr_miu_i_pclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_ETR_MIU_IPCLKPORT_I_PCLK, 21, 0, 0), + GATE(CLK_GOUT_HSI0_GPC_HSI0_PCLK, + "gout_hsi0_gpc_hsi0_pclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_GPC_HSI0_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_HSI0_LHM_AXI_G_ETR_HSI0_I_CLK, + "gout_hsi0_lhm_axi_g_etr_hsi0_i_clk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_G_ETR_HSI0_IPCLKPORT_I_CLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_LHM_AXI_P_AOCHSI0_I_CLK, + "gout_hsi0_lhm_axi_p_aochsi0_i_clk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_AOCHSI0_IPCLKPORT_I_CLK, + 21, 0, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI0_LHM_AXI_P_HSI0_I_CLK, + "gout_hsi0_lhm_axi_p_hsi0_i_clk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK, + 21, CLK_IGNORE_UNUSED, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_I_CLK, + "gout_hsi0_lhs_acel_d_hsi0_i_clk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_HSI0_LHS_AXI_D_HSI0AOC_I_CLK, + "gout_hsi0_lhs_axi_d_hsi0aoc_i_clk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_AXI_D_HSI0AOC_IPCLKPORT_I_CLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_PPMU_HSI0_AOC_ACLK, + "gout_hsi0_ppmu_hsi0_aoc_aclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_AOC_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_PPMU_HSI0_AOC_PCLK, + "gout_hsi0_ppmu_hsi0_aoc_pclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_AOC_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_PPMU_HSI0_BUS0_ACLK, + "gout_hsi0_ppmu_hsi0_bus0_aclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS0_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_PPMU_HSI0_BUS0_PCLK, + "gout_hsi0_ppmu_hsi0_bus0_pclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS0_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_CLK_HSI0_BUS_CLK, + "gout_hsi0_clk_hsi0_bus_clk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK, + 21, 0, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI0_SSMT_USB_ACLK, + "gout_hsi0_ssmt_usb_aclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_SSMT_USB_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI0_SSMT_USB_PCLK, + "gout_hsi0_ssmt_usb_pclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_SSMT_USB_IPCLKPORT_PCLK, + 21, CLK_IGNORE_UNUSED, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI0_SYSMMU_USB_CLK_S2, + "gout_hsi0_sysmmu_usb_clk_s2", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_HSI0_SYSREG_HSI0_PCLK, + "gout_hsi0_sysreg_hsi0_pclk", "mout_hsi0_bus", + 
CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_UASC_HSI0_CTRL_ACLK, + "gout_hsi0_uasc_hsi0_ctrl_aclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_CTRL_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_UASC_HSI0_CTRL_PCLK, + "gout_hsi0_uasc_hsi0_ctrl_pclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_CTRL_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_UASC_HSI0_LINK_ACLK, + "gout_hsi0_uasc_hsi0_link_aclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_LINK_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_UASC_HSI0_LINK_PCLK, + "gout_hsi0_uasc_hsi0_link_pclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_UASC_HSI0_LINK_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_USB31DRD_ACLK_PHYCTRL, + "gout_hsi0_usb31drd_aclk_phyctrl", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY, + "gout_hsi0_usb31drd_bus_clk_early", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_USB31DRD_I_USB20_PHY_REFCLK_26, + "gout_hsi0_usb31drd_i_usb20_phy_refclk_26", "mout_hsi0_usb20_ref", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB20_PHY_REFCLK_26, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_REF_CLK_40, + "gout_hsi0_usb31drd_i_usb31drd_ref_clk_40", "mout_hsi0_usb31drd", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_USB31DRD_I_USBDPPHY_REF_SOC_PLL, + "gout_hsi0_usb31drd_i_usbdpphy_ref_soc_pll", + "mout_hsi0_usbdpdbg_user", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_USB31DRD_I_USBDPPHY_SCL_APB_PCLK, + "gout_hsi0_usb31drd_i_usbdpphy_scl_apb_pclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_USB31DRD_I_USBPCS_APB_CLK, + "gout_hsi0_usb31drd_i_usbpcs_apb_clk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_USB31DRD_USBDPPHY_I_ACLK, + "gout_hsi0_usb31drd_usbdpphy_i_aclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_USBDPPHY_I_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI0_USB31DRD_USBDPPHY_UDBG_I_APB_PCLK, + "gout_hsi0_usb31drd_usbdpphy_udbg_i_apb_pclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_USBDPPHY_UDBG_I_APB_PCLK, + 21, 0, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI0_XIU_D0_HSI0_ACLK, + "gout_hsi0_xiu_d0_hsi0_aclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D0_HSI0_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI0_XIU_D1_HSI0_ACLK, + "gout_hsi0_xiu_d1_hsi0_aclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D1_HSI0_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI0_XIU_P_HSI0_ACLK, + "gout_hsi0_xiu_p_hsi0_aclk", "mout_hsi0_bus", + CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_P_HSI0_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), +}; + +static const struct samsung_fixed_rate_clock hsi0_fixed_clks[] __initconst = { + FRATE(0, "tcxo_hsi1_hsi0", NULL, 0, 26000000), + FRATE(0, "usb20phy_phy_clock", NULL, 0, 120000000), + /* until we implement APMGSA */ + FRATE(0, "ioclk_clk_hsi0_alt", NULL, 0, 213000000), +}; + +static const struct samsung_cmu_info hsi0_cmu_info __initconst = { + 
.pll_clks = cmu_hsi0_pll_clks, + .nr_pll_clks = ARRAY_SIZE(cmu_hsi0_pll_clks), + .mux_clks = hsi0_mux_clks, + .nr_mux_clks = ARRAY_SIZE(hsi0_mux_clks), + .div_clks = hsi0_div_clks, + .nr_div_clks = ARRAY_SIZE(hsi0_div_clks), + .gate_clks = hsi0_gate_clks, + .nr_gate_clks = ARRAY_SIZE(hsi0_gate_clks), + .fixed_clks = hsi0_fixed_clks, + .nr_fixed_clks = ARRAY_SIZE(hsi0_fixed_clks), + .nr_clk_ids = CLKS_NR_HSI0, + .clk_regs = hsi0_clk_regs, + .nr_clk_regs = ARRAY_SIZE(hsi0_clk_regs), + .clk_name = "bus", +}; + +/* ---- CMU_HSI2 ------------------------------------------------------------ */ + +/* Register Offset definitions for CMU_HSI2 (0x14400000) */ +#define PLL_CON0_MUX_CLKCMU_HSI2_BUS_USER 0x0600 +#define PLL_CON1_MUX_CLKCMU_HSI2_BUS_USER 0x0604 +#define PLL_CON0_MUX_CLKCMU_HSI2_MMC_CARD_USER 0x0610 +#define PLL_CON1_MUX_CLKCMU_HSI2_MMC_CARD_USER 0x0614 +#define PLL_CON0_MUX_CLKCMU_HSI2_PCIE_USER 0x0620 +#define PLL_CON1_MUX_CLKCMU_HSI2_PCIE_USER 0x0624 +#define PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER 0x0630 +#define PLL_CON1_MUX_CLKCMU_HSI2_UFS_EMBD_USER 0x0634 +#define HSI2_CMU_HSI2_CONTROLLER_OPTION 0x0800 +#define CLKOUT_CON_BLK_HSI2_CMU_HSI2_CLKOUT0 0x0810 +#define CLK_CON_GAT_CLK_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_PCIE_SUB_CTRL_INST_0_PHY_REFCLK_IN 0x2000 +#define CLK_CON_GAT_CLK_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_PCIE_SUB_CTRL_INST_0_PHY_REFCLK_IN 0x2004 +#define CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4A_1_IPCLKPORT_ACLK 0x2008 +#define CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4A_1_IPCLKPORT_PCLK 0x200c +#define CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4B_1_IPCLKPORT_ACLK 0x2010 +#define CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4B_1_IPCLKPORT_PCLK 0x2014 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_D_TZPC_HSI2_IPCLKPORT_PCLK 0x201c +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_GPC_HSI2_IPCLKPORT_PCLK 0x2020 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_GPIO_HSI2_IPCLKPORT_PCLK 0x2024 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_HSI2_CMU_HSI2_IPCLKPORT_PCLK 0x2028 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_LHM_AXI_P_HSI2_IPCLKPORT_I_CLK 0x202c +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_LHS_ACEL_D_HSI2_IPCLKPORT_I_CLK 0x2030 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_MMC_CARD_IPCLKPORT_I_ACLK 0x2034 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_MMC_CARD_IPCLKPORT_SDCLKIN 0x2038 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG 0x203c +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG 0x2040 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG 0x2044 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK 0x2048 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG 0x204c +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG 0x2050 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG 0x2054 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK 0x2058 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_PHY_UDBG_I_APB_PCLK 0x205c +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_PIPE_PAL_PCIE_INST_0_I_APB_PCLK 0x2060 +#define 
CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_SF_PCIEPHY210X2_LN05LPE_QCH_TM_WRAPPER_INST_0_I_APB_PCLK 0x2064 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_IA_GEN4A_1_IPCLKPORT_I_CLK 0x2068 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_IA_GEN4B_1_IPCLKPORT_I_CLK 0x206c +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PPMU_HSI2_IPCLKPORT_ACLK 0x2070 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_PPMU_HSI2_IPCLKPORT_PCLK 0x2074 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_MMC_CARD_HSI2_IPCLKPORT_ACLK 0x2078 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_MMC_CARD_HSI2_IPCLKPORT_PCLK 0x207c +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4A_HSI2_IPCLKPORT_ACLK 0x2080 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4A_HSI2_IPCLKPORT_PCLK 0x2084 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4B_HSI2_IPCLKPORT_ACLK 0x2088 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4B_HSI2_IPCLKPORT_PCLK 0x208c +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_ACLK 0x2090 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_PCLK 0x2094 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_BUS_IPCLKPORT_CLK 0x2098 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_OSCCLK_IPCLKPORT_CLK 0x209c +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_SSMT_HSI2_IPCLKPORT_ACLK 0x20a0 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_SSMT_HSI2_IPCLKPORT_PCLK 0x20a4 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSMMU_HSI2_IPCLKPORT_CLK_S2 0x20a8 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSREG_HSI2_IPCLKPORT_PCLK 0x20ac +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_ACLK 0x20b0 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_PCLK 0x20b4 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_SLV_1_IPCLKPORT_ACLK 0x20b8 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_SLV_1_IPCLKPORT_PCLK 0x20bc +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_DBI_1_IPCLKPORT_ACLK 0x20c0 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_DBI_1_IPCLKPORT_PCLK 0x20c4 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_SLV_1_IPCLKPORT_ACLK 0x20c8 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_SLV_1_IPCLKPORT_PCLK 0x20cc +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_ACLK 0x20d0 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO 0x20d4 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK 0x20d8 +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_XIU_D_HSI2_IPCLKPORT_ACLK 0x20dc +#define CLK_CON_GAT_GOUT_BLK_HSI2_UID_XIU_P_HSI2_IPCLKPORT_ACLK 0x20e0 +#define DMYQCH_CON_PCIE_GEN4_1_QCH_SCLK_1 0x3000 +#define PCH_CON_LHM_AXI_P_HSI2_PCH 0x3008 +#define PCH_CON_LHS_ACEL_D_HSI2_PCH 0x300c +#define QCH_CON_D_TZPC_HSI2_QCH 0x3010 +#define QCH_CON_GPC_HSI2_QCH 0x3014 +#define QCH_CON_GPIO_HSI2_QCH 0x3018 +#define QCH_CON_HSI2_CMU_HSI2_QCH 0x301c +#define QCH_CON_LHM_AXI_P_HSI2_QCH 0x3020 +#define QCH_CON_LHS_ACEL_D_HSI2_QCH 0x3024 +#define QCH_CON_MMC_CARD_QCH 0x3028 +#define QCH_CON_PCIE_GEN4_1_QCH_APB_1 0x302c +#define QCH_CON_PCIE_GEN4_1_QCH_APB_2 0x3030 +#define QCH_CON_PCIE_GEN4_1_QCH_AXI_1 0x3034 +#define QCH_CON_PCIE_GEN4_1_QCH_AXI_2 0x3038 +#define QCH_CON_PCIE_GEN4_1_QCH_DBG_1 0x303c +#define QCH_CON_PCIE_GEN4_1_QCH_DBG_2 0x3040 +#define QCH_CON_PCIE_GEN4_1_QCH_PCS_APB 0x3044 +#define QCH_CON_PCIE_GEN4_1_QCH_PMA_APB 0x3048 +#define QCH_CON_PCIE_GEN4_1_QCH_UDBG 0x304c +#define QCH_CON_PCIE_IA_GEN4A_1_QCH 0x3050 +#define QCH_CON_PCIE_IA_GEN4B_1_QCH 0x3054 +#define QCH_CON_PPMU_HSI2_QCH 0x3058 +#define QCH_CON_QE_MMC_CARD_HSI2_QCH 
0x305c +#define QCH_CON_QE_PCIE_GEN4A_HSI2_QCH 0x3060 +#define QCH_CON_QE_PCIE_GEN4B_HSI2_QCH 0x3064 +#define QCH_CON_QE_UFS_EMBD_HSI2_QCH 0x3068 +#define QCH_CON_SSMT_HSI2_QCH 0x306c +#define QCH_CON_SSMT_PCIE_IA_GEN4A_1_QCH 0x3070 +#define QCH_CON_SSMT_PCIE_IA_GEN4B_1_QCH 0x3074 +#define QCH_CON_SYSMMU_HSI2_QCH 0x3078 +#define QCH_CON_SYSREG_HSI2_QCH 0x307c +#define QCH_CON_UASC_PCIE_GEN4A_DBI_1_QCH 0x3080 +#define QCH_CON_UASC_PCIE_GEN4A_SLV_1_QCH 0x3084 +#define QCH_CON_UASC_PCIE_GEN4B_DBI_1_QCH 0x3088 +#define QCH_CON_UASC_PCIE_GEN4B_SLV_1_QCH 0x308c +#define QCH_CON_UFS_EMBD_QCH 0x3090 +#define QCH_CON_UFS_EMBD_QCH_FMP 0x3094 +#define QUEUE_CTRL_REG_BLK_HSI2_CMU_HSI2 0x3c00 + +static const unsigned long cmu_hsi2_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_HSI2_BUS_USER, + PLL_CON1_MUX_CLKCMU_HSI2_BUS_USER, + PLL_CON0_MUX_CLKCMU_HSI2_MMC_CARD_USER, + PLL_CON1_MUX_CLKCMU_HSI2_MMC_CARD_USER, + PLL_CON0_MUX_CLKCMU_HSI2_PCIE_USER, + PLL_CON1_MUX_CLKCMU_HSI2_PCIE_USER, + PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER, + PLL_CON1_MUX_CLKCMU_HSI2_UFS_EMBD_USER, + HSI2_CMU_HSI2_CONTROLLER_OPTION, + CLKOUT_CON_BLK_HSI2_CMU_HSI2_CLKOUT0, + CLK_CON_GAT_CLK_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_PCIE_SUB_CTRL_INST_0_PHY_REFCLK_IN, + CLK_CON_GAT_CLK_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_PCIE_SUB_CTRL_INST_0_PHY_REFCLK_IN, + CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4A_1_IPCLKPORT_ACLK, + CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4A_1_IPCLKPORT_PCLK, + CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4B_1_IPCLKPORT_ACLK, + CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4B_1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_D_TZPC_HSI2_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_GPC_HSI2_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_GPIO_HSI2_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_HSI2_CMU_HSI2_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_LHM_AXI_P_HSI2_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_LHS_ACEL_D_HSI2_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_MMC_CARD_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_MMC_CARD_IPCLKPORT_SDCLKIN, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_PHY_UDBG_I_APB_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_PIPE_PAL_PCIE_INST_0_I_APB_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_SF_PCIEPHY210X2_LN05LPE_QCH_TM_WRAPPER_INST_0_I_APB_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_IA_GEN4A_1_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_IA_GEN4B_1_IPCLKPORT_I_CLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PPMU_HSI2_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PPMU_HSI2_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_MMC_CARD_HSI2_IPCLKPORT_ACLK, + 
CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_MMC_CARD_HSI2_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4A_HSI2_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4A_HSI2_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4B_HSI2_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4B_HSI2_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_BUS_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_OSCCLK_IPCLKPORT_CLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_SSMT_HSI2_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_SSMT_HSI2_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSMMU_HSI2_IPCLKPORT_CLK_S2, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSREG_HSI2_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_SLV_1_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_SLV_1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_DBI_1_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_DBI_1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_SLV_1_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_SLV_1_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_XIU_D_HSI2_IPCLKPORT_ACLK, + CLK_CON_GAT_GOUT_BLK_HSI2_UID_XIU_P_HSI2_IPCLKPORT_ACLK, + DMYQCH_CON_PCIE_GEN4_1_QCH_SCLK_1, + PCH_CON_LHM_AXI_P_HSI2_PCH, + PCH_CON_LHS_ACEL_D_HSI2_PCH, + QCH_CON_D_TZPC_HSI2_QCH, + QCH_CON_GPC_HSI2_QCH, + QCH_CON_GPIO_HSI2_QCH, + QCH_CON_HSI2_CMU_HSI2_QCH, + QCH_CON_LHM_AXI_P_HSI2_QCH, + QCH_CON_LHS_ACEL_D_HSI2_QCH, + QCH_CON_MMC_CARD_QCH, + QCH_CON_PCIE_GEN4_1_QCH_APB_1, + QCH_CON_PCIE_GEN4_1_QCH_APB_2, + QCH_CON_PCIE_GEN4_1_QCH_AXI_1, + QCH_CON_PCIE_GEN4_1_QCH_AXI_2, + QCH_CON_PCIE_GEN4_1_QCH_DBG_1, + QCH_CON_PCIE_GEN4_1_QCH_DBG_2, + QCH_CON_PCIE_GEN4_1_QCH_PCS_APB, + QCH_CON_PCIE_GEN4_1_QCH_PMA_APB, + QCH_CON_PCIE_GEN4_1_QCH_UDBG, + QCH_CON_PCIE_IA_GEN4A_1_QCH, + QCH_CON_PCIE_IA_GEN4B_1_QCH, + QCH_CON_PPMU_HSI2_QCH, + QCH_CON_QE_MMC_CARD_HSI2_QCH, + QCH_CON_QE_PCIE_GEN4A_HSI2_QCH, + QCH_CON_QE_PCIE_GEN4B_HSI2_QCH, + QCH_CON_QE_UFS_EMBD_HSI2_QCH, + QCH_CON_SSMT_HSI2_QCH, + QCH_CON_SSMT_PCIE_IA_GEN4A_1_QCH, + QCH_CON_SSMT_PCIE_IA_GEN4B_1_QCH, + QCH_CON_SYSMMU_HSI2_QCH, + QCH_CON_SYSREG_HSI2_QCH, + QCH_CON_UASC_PCIE_GEN4A_DBI_1_QCH, + QCH_CON_UASC_PCIE_GEN4A_SLV_1_QCH, + QCH_CON_UASC_PCIE_GEN4B_DBI_1_QCH, + QCH_CON_UASC_PCIE_GEN4B_SLV_1_QCH, + QCH_CON_UFS_EMBD_QCH, + QCH_CON_UFS_EMBD_QCH_FMP, + QUEUE_CTRL_REG_BLK_HSI2_CMU_HSI2, +}; + +PNAME(mout_hsi2_bus_user_p) = { "oscclk", "dout_cmu_hsi2_bus" }; +PNAME(mout_hsi2_mmc_card_user_p) = { "oscclk", "dout_cmu_hsi2_mmc_card" }; +PNAME(mout_hsi2_pcie_user_p) = { "oscclk", "dout_cmu_hsi2_pcie" }; +PNAME(mout_hsi2_ufs_embd_user_p) = { "oscclk", "dout_cmu_hsi2_ufs_embd" }; + +static const struct samsung_mux_clock hsi2_mux_clks[] __initconst = { + MUX(CLK_MOUT_HSI2_BUS_USER, "mout_hsi2_bus_user", mout_hsi2_bus_user_p, + PLL_CON0_MUX_CLKCMU_HSI2_BUS_USER, 4, 1), + MUX(CLK_MOUT_HSI2_MMC_CARD_USER, "mout_hsi2_mmc_card_user", + mout_hsi2_mmc_card_user_p, PLL_CON0_MUX_CLKCMU_HSI2_MMC_CARD_USER, + 4, 1), + MUX(CLK_MOUT_HSI2_PCIE_USER, 
"mout_hsi2_pcie_user", + mout_hsi2_pcie_user_p, PLL_CON0_MUX_CLKCMU_HSI2_PCIE_USER, + 4, 1), + MUX(CLK_MOUT_HSI2_UFS_EMBD_USER, "mout_hsi2_ufs_embd_user", + mout_hsi2_ufs_embd_user_p, PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER, + 4, 1), +}; + +static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = { + GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_PHY_REFCLK_IN, + "gout_hsi2_pcie_gen4_1_pcie_003_phy_refclk_in", + "mout_hsi2_pcie_user", + CLK_CON_GAT_CLK_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_PCIE_SUB_CTRL_INST_0_PHY_REFCLK_IN, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_PHY_REFCLK_IN, + "gout_hsi2_pcie_gen4_1_pcie_004_phy_refclk_in", + "mout_hsi2_pcie_user", + CLK_CON_GAT_CLK_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_PCIE_SUB_CTRL_INST_0_PHY_REFCLK_IN, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4A_1_ACLK, + "gout_hsi2_ssmt_pcie_ia_gen4a_1_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4A_1_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4A_1_PCLK, + "gout_hsi2_ssmt_pcie_ia_gen4a_1_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4A_1_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4B_1_ACLK, + "gout_hsi2_ssmt_pcie_ia_gen4b_1_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4B_1_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4B_1_PCLK, + "gout_hsi2_ssmt_pcie_ia_gen4b_1_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_CLK_BLK_HSI2_UID_SSMT_PCIE_IA_GEN4B_1_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_D_TZPC_HSI2_PCLK, + "gout_hsi2_d_tzpc_hsi2_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_D_TZPC_HSI2_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_GPC_HSI2_PCLK, + "gout_hsi2_gpc_hsi2_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_GPC_HSI2_IPCLKPORT_PCLK, 21, 0, 0), + GATE(CLK_GOUT_HSI2_GPIO_HSI2_PCLK, + "gout_hsi2_gpio_hsi2_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_GPIO_HSI2_IPCLKPORT_PCLK, 21, + CLK_IGNORE_UNUSED, 0), + /* Disabling this clock makes the system hang. Mark the clock as critical. */ + GATE(CLK_GOUT_HSI2_HSI2_CMU_HSI2_PCLK, + "gout_hsi2_hsi2_cmu_hsi2_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_HSI2_CMU_HSI2_IPCLKPORT_PCLK, + 21, CLK_IS_CRITICAL, 0), + /* Disabling this clock makes the system hang. Mark the clock as critical. 
*/ + GATE(CLK_GOUT_HSI2_LHM_AXI_P_HSI2_I_CLK, + "gout_hsi2_lhm_axi_p_hsi2_i_clk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_LHM_AXI_P_HSI2_IPCLKPORT_I_CLK, + 21, CLK_IS_CRITICAL, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI2_LHS_ACEL_D_HSI2_I_CLK, + "gout_hsi2_lhs_acel_d_hsi2_i_clk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_LHS_ACEL_D_HSI2_IPCLKPORT_I_CLK, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_HSI2_MMC_CARD_I_ACLK, + "gout_hsi2_mmc_card_i_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_MMC_CARD_IPCLKPORT_I_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_MMC_CARD_SDCLKIN, + "gout_hsi2_mmc_card_sdclkin", "mout_hsi2_mmc_card_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_MMC_CARD_IPCLKPORT_SDCLKIN, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_DBI_ACLK_UG, + "gout_hsi2_pcie_gen4_1_pcie_003_dbi_aclk_ug", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_MSTR_ACLK_UG, + "gout_hsi2_pcie_gen4_1_pcie_003_mstr_aclk_ug", + "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_SLV_ACLK_UG, + "gout_hsi2_pcie_gen4_1_pcie_003_slv_aclk_ug", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_G4X2_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_I_DRIVER_APB_CLK, + "gout_hsi2_pcie_gen4_1_pcie_003_i_driver_apb_clk", + "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_003_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_DBI_ACLK_UG, + "gout_hsi2_pcie_gen4_1_pcie_004_dbi_aclk_ug", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_MSTR_ACLK_UG, + "gout_hsi2_pcie_gen4_1_pcie_004_mstr_aclk_ug", + "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_SLV_ACLK_UG, + "gout_hsi2_pcie_gen4_1_pcie_004_slv_aclk_ug", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_G4X1_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_I_DRIVER_APB_CLK, + "gout_hsi2_pcie_gen4_1_pcie_004_i_driver_apb_clk", + "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCIE_004_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PHY_UDBG_I_APB_PCLK, + "gout_hsi2_pcie_gen4_1_pcs_pma_phy_udbg_i_apb_pclk", + "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_PHY_UDBG_I_APB_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PIPE_PAL_PCIE_I_APB_PCLK, + "gout_hsi2_pcie_gen4_1_pcs_pma_pipe_pal_pcie_i_apb_pclk", + "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_PIPE_PAL_PCIE_INST_0_I_APB_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PCIEPHY210X2_QCH_I_APB_PCLK, + "gout_hsi2_pcie_gen4_1_pcs_pma_pciephy210x2_qch_i_apb_pclk", + "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_GEN4_1_IPCLKPORT_PCS_PMA_INST_0_SF_PCIEPHY210X2_LN05LPE_QCH_TM_WRAPPER_INST_0_I_APB_PCLK, + 21, 0, 0), + 
GATE(CLK_GOUT_HSI2_PCIE_IA_GEN4A_1_I_CLK, + "gout_hsi2_pcie_ia_gen4a_1_i_clk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_IA_GEN4A_1_IPCLKPORT_I_CLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PCIE_IA_GEN4B_1_I_CLK, + "gout_hsi2_pcie_ia_gen4b_1_i_clk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PCIE_IA_GEN4B_1_IPCLKPORT_I_CLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PPMU_HSI2_ACLK, + "gout_hsi2_ppmu_hsi2_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PPMU_HSI2_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_PPMU_HSI2_PCLK, + "gout_hsi2_ppmu_hsi2_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_PPMU_HSI2_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_QE_MMC_CARD_HSI2_ACLK, + "gout_hsi2_qe_mmc_card_hsi2_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_MMC_CARD_HSI2_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_QE_MMC_CARD_HSI2_PCLK, + "gout_hsi2_qe_mmc_card_hsi2_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_MMC_CARD_HSI2_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_QE_PCIE_GEN4A_HSI2_ACLK, + "gout_hsi2_qe_pcie_gen4a_hsi2_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4A_HSI2_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_QE_PCIE_GEN4A_HSI2_PCLK, + "gout_hsi2_qe_pcie_gen4a_hsi2_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4A_HSI2_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_QE_PCIE_GEN4B_HSI2_ACLK, + "gout_hsi2_qe_pcie_gen4b_hsi2_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4B_HSI2_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_QE_PCIE_GEN4B_HSI2_PCLK, + "gout_hsi2_qe_pcie_gen4b_hsi2_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_PCIE_GEN4B_HSI2_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_ACLK, + "gout_hsi2_qe_ufs_embd_hsi2_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_PCLK, + "gout_hsi2_qe_ufs_embd_hsi2_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_CLK_HSI2_BUS_CLK, + "gout_hsi2_clk_hsi2_bus_clk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_BUS_IPCLKPORT_CLK, + 21, CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_HSI2_CLK_HSI2_OSCCLK_CLK, + "gout_hsi2_clk_hsi2_oscclk_clk", "oscclk", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_OSCCLK_IPCLKPORT_CLK, + 21, 0, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI2_SSMT_HSI2_ACLK, + "gout_hsi2_ssmt_hsi2_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_SSMT_HSI2_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI2_SSMT_HSI2_PCLK, + "gout_hsi2_ssmt_hsi2_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_SSMT_HSI2_IPCLKPORT_PCLK, + 21, CLK_IGNORE_UNUSED, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI2_SYSMMU_HSI2_CLK_S2, + "gout_hsi2_sysmmu_hsi2_clk_s2", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSMMU_HSI2_IPCLKPORT_CLK_S2, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_HSI2_SYSREG_HSI2_PCLK, + "gout_hsi2_sysreg_hsi2_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSREG_HSI2_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4A_DBI_1_ACLK, + "gout_hsi2_uasc_pcie_gen4a_dbi_1_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_ACLK, + 21, 0, 0), + 
GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4A_DBI_1_PCLK, + "gout_hsi2_uasc_pcie_gen4a_dbi_1_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4A_SLV_1_ACLK, + "gout_hsi2_uasc_pcie_gen4a_slv_1_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_SLV_1_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4A_SLV_1_PCLK, + "gout_hsi2_uasc_pcie_gen4a_slv_1_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_SLV_1_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4B_DBI_1_ACLK, + "gout_hsi2_uasc_pcie_gen4b_dbi_1_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_DBI_1_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4B_DBI_1_PCLK, + "gout_hsi2_uasc_pcie_gen4b_dbi_1_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_DBI_1_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4B_SLV_1_ACLK, + "gout_hsi2_uasc_pcie_gen4b_slv_1_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_SLV_1_IPCLKPORT_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4B_SLV_1_PCLK, + "gout_hsi2_uasc_pcie_gen4b_slv_1_pclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4B_SLV_1_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_UFS_EMBD_I_ACLK, + "gout_hsi2_ufs_embd_i_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_ACLK, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_UFS_EMBD_I_CLK_UNIPRO, + "gout_hsi2_ufs_embd_i_clk_unipro", "mout_hsi2_ufs_embd_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO, + 21, 0, 0), + GATE(CLK_GOUT_HSI2_UFS_EMBD_I_FMP_CLK, + "gout_hsi2_ufs_embd_i_fmp_clk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK, + 21, 0, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI2_XIU_D_HSI2_ACLK, + "gout_hsi2_xiu_d_hsi2_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_XIU_D_HSI2_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), + /* TODO: should have a driver for this */ + GATE(CLK_GOUT_HSI2_XIU_P_HSI2_ACLK, + "gout_hsi2_xiu_p_hsi2_aclk", "mout_hsi2_bus_user", + CLK_CON_GAT_GOUT_BLK_HSI2_UID_XIU_P_HSI2_IPCLKPORT_ACLK, + 21, CLK_IGNORE_UNUSED, 0), +}; + +static const struct samsung_cmu_info hsi2_cmu_info __initconst = { + .mux_clks = hsi2_mux_clks, + .nr_mux_clks = ARRAY_SIZE(hsi2_mux_clks), + .gate_clks = hsi2_gate_clks, + .nr_gate_clks = ARRAY_SIZE(hsi2_gate_clks), + .nr_clk_ids = CLKS_NR_HSI2, + .clk_regs = cmu_hsi2_clk_regs, + .nr_clk_regs = ARRAY_SIZE(cmu_hsi2_clk_regs), + .clk_name = "bus", +}; + /* ---- CMU_MISC ------------------------------------------------------------ */ /* Register Offset definitions for CMU_MISC (0x10010000) */ @@ -3441,6 +4396,12 @@ static const struct of_device_id gs101_cmu_of_match[] = { { .compatible = "google,gs101-cmu-apm", .data = &apm_cmu_info, + }, { + .compatible = "google,gs101-cmu-hsi0", + .data = &hsi0_cmu_info, + }, { + .compatible = "google,gs101-cmu-hsi2", + .data = &hsi2_cmu_info, }, { .compatible = "google,gs101-cmu-peric0", .data = &peric0_cmu_info, diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index b6701905f2..afa5760ed3 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -139,7 +139,7 @@ void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx, unsigned int nr_clk) { struct clk_hw *clk_hw; - unsigned int idx, ret; + unsigned int 
idx; for (idx = 0; idx < nr_clk; idx++, list++) { clk_hw = clk_hw_register_fixed_rate(ctx->dev, list->name, @@ -151,15 +151,6 @@ void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx, } samsung_clk_add_lookup(ctx, clk_hw, list->id); - - /* - * Unconditionally add a clock lookup for the fixed rate clocks. - * There are not many of these on any of Samsung platforms. - */ - ret = clk_hw_register_clkdev(clk_hw, list->name, NULL); - if (ret) - pr_err("%s: failed to register clock lookup for %s", - __func__, list->name); } } diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h index 556167350b..fb06caa71f 100644 --- a/drivers/clk/samsung/clk.h +++ b/drivers/clk/samsung/clk.h @@ -337,6 +337,7 @@ struct samsung_clock_reg_cache { * @suspend_regs: list of clock registers to set before suspend * @nr_suspend_regs: count of clock registers in @suspend_regs * @clk_name: name of the parent clock needed for CMU register access + * @manual_plls: Enable manual control for PLL clocks */ struct samsung_cmu_info { const struct samsung_pll_clock *pll_clks; @@ -361,6 +362,9 @@ struct samsung_cmu_info { const struct samsung_clk_reg_dump *suspend_regs; unsigned int nr_suspend_regs; const char *clk_name; + + /* ARM64 Exynos CMUs */ + bool manual_plls; }; struct samsung_clk_provider *samsung_clk_init(struct device *dev, diff --git a/drivers/clk/sophgo/Kconfig b/drivers/clk/sophgo/Kconfig new file mode 100644 index 0000000000..1cc49be71b --- /dev/null +++ b/drivers/clk/sophgo/Kconfig @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +# common clock support for SOPHGO SoC family. + +config CLK_SOPHGO_CV1800 + tristate "Support for the Sophgo CV1800 series SoCs clock controller" + depends on ARCH_SOPHGO || COMPILE_TEST + help + This driver supports the clock controller of the Sophgo CV18XX series SoCs. + The driver requires a 25 MHz oscillator to generate the clocks.
+ It includes PLLs, common clock functions and some vendor clocks for + IPs of the CV18XX series SoCs. diff --git a/drivers/clk/sophgo/Makefile b/drivers/clk/sophgo/Makefile new file mode 100644 index 0000000000..a503207642 --- /dev/null +++ b/drivers/clk/sophgo/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_CLK_SOPHGO_CV1800) += clk-sophgo-cv1800.o + +clk-sophgo-cv1800-y += clk-cv1800.o +clk-sophgo-cv1800-y += clk-cv18xx-common.o +clk-sophgo-cv1800-y += clk-cv18xx-ip.o +clk-sophgo-cv1800-y += clk-cv18xx-pll.o diff --git a/drivers/clk/sophgo/clk-cv1800.c b/drivers/clk/sophgo/clk-cv1800.c new file mode 100644 index 0000000000..2da4c24621 --- /dev/null +++ b/drivers/clk/sophgo/clk-cv1800.c @@ -0,0 +1,1537 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2023 Inochi Amaoto + */ + +#include +#include +#include +#include +#include + +#include "clk-cv1800.h" + +#include "clk-cv18xx-common.h" +#include "clk-cv18xx-ip.h" +#include "clk-cv18xx-pll.h" + +struct cv1800_clk_ctrl; + +struct cv1800_clk_desc { + struct clk_hw_onecell_data *clks_data; + + int (*pre_init)(struct device *dev, void __iomem *base, + struct cv1800_clk_ctrl *ctrl, + const struct cv1800_clk_desc *desc); +}; + +struct cv1800_clk_ctrl { + const struct cv1800_clk_desc *desc; + spinlock_t lock; +}; + +#define CV1800_DIV_FLAG \ + (CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST) +static const struct clk_parent_data osc_parents[] = { + { .index = 0 }, +}; + +static const struct cv1800_clk_pll_limit pll_limits[] = { + { + .pre_div = _CV1800_PLL_LIMIT(1, 127), + .div = _CV1800_PLL_LIMIT(6, 127), + .post_div = _CV1800_PLL_LIMIT(1, 127), + .ictrl = _CV1800_PLL_LIMIT(0, 7), + .mode = _CV1800_PLL_LIMIT(0, 3), + }, + { + .pre_div = _CV1800_PLL_LIMIT(1, 127), + .div = _CV1800_PLL_LIMIT(6, 127), + .post_div = _CV1800_PLL_LIMIT(1, 127), + .ictrl = _CV1800_PLL_LIMIT(0, 7), + .mode = _CV1800_PLL_LIMIT(0, 3), + }, +}; + +static CV1800_INTEGRAL_PLL(clk_fpll, osc_parents, + REG_FPLL_CSR, + REG_PLL_G6_CTRL, 8, + REG_PLL_G6_STATUS, 2, + pll_limits, + CLK_IS_CRITICAL); + +static CV1800_INTEGRAL_PLL(clk_mipimpll, osc_parents, + REG_MIPIMPLL_CSR, + REG_PLL_G2_CTRL, 0, + REG_PLL_G2_STATUS, 0, + pll_limits, + CLK_IS_CRITICAL); + +static const struct clk_parent_data clk_mipimpll_parents[] = { + { .hw = &clk_mipimpll.common.hw }, +}; +static const struct clk_parent_data clk_bypass_mipimpll_parents[] = { + { .index = 0 }, + { .hw = &clk_mipimpll.common.hw }, +}; +static const struct clk_parent_data clk_bypass_fpll_parents[] = { + { .index = 0 }, + { .hw = &clk_fpll.common.hw }, +}; + +static struct cv1800_clk_pll_synthesizer clk_mpll_synthesizer = { + .en = CV1800_CLK_BIT(REG_PLL_G6_SSC_SYN_CTRL, 2), + .clk_half = CV1800_CLK_BIT(REG_PLL_G6_SSC_SYN_CTRL, 0), + .ctrl = REG_MPLL_SSC_SYN_CTRL, + .set = REG_MPLL_SSC_SYN_SET, +}; +static CV1800_FACTIONAL_PLL(clk_mpll, clk_bypass_mipimpll_parents, + REG_MPLL_CSR, + REG_PLL_G6_CTRL, 0, + REG_PLL_G6_STATUS, 0, + pll_limits, + &clk_mpll_synthesizer, + CLK_IS_CRITICAL); + +static struct cv1800_clk_pll_synthesizer clk_tpll_synthesizer = { + .en = CV1800_CLK_BIT(REG_PLL_G6_SSC_SYN_CTRL, 3), + .clk_half = CV1800_CLK_BIT(REG_PLL_G6_SSC_SYN_CTRL, 0), + .ctrl = REG_TPLL_SSC_SYN_CTRL, + .set = REG_TPLL_SSC_SYN_SET, +}; +static CV1800_FACTIONAL_PLL(clk_tpll, clk_bypass_mipimpll_parents, + REG_TPLL_CSR, + REG_PLL_G6_CTRL, 4, + REG_PLL_G6_STATUS, 1, + pll_limits, + &clk_tpll_synthesizer, + CLK_IS_CRITICAL); + +static struct cv1800_clk_pll_synthesizer clk_a0pll_synthesizer = { + .en =
CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 2), + .clk_half = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 0), + .ctrl = REG_A0PLL_SSC_SYN_CTRL, + .set = REG_A0PLL_SSC_SYN_SET, +}; +static CV1800_FACTIONAL_PLL(clk_a0pll, clk_bypass_mipimpll_parents, + REG_A0PLL_CSR, + REG_PLL_G2_CTRL, 4, + REG_PLL_G2_STATUS, 1, + pll_limits, + &clk_a0pll_synthesizer, + CLK_IS_CRITICAL); + +static struct cv1800_clk_pll_synthesizer clk_disppll_synthesizer = { + .en = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 3), + .clk_half = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 0), + .ctrl = REG_DISPPLL_SSC_SYN_CTRL, + .set = REG_DISPPLL_SSC_SYN_SET, +}; +static CV1800_FACTIONAL_PLL(clk_disppll, clk_bypass_mipimpll_parents, + REG_DISPPLL_CSR, + REG_PLL_G2_CTRL, 8, + REG_PLL_G2_STATUS, 2, + pll_limits, + &clk_disppll_synthesizer, + CLK_IS_CRITICAL); + +static struct cv1800_clk_pll_synthesizer clk_cam0pll_synthesizer = { + .en = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 4), + .clk_half = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 0), + .ctrl = REG_CAM0PLL_SSC_SYN_CTRL, + .set = REG_CAM0PLL_SSC_SYN_SET, +}; +static CV1800_FACTIONAL_PLL(clk_cam0pll, clk_bypass_mipimpll_parents, + REG_CAM0PLL_CSR, + REG_PLL_G2_CTRL, 12, + REG_PLL_G2_STATUS, 3, + pll_limits, + &clk_cam0pll_synthesizer, + CLK_IGNORE_UNUSED); + +static struct cv1800_clk_pll_synthesizer clk_cam1pll_synthesizer = { + .en = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 5), + .clk_half = CV1800_CLK_BIT(REG_PLL_G2_SSC_SYN_CTRL, 0), + .ctrl = REG_CAM1PLL_SSC_SYN_CTRL, + .set = REG_CAM1PLL_SSC_SYN_SET, +}; +static CV1800_FACTIONAL_PLL(clk_cam1pll, clk_bypass_mipimpll_parents, + REG_CAM1PLL_CSR, + REG_PLL_G2_CTRL, 16, + REG_PLL_G2_STATUS, 4, + pll_limits, + &clk_cam1pll_synthesizer, + CLK_IS_CRITICAL); + +static const struct clk_parent_data clk_cam0pll_parents[] = { + { .hw = &clk_cam0pll.common.hw }, +}; + +/* G2D */ +static CV1800_FIXED_DIV(clk_cam0pll_d2, clk_cam0pll_parents, + REG_CAM0PLL_CLK_CSR, 1, + 2, + CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED); +static CV1800_FIXED_DIV(clk_cam0pll_d3, clk_cam0pll_parents, + REG_CAM0PLL_CLK_CSR, 2, + 3, + CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED); +static CV1800_FIXED_DIV(clk_mipimpll_d3, clk_mipimpll_parents, + REG_MIPIMPLL_CLK_CSR, 2, + 3, + CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED); + +/* TPU */ +static const struct clk_parent_data clk_tpu_parents[] = { + { .index = 0 }, + { .hw = &clk_tpll.common.hw }, + { .hw = &clk_a0pll.common.hw }, + { .hw = &clk_mipimpll.common.hw }, + { .hw = &clk_fpll.common.hw }, +}; + +static CV1800_BYPASS_MUX(clk_tpu, clk_tpu_parents, + REG_CLK_EN_0, 4, + REG_DIV_CLK_TPU, 16, 4, 3, CV1800_DIV_FLAG, + REG_DIV_CLK_TPU, 8, 2, + REG_CLK_BYP_0, 3, + 0); +static CV1800_GATE(clk_tpu_fab, clk_mipimpll_parents, + REG_CLK_EN_0, 5, + 0); + +/* FABRIC_AXI6 */ +static CV1800_BYPASS_DIV(clk_axi6, clk_bypass_fpll_parents, + REG_CLK_EN_2, 2, + REG_DIV_CLK_AXI6, 16, 4, 15, CV1800_DIV_FLAG, + REG_CLK_BYP_0, 20, + CLK_IS_CRITICAL); + +static const struct clk_parent_data clk_axi6_bus_parents[] = { + { .hw = &clk_axi6.div.common.hw }, +}; +static const struct clk_parent_data clk_bypass_axi6_bus_parents[] = { + { .index = 0 }, + { .hw = &clk_axi6.div.common.hw }, +}; + +/* FABRIC_AXI4 */ +static const struct clk_parent_data clk_axi4_parents[] = { + { .index = 0 }, + { .hw = &clk_fpll.common.hw }, + { .hw = &clk_disppll.common.hw }, +}; + +static CV1800_BYPASS_MUX(clk_axi4, clk_axi4_parents, + REG_CLK_EN_2, 1, + REG_DIV_CLK_AXI4, 16, 4, 5, CV1800_DIV_FLAG, + REG_DIV_CLK_AXI4, 8, 2, + REG_CLK_BYP_0, 19, + CLK_IS_CRITICAL); + +static const struct 
clk_parent_data clk_axi4_bus_parents[] = { + { .hw = &clk_axi4.mux.common.hw }, +}; + +/* XTAL_MISC */ +static CV1800_GATE(clk_xtal_misc, osc_parents, + REG_CLK_EN_0, 14, + CLK_IS_CRITICAL); + +static const struct clk_parent_data clk_timer_parents[] = { + { .hw = &clk_xtal_misc.common.hw }, +}; + +/* TOP */ +static const struct clk_parent_data clk_cam0_200_parents[] = { + { .index = 0 }, + { .index = 0 }, + { .hw = &clk_disppll.common.hw }, +}; + +static CV1800_BYPASS_MUX(clk_cam0_200, clk_cam0_200_parents, + REG_CLK_EN_1, 13, + REG_DIV_CLK_CAM0_200, 16, 4, 1, CV1800_DIV_FLAG, + REG_DIV_CLK_CAM0_200, 8, 2, + REG_CLK_BYP_0, 16, + CLK_IS_CRITICAL); +static CV1800_DIV(clk_1m, osc_parents, + REG_CLK_EN_3, 5, + REG_DIV_CLK_1M, 16, 6, 25, CV1800_DIV_FLAG, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_pm, clk_axi6_bus_parents, + REG_CLK_EN_3, 8, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_timer0, clk_timer_parents, + REG_CLK_EN_3, 9, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_timer1, clk_timer_parents, + REG_CLK_EN_3, 10, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_timer2, clk_timer_parents, + REG_CLK_EN_3, 11, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_timer3, clk_timer_parents, + REG_CLK_EN_3, 12, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_timer4, clk_timer_parents, + REG_CLK_EN_3, 13, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_timer5, clk_timer_parents, + REG_CLK_EN_3, 14, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_timer6, clk_timer_parents, + REG_CLK_EN_3, 15, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_timer7, clk_timer_parents, + REG_CLK_EN_3, 16, + CLK_IS_CRITICAL); + +static const struct clk_parent_data clk_parents_1m[] = { + { .hw = &clk_1m.common.hw }, +}; +static const struct clk_parent_data clk_uart_parents[] = { + { .hw = &clk_cam0_200.mux.common.hw }, +}; + +/* AHB ROM */ +static CV1800_GATE(clk_ahb_rom, clk_axi4_bus_parents, + REG_CLK_EN_0, 6, + 0); + +/* RTC */ +static CV1800_GATE(clk_rtc_25m, osc_parents, + REG_CLK_EN_0, 8, + CLK_IS_CRITICAL); +static CV1800_BYPASS_DIV(clk_src_rtc_sys_0, clk_bypass_fpll_parents, + REG_CLK_EN_4, 6, + REG_DIV_CLK_RTCSYS_SRC_0, 16, 4, 5, CV1800_DIV_FLAG, + REG_CLK_BYP_1, 5, + CLK_IS_CRITICAL); + +/* TEMPSEN */ +static CV1800_GATE(clk_tempsen, osc_parents, + REG_CLK_EN_0, 9, + 0); + +/* SARADC */ +static CV1800_GATE(clk_saradc, osc_parents, + REG_CLK_EN_0, 10, + 0); + +/* EFUSE */ +static CV1800_GATE(clk_efuse, osc_parents, + REG_CLK_EN_0, 11, + 0); +static CV1800_GATE(clk_apb_efuse, osc_parents, + REG_CLK_EN_0, 12, + 0); + +/* WDT */ +static CV1800_GATE(clk_apb_wdt, osc_parents, + REG_CLK_EN_1, 7, + CLK_IS_CRITICAL); + +/* WGN */ +static CV1800_GATE(clk_wgn, osc_parents, + REG_CLK_EN_3, 22, + 0); +static CV1800_GATE(clk_wgn0, osc_parents, + REG_CLK_EN_3, 23, + 0); +static CV1800_GATE(clk_wgn1, osc_parents, + REG_CLK_EN_3, 24, + 0); +static CV1800_GATE(clk_wgn2, osc_parents, + REG_CLK_EN_3, 25, + 0); + +/* KEYSCAN */ +static CV1800_GATE(clk_keyscan, osc_parents, + REG_CLK_EN_3, 26, + 0); + +/* EMMC */ +static CV1800_GATE(clk_axi4_emmc, clk_axi4_bus_parents, + REG_CLK_EN_0, 15, + 0); +static CV1800_BYPASS_MUX(clk_emmc, clk_axi4_parents, + REG_CLK_EN_0, 16, + REG_DIV_CLK_EMMC, 16, 5, 15, CV1800_DIV_FLAG, + REG_DIV_CLK_EMMC, 8, 2, + REG_CLK_BYP_0, 5, + 0); +static CV1800_DIV(clk_emmc_100k, clk_parents_1m, + REG_CLK_EN_0, 17, + REG_DIV_CLK_EMMC_100K, 16, 8, 10, CV1800_DIV_FLAG, + 0); + +/* SD */ +static CV1800_GATE(clk_axi4_sd0, clk_axi4_bus_parents, + REG_CLK_EN_0, 18, + 0); +static CV1800_BYPASS_MUX(clk_sd0, clk_axi4_parents, + REG_CLK_EN_0, 19, + 
REG_DIV_CLK_SD0, 16, 5, 15, CV1800_DIV_FLAG, + REG_DIV_CLK_SD0, 8, 2, + REG_CLK_BYP_0, 6, + 0); +static CV1800_DIV(clk_sd0_100k, clk_parents_1m, + REG_CLK_EN_0, 20, + REG_DIV_CLK_SD0_100K, 16, 8, 10, CV1800_DIV_FLAG, + 0); +static CV1800_GATE(clk_axi4_sd1, clk_axi4_bus_parents, + REG_CLK_EN_0, 21, + 0); +static CV1800_BYPASS_MUX(clk_sd1, clk_axi4_parents, + REG_CLK_EN_0, 22, + REG_DIV_CLK_SD1, 16, 5, 15, CV1800_DIV_FLAG, + REG_DIV_CLK_SD1, 8, 2, + REG_CLK_BYP_0, 7, + 0); +static CV1800_DIV(clk_sd1_100k, clk_parents_1m, + REG_CLK_EN_0, 23, + REG_DIV_CLK_SD1_100K, 16, 8, 10, CV1800_DIV_FLAG, + 0); + +/* SPI NAND */ +static CV1800_BYPASS_MUX(clk_spi_nand, clk_axi4_parents, + REG_CLK_EN_0, 24, + REG_DIV_CLK_SPI_NAND, 16, 5, 8, CV1800_DIV_FLAG, + REG_DIV_CLK_SPI_NAND, 8, 2, + REG_CLK_BYP_0, 8, + 0); + +/* GPIO */ +static CV1800_DIV(clk_gpio_db, clk_parents_1m, + REG_CLK_EN_0, 31, + REG_DIV_CLK_GPIO_DB, 16, 16, 10, CV1800_DIV_FLAG, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_apb_gpio, clk_axi6_bus_parents, + REG_CLK_EN_0, 29, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_apb_gpio_intr, clk_axi6_bus_parents, + REG_CLK_EN_0, 30, + CLK_IS_CRITICAL); + +/* ETH */ +static CV1800_BYPASS_DIV(clk_eth0_500m, clk_bypass_fpll_parents, + REG_CLK_EN_0, 25, + REG_DIV_CLK_GPIO_DB, 16, 4, 3, CV1800_DIV_FLAG, + REG_CLK_BYP_0, 9, + 0); +static CV1800_GATE(clk_axi4_eth0, clk_axi4_bus_parents, + REG_CLK_EN_0, 26, + 0); +static CV1800_BYPASS_DIV(clk_eth1_500m, clk_bypass_fpll_parents, + REG_CLK_EN_0, 27, + REG_DIV_CLK_GPIO_DB, 16, 4, 3, CV1800_DIV_FLAG, + REG_CLK_BYP_0, 10, + 0); +static CV1800_GATE(clk_axi4_eth1, clk_axi4_bus_parents, + REG_CLK_EN_0, 28, + 0); + +/* SF */ +static CV1800_GATE(clk_ahb_sf, clk_axi4_bus_parents, + REG_CLK_EN_1, 0, + 0); +static CV1800_GATE(clk_ahb_sf1, clk_axi4_bus_parents, + REG_CLK_EN_3, 27, + 0); + +/* AUDSRC */ +static CV1800_ACLK(clk_a24m, clk_mipimpll_parents, + REG_APLL_FRAC_DIV_CTRL, 0, + REG_APLL_FRAC_DIV_CTRL, 3, + REG_APLL_FRAC_DIV_CTRL, 1, + REG_APLL_FRAC_DIV_CTRL, 2, + REG_APLL_FRAC_DIV_M, 0, 22, CV1800_DIV_FLAG, + REG_APLL_FRAC_DIV_N, 0, 22, CV1800_DIV_FLAG, + 24576000, + 0); + +static const struct clk_parent_data clk_aud_parents[] = { + { .index = 0 }, + { .hw = &clk_a0pll.common.hw }, + { .hw = &clk_a24m.common.hw }, +}; + +static CV1800_BYPASS_MUX(clk_audsrc, clk_aud_parents, + REG_CLK_EN_4, 1, + REG_DIV_CLK_AUDSRC, 16, 8, 18, CV1800_DIV_FLAG, + REG_DIV_CLK_AUDSRC, 8, 2, + REG_CLK_BYP_1, 2, + 0); +static CV1800_GATE(clk_apb_audsrc, clk_axi4_bus_parents, + REG_CLK_EN_4, 2, + 0); + +/* SDMA */ +static CV1800_GATE(clk_sdma_axi, clk_axi4_bus_parents, + REG_CLK_EN_1, 1, + 0); +static CV1800_BYPASS_MUX(clk_sdma_aud0, clk_aud_parents, + REG_CLK_EN_1, 2, + REG_DIV_CLK_SDMA_AUD0, 16, 8, 18, CV1800_DIV_FLAG, + REG_DIV_CLK_SDMA_AUD0, 8, 2, + REG_CLK_BYP_0, 11, + 0); +static CV1800_BYPASS_MUX(clk_sdma_aud1, clk_aud_parents, + REG_CLK_EN_1, 3, + REG_DIV_CLK_SDMA_AUD1, 16, 8, 18, CV1800_DIV_FLAG, + REG_DIV_CLK_SDMA_AUD1, 8, 2, + REG_CLK_BYP_0, 12, + 0); +static CV1800_BYPASS_MUX(clk_sdma_aud2, clk_aud_parents, + REG_CLK_EN_1, 3, + REG_DIV_CLK_SDMA_AUD2, 16, 8, 18, CV1800_DIV_FLAG, + REG_DIV_CLK_SDMA_AUD2, 8, 2, + REG_CLK_BYP_0, 13, + 0); +static CV1800_BYPASS_MUX(clk_sdma_aud3, clk_aud_parents, + REG_CLK_EN_1, 3, + REG_DIV_CLK_SDMA_AUD3, 16, 8, 18, CV1800_DIV_FLAG, + REG_DIV_CLK_SDMA_AUD3, 8, 2, + REG_CLK_BYP_0, 14, + 0); + +/* SPI */ +static CV1800_GATE(clk_apb_spi0, clk_axi4_bus_parents, + REG_CLK_EN_1, 9, + 0); +static CV1800_GATE(clk_apb_spi1, clk_axi4_bus_parents, + REG_CLK_EN_1, 
10, + 0); +static CV1800_GATE(clk_apb_spi2, clk_axi4_bus_parents, + REG_CLK_EN_1, 11, + 0); +static CV1800_GATE(clk_apb_spi3, clk_axi4_bus_parents, + REG_CLK_EN_1, 12, + 0); +static CV1800_BYPASS_DIV(clk_spi, clk_bypass_fpll_parents, + REG_CLK_EN_3, 6, + REG_DIV_CLK_SPI, 16, 6, 8, CV1800_DIV_FLAG, + REG_CLK_BYP_0, 30, + 0); + +/* UART */ +static CV1800_GATE(clk_uart0, clk_uart_parents, + REG_CLK_EN_1, 14, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_apb_uart0, clk_axi4_bus_parents, + REG_CLK_EN_1, 15, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_uart1, clk_uart_parents, + REG_CLK_EN_1, 16, + 0); +static CV1800_GATE(clk_apb_uart1, clk_axi4_bus_parents, + REG_CLK_EN_1, 17, + 0); +static CV1800_GATE(clk_uart2, clk_uart_parents, + REG_CLK_EN_1, 18, + 0); +static CV1800_GATE(clk_apb_uart2, clk_axi4_bus_parents, + REG_CLK_EN_1, 19, + 0); +static CV1800_GATE(clk_uart3, clk_uart_parents, + REG_CLK_EN_1, 20, + 0); +static CV1800_GATE(clk_apb_uart3, clk_axi4_bus_parents, + REG_CLK_EN_1, 21, + 0); +static CV1800_GATE(clk_uart4, clk_uart_parents, + REG_CLK_EN_1, 22, + 0); +static CV1800_GATE(clk_apb_uart4, clk_axi4_bus_parents, + REG_CLK_EN_1, 23, + 0); + +/* I2S */ +static CV1800_GATE(clk_apb_i2s0, clk_axi4_bus_parents, + REG_CLK_EN_1, 24, + 0); +static CV1800_GATE(clk_apb_i2s1, clk_axi4_bus_parents, + REG_CLK_EN_1, 25, + 0); +static CV1800_GATE(clk_apb_i2s2, clk_axi4_bus_parents, + REG_CLK_EN_1, 26, + 0); +static CV1800_GATE(clk_apb_i2s3, clk_axi4_bus_parents, + REG_CLK_EN_1, 27, + 0); + +/* DEBUG */ +static CV1800_GATE(clk_debug, osc_parents, + REG_CLK_EN_0, 13, + CLK_IS_CRITICAL); +static CV1800_BYPASS_DIV(clk_ap_debug, clk_bypass_fpll_parents, + REG_CLK_EN_4, 5, + REG_DIV_CLK_AP_DEBUG, 16, 4, 5, CV1800_DIV_FLAG, + REG_CLK_BYP_1, 4, + CLK_IS_CRITICAL); + +/* DDR */ +static CV1800_GATE(clk_ddr_axi_reg, clk_axi6_bus_parents, + REG_CLK_EN_0, 7, + CLK_IS_CRITICAL); + +/* I2C */ +static CV1800_GATE(clk_apb_i2c, clk_axi4_bus_parents, + REG_CLK_EN_1, 6, + 0); +static CV1800_BYPASS_DIV(clk_i2c, clk_bypass_axi6_bus_parents, + REG_CLK_EN_3, 7, + REG_DIV_CLK_I2C, 16, 4, 1, CV1800_DIV_FLAG, + REG_CLK_BYP_0, 31, + 0); +static CV1800_GATE(clk_apb_i2c0, clk_axi4_bus_parents, + REG_CLK_EN_3, 17, + 0); +static CV1800_GATE(clk_apb_i2c1, clk_axi4_bus_parents, + REG_CLK_EN_3, 18, + 0); +static CV1800_GATE(clk_apb_i2c2, clk_axi4_bus_parents, + REG_CLK_EN_3, 19, + 0); +static CV1800_GATE(clk_apb_i2c3, clk_axi4_bus_parents, + REG_CLK_EN_3, 20, + 0); +static CV1800_GATE(clk_apb_i2c4, clk_axi4_bus_parents, + REG_CLK_EN_3, 21, + 0); + +/* USB */ +static CV1800_GATE(clk_axi4_usb, clk_axi4_bus_parents, + REG_CLK_EN_1, 28, + 0); +static CV1800_GATE(clk_apb_usb, clk_axi4_bus_parents, + REG_CLK_EN_1, 29, + 0); +static CV1800_BYPASS_FIXED_DIV(clk_usb_125m, clk_bypass_fpll_parents, + REG_CLK_EN_1, 30, + 12, + REG_CLK_BYP_0, 17, + CLK_SET_RATE_PARENT); +static CV1800_FIXED_DIV(clk_usb_33k, clk_parents_1m, + REG_CLK_EN_1, 31, + 3, + 0); +static CV1800_BYPASS_FIXED_DIV(clk_usb_12m, clk_bypass_fpll_parents, + REG_CLK_EN_2, 0, + 125, + REG_CLK_BYP_0, 18, + CLK_SET_RATE_PARENT); + +/* VIP SYS */ +static const struct clk_parent_data clk_vip_sys_parents[] = { + { .index = 0 }, + { .hw = &clk_mipimpll.common.hw }, + { .hw = &clk_cam0pll.common.hw }, + { .hw = &clk_disppll.common.hw }, + { .hw = &clk_fpll.common.hw }, +}; +static const struct clk_parent_data clk_disp_vip_parents[] = { + { .index = 0 }, + { .hw = &clk_disppll.common.hw }, +}; + +static CV1800_BYPASS_DIV(clk_dsi_esc, clk_bypass_axi6_bus_parents, + REG_CLK_EN_2, 3, + 
REG_DIV_CLK_DSI_ESC, 16, 4, 5, CV1800_DIV_FLAG, + REG_CLK_BYP_0, 21, + 0); +static CV1800_BYPASS_MUX(clk_axi_vip, clk_vip_sys_parents, + REG_CLK_EN_2, 4, + REG_DIV_CLK_AXI_VIP, 16, 4, 3, CV1800_DIV_FLAG, + REG_DIV_CLK_AXI_VIP, 8, 2, + REG_CLK_BYP_0, 22, + 0); + +static const struct clk_parent_data clk_axi_vip_bus_parents[] = { + { .hw = &clk_axi_vip.mux.common.hw }, +}; + +static CV1800_BYPASS_MUX(clk_src_vip_sys_0, clk_vip_sys_parents, + REG_CLK_EN_2, 5, + REG_DIV_CLK_SRC_VIP_SYS_0, 16, 4, 6, CV1800_DIV_FLAG, + REG_DIV_CLK_SRC_VIP_SYS_0, 8, 2, + REG_CLK_BYP_0, 23, + 0); +static CV1800_BYPASS_MUX(clk_src_vip_sys_1, clk_vip_sys_parents, + REG_CLK_EN_2, 6, + REG_DIV_CLK_SRC_VIP_SYS_1, 16, 4, 6, CV1800_DIV_FLAG, + REG_DIV_CLK_SRC_VIP_SYS_1, 8, 2, + REG_CLK_BYP_0, 24, + 0); +static CV1800_BYPASS_DIV(clk_disp_src_vip, clk_disp_vip_parents, + REG_CLK_EN_2, 7, + REG_DIV_CLK_DISP_SRC_VIP, 16, 4, 8, CV1800_DIV_FLAG, + REG_CLK_BYP_0, 25, + 0); +static CV1800_BYPASS_MUX(clk_src_vip_sys_2, clk_vip_sys_parents, + REG_CLK_EN_3, 29, + REG_DIV_CLK_SRC_VIP_SYS_2, 16, 4, 2, CV1800_DIV_FLAG, + REG_DIV_CLK_SRC_VIP_SYS_2, 8, 2, + REG_CLK_BYP_1, 1, + 0); +static CV1800_GATE(clk_csi_mac0_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 18, + 0); +static CV1800_GATE(clk_csi_mac1_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 19, + 0); +static CV1800_GATE(clk_isp_top_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 20, + 0); +static CV1800_GATE(clk_img_d_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 21, + 0); +static CV1800_GATE(clk_img_v_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 22, + 0); +static CV1800_GATE(clk_sc_top_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 23, + 0); +static CV1800_GATE(clk_sc_d_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 24, + 0); +static CV1800_GATE(clk_sc_v1_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 25, + 0); +static CV1800_GATE(clk_sc_v2_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 26, + 0); +static CV1800_GATE(clk_sc_v3_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 27, + 0); +static CV1800_GATE(clk_dwa_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 28, + 0); +static CV1800_GATE(clk_bt_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 29, + 0); +static CV1800_GATE(clk_disp_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 30, + 0); +static CV1800_GATE(clk_dsi_mac_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_2, 31, + 0); +static CV1800_GATE(clk_lvds0_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_3, 0, + 0); +static CV1800_GATE(clk_lvds1_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_3, 1, + 0); +static CV1800_GATE(clk_csi0_rx_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_3, 2, + 0); +static CV1800_GATE(clk_csi1_rx_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_3, 3, + 0); +static CV1800_GATE(clk_pad_vi_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_3, 4, + 0); +static CV1800_GATE(clk_pad_vi1_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_3, 30, + 0); +static CV1800_GATE(clk_cfg_reg_vip, clk_axi6_bus_parents, + REG_CLK_EN_3, 31, + 0); +static CV1800_GATE(clk_pad_vi2_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_4, 7, + 0); +static CV1800_GATE(clk_csi_be_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_4, 8, + 0); +static CV1800_GATE(clk_vip_ip0, clk_axi_vip_bus_parents, + REG_CLK_EN_4, 9, + 0); +static CV1800_GATE(clk_vip_ip1, clk_axi_vip_bus_parents, + REG_CLK_EN_4, 10, + 0); +static CV1800_GATE(clk_vip_ip2, clk_axi_vip_bus_parents, + REG_CLK_EN_4, 11, + 0); +static CV1800_GATE(clk_vip_ip3, clk_axi_vip_bus_parents, + REG_CLK_EN_4, 12, + 0); +static CV1800_BYPASS_MUX(clk_src_vip_sys_3, clk_vip_sys_parents, + REG_CLK_EN_4, 
15, + REG_DIV_CLK_SRC_VIP_SYS_3, 16, 4, 2, CV1800_DIV_FLAG, + REG_DIV_CLK_SRC_VIP_SYS_3, 8, 2, + REG_CLK_BYP_1, 8, + 0); +static CV1800_BYPASS_MUX(clk_src_vip_sys_4, clk_vip_sys_parents, + REG_CLK_EN_4, 16, + REG_DIV_CLK_SRC_VIP_SYS_4, 16, 4, 3, CV1800_DIV_FLAG, + REG_DIV_CLK_SRC_VIP_SYS_4, 8, 2, + REG_CLK_BYP_1, 9, + 0); +static CV1800_GATE(clk_ive_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_4, 17, + 0); +static CV1800_GATE(clk_raw_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_4, 18, + 0); +static CV1800_GATE(clk_osdc_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_4, 19, + 0); +static CV1800_GATE(clk_csi_mac2_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_4, 20, + 0); +static CV1800_GATE(clk_cam0_vip, clk_axi_vip_bus_parents, + REG_CLK_EN_4, 21, + 0); + +/* CAM OUT */ +static const struct clk_parent_data clk_cam_parents[] = { + { .hw = &clk_cam0pll.common.hw }, + { .hw = &clk_cam0pll_d2.common.hw }, + { .hw = &clk_cam0pll_d3.common.hw }, + { .hw = &clk_mipimpll_d3.common.hw }, +}; + +static CV1800_MUX(clk_cam0, clk_cam_parents, + REG_CLK_EN_2, 16, + REG_CLK_CAM0_SRC_DIV, 16, 6, 0, CV1800_DIV_FLAG, + REG_CLK_CAM0_SRC_DIV, 8, 2, + CLK_IGNORE_UNUSED); +static CV1800_MUX(clk_cam1, clk_cam_parents, + REG_CLK_EN_2, 17, + REG_CLK_CAM1_SRC_DIV, 16, 6, 0, CV1800_DIV_FLAG, + REG_CLK_CAM1_SRC_DIV, 8, 2, + CLK_IGNORE_UNUSED); + +/* VIDEO SUBSYS */ +static const struct clk_parent_data clk_axi_video_codec_parents[] = { + { .index = 0 }, + { .hw = &clk_a0pll.common.hw }, + { .hw = &clk_mipimpll.common.hw }, + { .hw = &clk_cam1pll.common.hw }, + { .hw = &clk_fpll.common.hw }, +}; +static const struct clk_parent_data clk_vc_src0_parents[] = { + { .index = 0 }, + { .hw = &clk_disppll.common.hw }, + { .hw = &clk_mipimpll.common.hw }, + { .hw = &clk_cam1pll.common.hw }, + { .hw = &clk_fpll.common.hw }, +}; +static const struct clk_parent_data clk_vc_src1_parents[] = { + { .index = 0 }, + { .hw = &clk_cam1pll.common.hw }, +}; + +static CV1800_BYPASS_MUX(clk_axi_video_codec, clk_axi_video_codec_parents, + REG_CLK_EN_2, 8, + REG_DIV_CLK_AXI_VIDEO_CODEC, 16, 4, 2, CV1800_DIV_FLAG, + REG_DIV_CLK_AXI_VIDEO_CODEC, 8, 2, + REG_CLK_BYP_0, 26, + 0); + +static const struct clk_parent_data clk_axi_video_codec_bus_parents[] = { + { .hw = &clk_axi_video_codec.mux.common.hw }, +}; + +static CV1800_BYPASS_MUX(clk_vc_src0, clk_vc_src0_parents, + REG_CLK_EN_2, 9, + REG_DIV_CLK_VC_SRC0, 16, 4, 2, CV1800_DIV_FLAG, + REG_DIV_CLK_VC_SRC0, 8, 2, + REG_CLK_BYP_0, 27, + 0); + +static CV1800_GATE(clk_h264c, clk_axi_video_codec_bus_parents, + REG_CLK_EN_2, 10, + 0); +static CV1800_GATE(clk_h265c, clk_axi_video_codec_bus_parents, + REG_CLK_EN_2, 11, + 0); +static CV1800_GATE(clk_jpeg, clk_axi_video_codec_bus_parents, + REG_CLK_EN_2, 12, + CLK_IGNORE_UNUSED); +static CV1800_GATE(clk_apb_jpeg, clk_axi6_bus_parents, + REG_CLK_EN_2, 13, + CLK_IGNORE_UNUSED); +static CV1800_GATE(clk_apb_h264c, clk_axi6_bus_parents, + REG_CLK_EN_2, 14, + 0); +static CV1800_GATE(clk_apb_h265c, clk_axi6_bus_parents, + REG_CLK_EN_2, 15, + 0); +static CV1800_BYPASS_FIXED_DIV(clk_vc_src1, clk_vc_src1_parents, + REG_CLK_EN_3, 28, + 2, + REG_CLK_BYP_1, 0, + CLK_SET_RATE_PARENT); +static CV1800_BYPASS_FIXED_DIV(clk_vc_src2, clk_bypass_fpll_parents, + REG_CLK_EN_4, 3, + 3, + REG_CLK_BYP_1, 3, + CLK_SET_RATE_PARENT); + +/* VC SYS */ +static CV1800_GATE(clk_cfg_reg_vc, clk_axi6_bus_parents, + REG_CLK_EN_4, 0, + CLK_IGNORE_UNUSED); + +/* PWM */ +static CV1800_BYPASS_MUX(clk_pwm_src, clk_axi4_parents, + REG_CLK_EN_4, 4, + REG_DIV_CLK_PWM_SRC_0, 16, 6, 10, CV1800_DIV_FLAG, + 
REG_DIV_CLK_PWM_SRC_0, 8, 2, + REG_CLK_BYP_0, 15, + CLK_IS_CRITICAL); + +static const struct clk_parent_data clk_pwm_parents[] = { + { .hw = &clk_pwm_src.mux.common.hw }, +}; + +static CV1800_GATE(clk_pwm, clk_pwm_parents, + REG_CLK_EN_1, 8, + CLK_IS_CRITICAL); + +/* C906 */ +static const struct clk_parent_data clk_c906_0_parents[] = { + { .index = 0 }, + { .hw = &clk_tpll.common.hw }, + { .hw = &clk_a0pll.common.hw }, + { .hw = &clk_mipimpll.common.hw }, + { .hw = &clk_mpll.common.hw }, + { .hw = &clk_fpll.common.hw }, +}; +static const struct clk_parent_data clk_c906_1_parents[] = { + { .index = 0 }, + { .hw = &clk_tpll.common.hw }, + { .hw = &clk_a0pll.common.hw }, + { .hw = &clk_disppll.common.hw }, + { .hw = &clk_mpll.common.hw }, + { .hw = &clk_fpll.common.hw }, +}; + +static const s8 clk_c906_parent2sel[] = { + -1, /* osc */ + 0, /* mux 0: clk_tpll(c906_0), clk_tpll(c906_1) */ + 0, /* mux 0: clk_a0pll(c906_0), clk_a0pll(c906_1) */ + 0, /* mux 0: clk_mipimpll(c906_0), clk_disppll(c906_1) */ + 0, /* mux 0: clk_mpll(c906_0), clk_mpll(c906_1) */ + 1 /* mux 1: clk_fpll(c906_0), clk_fpll(c906_1) */ +}; + +static const u8 clk_c906_sel2parent[2][4] = { + [0] = { + 1, + 2, + 3, + 4 + }, + [1] = { + 5, + 5, + 5, + 5 + }, +}; + +static CV1800_MMUX(clk_c906_0, clk_c906_0_parents, + REG_CLK_EN_4, 13, + REG_DIV_CLK_C906_0_0, 16, 4, 1, CV1800_DIV_FLAG, + REG_DIV_CLK_C906_0_1, 16, 4, 2, CV1800_DIV_FLAG, + REG_DIV_CLK_C906_0_0, 8, 2, + REG_DIV_CLK_C906_0_1, 8, 2, + REG_CLK_BYP_1, 6, + REG_CLK_SEL_0, 23, + clk_c906_parent2sel, + clk_c906_sel2parent[0], clk_c906_sel2parent[1], + CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE); +static CV1800_MMUX(clk_c906_1, clk_c906_1_parents, + REG_CLK_EN_4, 14, + REG_DIV_CLK_C906_1_0, 16, 4, 2, CV1800_DIV_FLAG, + REG_DIV_CLK_C906_1_1, 16, 4, 3, CV1800_DIV_FLAG, + REG_DIV_CLK_C906_1_0, 8, 2, + REG_DIV_CLK_C906_1_1, 8, 2, + REG_CLK_BYP_1, 7, + REG_CLK_SEL_0, 24, + clk_c906_parent2sel, + clk_c906_sel2parent[0], clk_c906_sel2parent[1], + CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE); + +/* A53 */ +static CV1800_BYPASS_DIV(clk_cpu_axi0, clk_axi4_parents, + REG_CLK_EN_0, 1, + REG_DIV_CLK_CPU_AXI0, 16, 4, 3, CV1800_DIV_FLAG, + REG_CLK_BYP_0, 1, + CLK_IS_CRITICAL); +static CV1800_BYPASS_DIV(clk_cpu_gic, clk_bypass_fpll_parents, + REG_CLK_EN_0, 2, + REG_DIV_CLK_CPU_GIC, 16, 4, 5, CV1800_DIV_FLAG, + REG_CLK_BYP_0, 2, + CLK_IS_CRITICAL); +static CV1800_GATE(clk_xtal_ap, osc_parents, + REG_CLK_EN_0, 3, + CLK_IS_CRITICAL); + +static const struct clk_parent_data clk_a53_parents[] = { + { .index = 0 }, + { .hw = &clk_tpll.common.hw }, + { .hw = &clk_a0pll.common.hw }, + { .hw = &clk_mipimpll.common.hw }, + { .hw = &clk_mpll.common.hw }, + { .hw = &clk_fpll.common.hw }, +}; + +static const s8 clk_a53_parent2sel[] = { + -1, /* osc */ + 0, /* mux 0: clk_tpll */ + 0, /* mux 0: clk_a0pll */ + 0, /* mux 0: clk_mipimpll */ + 0, /* mux 0: clk_mpll */ + 1 /* mux 1: clk_fpll */ +}; + +static const u8 clk_a53_sel2parent[2][4] = { + [0] = { + 1, + 2, + 3, + 4 + }, + [1] = { + 5, + 5, + 5, + 5 + }, +}; + +/* + * Clock for A53 cpu in the CV18XX/SG200X series. + * For CV180X and CV181X series, this clock is not used, but can not + * be set to bypass mode, or the SoC will hang. 
+ */ +static CV1800_MMUX(clk_a53, clk_a53_parents, + REG_CLK_EN_0, 0, + REG_DIV_CLK_A53_0, 16, 4, 1, CV1800_DIV_FLAG, + REG_DIV_CLK_A53_1, 16, 4, 2, CV1800_DIV_FLAG, + REG_DIV_CLK_A53_0, 8, 2, + REG_DIV_CLK_A53_1, 8, 2, + REG_CLK_BYP_0, 0, + REG_CLK_SEL_0, 0, + clk_a53_parent2sel, + clk_a53_sel2parent[0], clk_a53_sel2parent[1], + CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE); + +static struct clk_hw_onecell_data cv1800_hw_clks = { + .num = CV1800_CLK_MAX, + .hws = { + [CLK_MPLL] = &clk_mpll.common.hw, + [CLK_TPLL] = &clk_tpll.common.hw, + [CLK_FPLL] = &clk_fpll.common.hw, + [CLK_MIPIMPLL] = &clk_mipimpll.common.hw, + [CLK_A0PLL] = &clk_a0pll.common.hw, + [CLK_DISPPLL] = &clk_disppll.common.hw, + [CLK_CAM0PLL] = &clk_cam0pll.common.hw, + [CLK_CAM1PLL] = &clk_cam1pll.common.hw, + + [CLK_MIPIMPLL_D3] = &clk_mipimpll_d3.common.hw, + [CLK_CAM0PLL_D2] = &clk_cam0pll_d2.common.hw, + [CLK_CAM0PLL_D3] = &clk_cam0pll_d3.common.hw, + + [CLK_TPU] = &clk_tpu.mux.common.hw, + [CLK_TPU_FAB] = &clk_tpu_fab.common.hw, + [CLK_AHB_ROM] = &clk_ahb_rom.common.hw, + [CLK_DDR_AXI_REG] = &clk_ddr_axi_reg.common.hw, + [CLK_RTC_25M] = &clk_rtc_25m.common.hw, + [CLK_SRC_RTC_SYS_0] = &clk_src_rtc_sys_0.div.common.hw, + [CLK_TEMPSEN] = &clk_tempsen.common.hw, + [CLK_SARADC] = &clk_saradc.common.hw, + [CLK_EFUSE] = &clk_efuse.common.hw, + [CLK_APB_EFUSE] = &clk_apb_efuse.common.hw, + [CLK_DEBUG] = &clk_debug.common.hw, + [CLK_AP_DEBUG] = &clk_ap_debug.div.common.hw, + [CLK_XTAL_MISC] = &clk_xtal_misc.common.hw, + [CLK_AXI4_EMMC] = &clk_axi4_emmc.common.hw, + [CLK_EMMC] = &clk_emmc.mux.common.hw, + [CLK_EMMC_100K] = &clk_emmc_100k.common.hw, + [CLK_AXI4_SD0] = &clk_axi4_sd0.common.hw, + [CLK_SD0] = &clk_sd0.mux.common.hw, + [CLK_SD0_100K] = &clk_sd0_100k.common.hw, + [CLK_AXI4_SD1] = &clk_axi4_sd1.common.hw, + [CLK_SD1] = &clk_sd1.mux.common.hw, + [CLK_SD1_100K] = &clk_sd1_100k.common.hw, + [CLK_SPI_NAND] = &clk_spi_nand.mux.common.hw, + [CLK_ETH0_500M] = &clk_eth0_500m.div.common.hw, + [CLK_AXI4_ETH0] = &clk_axi4_eth0.common.hw, + [CLK_ETH1_500M] = &clk_eth1_500m.div.common.hw, + [CLK_AXI4_ETH1] = &clk_axi4_eth1.common.hw, + [CLK_APB_GPIO] = &clk_apb_gpio.common.hw, + [CLK_APB_GPIO_INTR] = &clk_apb_gpio_intr.common.hw, + [CLK_GPIO_DB] = &clk_gpio_db.common.hw, + [CLK_AHB_SF] = &clk_ahb_sf.common.hw, + [CLK_AHB_SF1] = &clk_ahb_sf1.common.hw, + [CLK_A24M] = &clk_a24m.common.hw, + [CLK_AUDSRC] = &clk_audsrc.mux.common.hw, + [CLK_APB_AUDSRC] = &clk_apb_audsrc.common.hw, + [CLK_SDMA_AXI] = &clk_sdma_axi.common.hw, + [CLK_SDMA_AUD0] = &clk_sdma_aud0.mux.common.hw, + [CLK_SDMA_AUD1] = &clk_sdma_aud1.mux.common.hw, + [CLK_SDMA_AUD2] = &clk_sdma_aud2.mux.common.hw, + [CLK_SDMA_AUD3] = &clk_sdma_aud3.mux.common.hw, + [CLK_I2C] = &clk_i2c.div.common.hw, + [CLK_APB_I2C] = &clk_apb_i2c.common.hw, + [CLK_APB_I2C0] = &clk_apb_i2c0.common.hw, + [CLK_APB_I2C1] = &clk_apb_i2c1.common.hw, + [CLK_APB_I2C2] = &clk_apb_i2c2.common.hw, + [CLK_APB_I2C3] = &clk_apb_i2c3.common.hw, + [CLK_APB_I2C4] = &clk_apb_i2c4.common.hw, + [CLK_APB_WDT] = &clk_apb_wdt.common.hw, + [CLK_PWM_SRC] = &clk_pwm_src.mux.common.hw, + [CLK_PWM] = &clk_pwm.common.hw, + [CLK_SPI] = &clk_spi.div.common.hw, + [CLK_APB_SPI0] = &clk_apb_spi0.common.hw, + [CLK_APB_SPI1] = &clk_apb_spi1.common.hw, + [CLK_APB_SPI2] = &clk_apb_spi2.common.hw, + [CLK_APB_SPI3] = &clk_apb_spi3.common.hw, + [CLK_1M] = &clk_1m.common.hw, + [CLK_CAM0_200] = &clk_cam0_200.mux.common.hw, + [CLK_PM] = &clk_pm.common.hw, + [CLK_TIMER0] = &clk_timer0.common.hw, + [CLK_TIMER1] = &clk_timer1.common.hw, + 
[CLK_TIMER2] = &clk_timer2.common.hw, + [CLK_TIMER3] = &clk_timer3.common.hw, + [CLK_TIMER4] = &clk_timer4.common.hw, + [CLK_TIMER5] = &clk_timer5.common.hw, + [CLK_TIMER6] = &clk_timer6.common.hw, + [CLK_TIMER7] = &clk_timer7.common.hw, + [CLK_UART0] = &clk_uart0.common.hw, + [CLK_APB_UART0] = &clk_apb_uart0.common.hw, + [CLK_UART1] = &clk_uart1.common.hw, + [CLK_APB_UART1] = &clk_apb_uart1.common.hw, + [CLK_UART2] = &clk_uart2.common.hw, + [CLK_APB_UART2] = &clk_apb_uart2.common.hw, + [CLK_UART3] = &clk_uart3.common.hw, + [CLK_APB_UART3] = &clk_apb_uart3.common.hw, + [CLK_UART4] = &clk_uart4.common.hw, + [CLK_APB_UART4] = &clk_apb_uart4.common.hw, + [CLK_APB_I2S0] = &clk_apb_i2s0.common.hw, + [CLK_APB_I2S1] = &clk_apb_i2s1.common.hw, + [CLK_APB_I2S2] = &clk_apb_i2s2.common.hw, + [CLK_APB_I2S3] = &clk_apb_i2s3.common.hw, + [CLK_AXI4_USB] = &clk_axi4_usb.common.hw, + [CLK_APB_USB] = &clk_apb_usb.common.hw, + [CLK_USB_125M] = &clk_usb_125m.div.common.hw, + [CLK_USB_33K] = &clk_usb_33k.common.hw, + [CLK_USB_12M] = &clk_usb_12m.div.common.hw, + [CLK_AXI4] = &clk_axi4.mux.common.hw, + [CLK_AXI6] = &clk_axi6.div.common.hw, + [CLK_DSI_ESC] = &clk_dsi_esc.div.common.hw, + [CLK_AXI_VIP] = &clk_axi_vip.mux.common.hw, + [CLK_SRC_VIP_SYS_0] = &clk_src_vip_sys_0.mux.common.hw, + [CLK_SRC_VIP_SYS_1] = &clk_src_vip_sys_1.mux.common.hw, + [CLK_SRC_VIP_SYS_2] = &clk_src_vip_sys_2.mux.common.hw, + [CLK_SRC_VIP_SYS_3] = &clk_src_vip_sys_3.mux.common.hw, + [CLK_SRC_VIP_SYS_4] = &clk_src_vip_sys_4.mux.common.hw, + [CLK_CSI_BE_VIP] = &clk_csi_be_vip.common.hw, + [CLK_CSI_MAC0_VIP] = &clk_csi_mac0_vip.common.hw, + [CLK_CSI_MAC1_VIP] = &clk_csi_mac1_vip.common.hw, + [CLK_CSI_MAC2_VIP] = &clk_csi_mac2_vip.common.hw, + [CLK_CSI0_RX_VIP] = &clk_csi0_rx_vip.common.hw, + [CLK_CSI1_RX_VIP] = &clk_csi1_rx_vip.common.hw, + [CLK_ISP_TOP_VIP] = &clk_isp_top_vip.common.hw, + [CLK_IMG_D_VIP] = &clk_img_d_vip.common.hw, + [CLK_IMG_V_VIP] = &clk_img_v_vip.common.hw, + [CLK_SC_TOP_VIP] = &clk_sc_top_vip.common.hw, + [CLK_SC_D_VIP] = &clk_sc_d_vip.common.hw, + [CLK_SC_V1_VIP] = &clk_sc_v1_vip.common.hw, + [CLK_SC_V2_VIP] = &clk_sc_v2_vip.common.hw, + [CLK_SC_V3_VIP] = &clk_sc_v3_vip.common.hw, + [CLK_DWA_VIP] = &clk_dwa_vip.common.hw, + [CLK_BT_VIP] = &clk_bt_vip.common.hw, + [CLK_DISP_VIP] = &clk_disp_vip.common.hw, + [CLK_DSI_MAC_VIP] = &clk_dsi_mac_vip.common.hw, + [CLK_LVDS0_VIP] = &clk_lvds0_vip.common.hw, + [CLK_LVDS1_VIP] = &clk_lvds1_vip.common.hw, + [CLK_PAD_VI_VIP] = &clk_pad_vi_vip.common.hw, + [CLK_PAD_VI1_VIP] = &clk_pad_vi1_vip.common.hw, + [CLK_PAD_VI2_VIP] = &clk_pad_vi2_vip.common.hw, + [CLK_CFG_REG_VIP] = &clk_cfg_reg_vip.common.hw, + [CLK_VIP_IP0] = &clk_vip_ip0.common.hw, + [CLK_VIP_IP1] = &clk_vip_ip1.common.hw, + [CLK_VIP_IP2] = &clk_vip_ip2.common.hw, + [CLK_VIP_IP3] = &clk_vip_ip3.common.hw, + [CLK_IVE_VIP] = &clk_ive_vip.common.hw, + [CLK_RAW_VIP] = &clk_raw_vip.common.hw, + [CLK_OSDC_VIP] = &clk_osdc_vip.common.hw, + [CLK_CAM0_VIP] = &clk_cam0_vip.common.hw, + [CLK_AXI_VIDEO_CODEC] = &clk_axi_video_codec.mux.common.hw, + [CLK_VC_SRC0] = &clk_vc_src0.mux.common.hw, + [CLK_VC_SRC1] = &clk_vc_src1.div.common.hw, + [CLK_VC_SRC2] = &clk_vc_src2.div.common.hw, + [CLK_H264C] = &clk_h264c.common.hw, + [CLK_APB_H264C] = &clk_apb_h264c.common.hw, + [CLK_H265C] = &clk_h265c.common.hw, + [CLK_APB_H265C] = &clk_apb_h265c.common.hw, + [CLK_JPEG] = &clk_jpeg.common.hw, + [CLK_APB_JPEG] = &clk_apb_jpeg.common.hw, + [CLK_CAM0] = &clk_cam0.common.hw, + [CLK_CAM1] = &clk_cam1.common.hw, + [CLK_WGN] = &clk_wgn.common.hw, 
+ [CLK_WGN0] = &clk_wgn0.common.hw, + [CLK_WGN1] = &clk_wgn1.common.hw, + [CLK_WGN2] = &clk_wgn2.common.hw, + [CLK_KEYSCAN] = &clk_keyscan.common.hw, + [CLK_CFG_REG_VC] = &clk_cfg_reg_vc.common.hw, + [CLK_C906_0] = &clk_c906_0.common.hw, + [CLK_C906_1] = &clk_c906_1.common.hw, + [CLK_A53] = &clk_a53.common.hw, + [CLK_CPU_AXI0] = &clk_cpu_axi0.div.common.hw, + [CLK_CPU_GIC] = &clk_cpu_gic.div.common.hw, + [CLK_XTAL_AP] = &clk_xtal_ap.common.hw, + }, +}; + +static void cv18xx_clk_disable_auto_pd(void __iomem *base) +{ + static const u16 CV1800_PD_CLK[] = { + REG_MIPIMPLL_CLK_CSR, + REG_A0PLL_CLK_CSR, + REG_DISPPLL_CLK_CSR, + REG_CAM0PLL_CLK_CSR, + REG_CAM1PLL_CLK_CSR, + }; + + u32 val; + int i; + + /* disable auto power down */ + for (i = 0; i < ARRAY_SIZE(CV1800_PD_CLK); i++) { + u32 reg = CV1800_PD_CLK[i]; + + val = readl(base + reg); + val |= GENMASK(12, 9); + val &= ~BIT(8); + writel(val, base + reg); + } +} + +static void cv18xx_clk_disable_a53(void __iomem *base) +{ + u32 val = readl(base + REG_CLK_BYP_0); + + /* Set bypass clock for clk_a53 */ + val |= BIT(0); + + /* Set bypass clock for clk_cpu_axi0 */ + val |= BIT(1); + + /* Set bypass clock for clk_cpu_gic */ + val |= BIT(2); + + writel(val, base + REG_CLK_BYP_0); +} + +static int cv1800_pre_init(struct device *dev, void __iomem *base, + struct cv1800_clk_ctrl *ctrl, + const struct cv1800_clk_desc *desc) +{ + u32 val = readl(base + REG_CLK_EN_2); + + /* disable unsupported clk_disp_src_vip */ + val &= ~BIT(7); + + writel(val, base + REG_CLK_EN_2); + + cv18xx_clk_disable_a53(base); + cv18xx_clk_disable_auto_pd(base); + + return 0; +} + +static const struct cv1800_clk_desc cv1800_desc = { + .clks_data = &cv1800_hw_clks, + .pre_init = cv1800_pre_init, +}; + +static struct clk_hw_onecell_data cv1810_hw_clks = { + .num = CV1810_CLK_MAX, + .hws = { + [CLK_MPLL] = &clk_mpll.common.hw, + [CLK_TPLL] = &clk_tpll.common.hw, + [CLK_FPLL] = &clk_fpll.common.hw, + [CLK_MIPIMPLL] = &clk_mipimpll.common.hw, + [CLK_A0PLL] = &clk_a0pll.common.hw, + [CLK_DISPPLL] = &clk_disppll.common.hw, + [CLK_CAM0PLL] = &clk_cam0pll.common.hw, + [CLK_CAM1PLL] = &clk_cam1pll.common.hw, + + [CLK_MIPIMPLL_D3] = &clk_mipimpll_d3.common.hw, + [CLK_CAM0PLL_D2] = &clk_cam0pll_d2.common.hw, + [CLK_CAM0PLL_D3] = &clk_cam0pll_d3.common.hw, + + [CLK_TPU] = &clk_tpu.mux.common.hw, + [CLK_TPU_FAB] = &clk_tpu_fab.common.hw, + [CLK_AHB_ROM] = &clk_ahb_rom.common.hw, + [CLK_DDR_AXI_REG] = &clk_ddr_axi_reg.common.hw, + [CLK_RTC_25M] = &clk_rtc_25m.common.hw, + [CLK_SRC_RTC_SYS_0] = &clk_src_rtc_sys_0.div.common.hw, + [CLK_TEMPSEN] = &clk_tempsen.common.hw, + [CLK_SARADC] = &clk_saradc.common.hw, + [CLK_EFUSE] = &clk_efuse.common.hw, + [CLK_APB_EFUSE] = &clk_apb_efuse.common.hw, + [CLK_DEBUG] = &clk_debug.common.hw, + [CLK_AP_DEBUG] = &clk_ap_debug.div.common.hw, + [CLK_XTAL_MISC] = &clk_xtal_misc.common.hw, + [CLK_AXI4_EMMC] = &clk_axi4_emmc.common.hw, + [CLK_EMMC] = &clk_emmc.mux.common.hw, + [CLK_EMMC_100K] = &clk_emmc_100k.common.hw, + [CLK_AXI4_SD0] = &clk_axi4_sd0.common.hw, + [CLK_SD0] = &clk_sd0.mux.common.hw, + [CLK_SD0_100K] = &clk_sd0_100k.common.hw, + [CLK_AXI4_SD1] = &clk_axi4_sd1.common.hw, + [CLK_SD1] = &clk_sd1.mux.common.hw, + [CLK_SD1_100K] = &clk_sd1_100k.common.hw, + [CLK_SPI_NAND] = &clk_spi_nand.mux.common.hw, + [CLK_ETH0_500M] = &clk_eth0_500m.div.common.hw, + [CLK_AXI4_ETH0] = &clk_axi4_eth0.common.hw, + [CLK_ETH1_500M] = &clk_eth1_500m.div.common.hw, + [CLK_AXI4_ETH1] = &clk_axi4_eth1.common.hw, + [CLK_APB_GPIO] = &clk_apb_gpio.common.hw, + 
[CLK_APB_GPIO_INTR] = &clk_apb_gpio_intr.common.hw, + [CLK_GPIO_DB] = &clk_gpio_db.common.hw, + [CLK_AHB_SF] = &clk_ahb_sf.common.hw, + [CLK_AHB_SF1] = &clk_ahb_sf1.common.hw, + [CLK_A24M] = &clk_a24m.common.hw, + [CLK_AUDSRC] = &clk_audsrc.mux.common.hw, + [CLK_APB_AUDSRC] = &clk_apb_audsrc.common.hw, + [CLK_SDMA_AXI] = &clk_sdma_axi.common.hw, + [CLK_SDMA_AUD0] = &clk_sdma_aud0.mux.common.hw, + [CLK_SDMA_AUD1] = &clk_sdma_aud1.mux.common.hw, + [CLK_SDMA_AUD2] = &clk_sdma_aud2.mux.common.hw, + [CLK_SDMA_AUD3] = &clk_sdma_aud3.mux.common.hw, + [CLK_I2C] = &clk_i2c.div.common.hw, + [CLK_APB_I2C] = &clk_apb_i2c.common.hw, + [CLK_APB_I2C0] = &clk_apb_i2c0.common.hw, + [CLK_APB_I2C1] = &clk_apb_i2c1.common.hw, + [CLK_APB_I2C2] = &clk_apb_i2c2.common.hw, + [CLK_APB_I2C3] = &clk_apb_i2c3.common.hw, + [CLK_APB_I2C4] = &clk_apb_i2c4.common.hw, + [CLK_APB_WDT] = &clk_apb_wdt.common.hw, + [CLK_PWM_SRC] = &clk_pwm_src.mux.common.hw, + [CLK_PWM] = &clk_pwm.common.hw, + [CLK_SPI] = &clk_spi.div.common.hw, + [CLK_APB_SPI0] = &clk_apb_spi0.common.hw, + [CLK_APB_SPI1] = &clk_apb_spi1.common.hw, + [CLK_APB_SPI2] = &clk_apb_spi2.common.hw, + [CLK_APB_SPI3] = &clk_apb_spi3.common.hw, + [CLK_1M] = &clk_1m.common.hw, + [CLK_CAM0_200] = &clk_cam0_200.mux.common.hw, + [CLK_PM] = &clk_pm.common.hw, + [CLK_TIMER0] = &clk_timer0.common.hw, + [CLK_TIMER1] = &clk_timer1.common.hw, + [CLK_TIMER2] = &clk_timer2.common.hw, + [CLK_TIMER3] = &clk_timer3.common.hw, + [CLK_TIMER4] = &clk_timer4.common.hw, + [CLK_TIMER5] = &clk_timer5.common.hw, + [CLK_TIMER6] = &clk_timer6.common.hw, + [CLK_TIMER7] = &clk_timer7.common.hw, + [CLK_UART0] = &clk_uart0.common.hw, + [CLK_APB_UART0] = &clk_apb_uart0.common.hw, + [CLK_UART1] = &clk_uart1.common.hw, + [CLK_APB_UART1] = &clk_apb_uart1.common.hw, + [CLK_UART2] = &clk_uart2.common.hw, + [CLK_APB_UART2] = &clk_apb_uart2.common.hw, + [CLK_UART3] = &clk_uart3.common.hw, + [CLK_APB_UART3] = &clk_apb_uart3.common.hw, + [CLK_UART4] = &clk_uart4.common.hw, + [CLK_APB_UART4] = &clk_apb_uart4.common.hw, + [CLK_APB_I2S0] = &clk_apb_i2s0.common.hw, + [CLK_APB_I2S1] = &clk_apb_i2s1.common.hw, + [CLK_APB_I2S2] = &clk_apb_i2s2.common.hw, + [CLK_APB_I2S3] = &clk_apb_i2s3.common.hw, + [CLK_AXI4_USB] = &clk_axi4_usb.common.hw, + [CLK_APB_USB] = &clk_apb_usb.common.hw, + [CLK_USB_125M] = &clk_usb_125m.div.common.hw, + [CLK_USB_33K] = &clk_usb_33k.common.hw, + [CLK_USB_12M] = &clk_usb_12m.div.common.hw, + [CLK_AXI4] = &clk_axi4.mux.common.hw, + [CLK_AXI6] = &clk_axi6.div.common.hw, + [CLK_DSI_ESC] = &clk_dsi_esc.div.common.hw, + [CLK_AXI_VIP] = &clk_axi_vip.mux.common.hw, + [CLK_SRC_VIP_SYS_0] = &clk_src_vip_sys_0.mux.common.hw, + [CLK_SRC_VIP_SYS_1] = &clk_src_vip_sys_1.mux.common.hw, + [CLK_SRC_VIP_SYS_2] = &clk_src_vip_sys_2.mux.common.hw, + [CLK_SRC_VIP_SYS_3] = &clk_src_vip_sys_3.mux.common.hw, + [CLK_SRC_VIP_SYS_4] = &clk_src_vip_sys_4.mux.common.hw, + [CLK_CSI_BE_VIP] = &clk_csi_be_vip.common.hw, + [CLK_CSI_MAC0_VIP] = &clk_csi_mac0_vip.common.hw, + [CLK_CSI_MAC1_VIP] = &clk_csi_mac1_vip.common.hw, + [CLK_CSI_MAC2_VIP] = &clk_csi_mac2_vip.common.hw, + [CLK_CSI0_RX_VIP] = &clk_csi0_rx_vip.common.hw, + [CLK_CSI1_RX_VIP] = &clk_csi1_rx_vip.common.hw, + [CLK_ISP_TOP_VIP] = &clk_isp_top_vip.common.hw, + [CLK_IMG_D_VIP] = &clk_img_d_vip.common.hw, + [CLK_IMG_V_VIP] = &clk_img_v_vip.common.hw, + [CLK_SC_TOP_VIP] = &clk_sc_top_vip.common.hw, + [CLK_SC_D_VIP] = &clk_sc_d_vip.common.hw, + [CLK_SC_V1_VIP] = &clk_sc_v1_vip.common.hw, + [CLK_SC_V2_VIP] = &clk_sc_v2_vip.common.hw, + [CLK_SC_V3_VIP] = 
&clk_sc_v3_vip.common.hw, + [CLK_DWA_VIP] = &clk_dwa_vip.common.hw, + [CLK_BT_VIP] = &clk_bt_vip.common.hw, + [CLK_DISP_VIP] = &clk_disp_vip.common.hw, + [CLK_DSI_MAC_VIP] = &clk_dsi_mac_vip.common.hw, + [CLK_LVDS0_VIP] = &clk_lvds0_vip.common.hw, + [CLK_LVDS1_VIP] = &clk_lvds1_vip.common.hw, + [CLK_PAD_VI_VIP] = &clk_pad_vi_vip.common.hw, + [CLK_PAD_VI1_VIP] = &clk_pad_vi1_vip.common.hw, + [CLK_PAD_VI2_VIP] = &clk_pad_vi2_vip.common.hw, + [CLK_CFG_REG_VIP] = &clk_cfg_reg_vip.common.hw, + [CLK_VIP_IP0] = &clk_vip_ip0.common.hw, + [CLK_VIP_IP1] = &clk_vip_ip1.common.hw, + [CLK_VIP_IP2] = &clk_vip_ip2.common.hw, + [CLK_VIP_IP3] = &clk_vip_ip3.common.hw, + [CLK_IVE_VIP] = &clk_ive_vip.common.hw, + [CLK_RAW_VIP] = &clk_raw_vip.common.hw, + [CLK_OSDC_VIP] = &clk_osdc_vip.common.hw, + [CLK_CAM0_VIP] = &clk_cam0_vip.common.hw, + [CLK_AXI_VIDEO_CODEC] = &clk_axi_video_codec.mux.common.hw, + [CLK_VC_SRC0] = &clk_vc_src0.mux.common.hw, + [CLK_VC_SRC1] = &clk_vc_src1.div.common.hw, + [CLK_VC_SRC2] = &clk_vc_src2.div.common.hw, + [CLK_H264C] = &clk_h264c.common.hw, + [CLK_APB_H264C] = &clk_apb_h264c.common.hw, + [CLK_H265C] = &clk_h265c.common.hw, + [CLK_APB_H265C] = &clk_apb_h265c.common.hw, + [CLK_JPEG] = &clk_jpeg.common.hw, + [CLK_APB_JPEG] = &clk_apb_jpeg.common.hw, + [CLK_CAM0] = &clk_cam0.common.hw, + [CLK_CAM1] = &clk_cam1.common.hw, + [CLK_WGN] = &clk_wgn.common.hw, + [CLK_WGN0] = &clk_wgn0.common.hw, + [CLK_WGN1] = &clk_wgn1.common.hw, + [CLK_WGN2] = &clk_wgn2.common.hw, + [CLK_KEYSCAN] = &clk_keyscan.common.hw, + [CLK_CFG_REG_VC] = &clk_cfg_reg_vc.common.hw, + [CLK_C906_0] = &clk_c906_0.common.hw, + [CLK_C906_1] = &clk_c906_1.common.hw, + [CLK_A53] = &clk_a53.common.hw, + [CLK_CPU_AXI0] = &clk_cpu_axi0.div.common.hw, + [CLK_CPU_GIC] = &clk_cpu_gic.div.common.hw, + [CLK_XTAL_AP] = &clk_xtal_ap.common.hw, + [CLK_DISP_SRC_VIP] = &clk_disp_src_vip.div.common.hw, + }, +}; + +static int cv1810_pre_init(struct device *dev, void __iomem *base, + struct cv1800_clk_ctrl *ctrl, + const struct cv1800_clk_desc *desc) +{ + cv18xx_clk_disable_a53(base); + cv18xx_clk_disable_auto_pd(base); + + return 0; +} + +static const struct cv1800_clk_desc cv1810_desc = { + .clks_data = &cv1810_hw_clks, + .pre_init = cv1810_pre_init, +}; + +static int sg2000_pre_init(struct device *dev, void __iomem *base, + struct cv1800_clk_ctrl *ctrl, + const struct cv1800_clk_desc *desc) +{ + cv18xx_clk_disable_auto_pd(base); + + return 0; +} + +static const struct cv1800_clk_desc sg2000_desc = { + .clks_data = &cv1810_hw_clks, + .pre_init = sg2000_pre_init, +}; + +static int cv1800_clk_init_ctrl(struct device *dev, void __iomem *reg, + struct cv1800_clk_ctrl *ctrl, + const struct cv1800_clk_desc *desc) +{ + int i, ret; + + ctrl->desc = desc; + spin_lock_init(&ctrl->lock); + + for (i = 0; i < desc->clks_data->num; i++) { + struct clk_hw *hw = desc->clks_data->hws[i]; + struct cv1800_clk_common *common; + const char *name; + + if (!hw) + continue; + + name = hw->init->name; + + common = hw_to_cv1800_clk_common(hw); + common->base = reg; + common->lock = &ctrl->lock; + + ret = devm_clk_hw_register(dev, hw); + if (ret) { + dev_err(dev, "Couldn't register clock %d - %s\n", + i, name); + return ret; + } + } + + return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, + desc->clks_data); +} + +static int cv1800_clk_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + void __iomem *reg; + int ret; + const struct cv1800_clk_desc *desc; + struct cv1800_clk_ctrl *ctrl; + + reg = 
devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + desc = device_get_match_data(dev); + if (!desc) { + dev_err(dev, "no match data for platform\n"); + return -EINVAL; + } + + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + if (desc->pre_init) { + ret = desc->pre_init(dev, reg, ctrl, desc); + if (ret) + return ret; + } + + return cv1800_clk_init_ctrl(dev, reg, ctrl, desc); +} + +static const struct of_device_id cv1800_clk_ids[] = { + { .compatible = "sophgo,cv1800-clk", .data = &cv1800_desc }, + { .compatible = "sophgo,cv1810-clk", .data = &cv1810_desc }, + { .compatible = "sophgo,sg2000-clk", .data = &sg2000_desc }, + { } +}; +MODULE_DEVICE_TABLE(of, cv1800_clk_ids); + +static struct platform_driver cv1800_clk_driver = { + .probe = cv1800_clk_probe, + .driver = { + .name = "cv1800-clk", + .suppress_bind_attrs = true, + .of_match_table = cv1800_clk_ids, + }, +}; +module_platform_driver(cv1800_clk_driver); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/sophgo/clk-cv1800.h b/drivers/clk/sophgo/clk-cv1800.h new file mode 100644 index 0000000000..1e7107b5d0 --- /dev/null +++ b/drivers/clk/sophgo/clk-cv1800.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Inochi Amaoto + */ + +#ifndef _CLK_SOPHGO_CV1800_H_ +#define _CLK_SOPHGO_CV1800_H_ + +#include + +#define CV1800_CLK_MAX (CLK_XTAL_AP + 1) +#define CV1810_CLK_MAX (CLK_DISP_SRC_VIP + 1) + +#define REG_PLL_G2_CTRL 0x800 +#define REG_PLL_G2_STATUS 0x804 +#define REG_MIPIMPLL_CSR 0x808 +#define REG_A0PLL_CSR 0x80C +#define REG_DISPPLL_CSR 0x810 +#define REG_CAM0PLL_CSR 0x814 +#define REG_CAM1PLL_CSR 0x818 +#define REG_PLL_G2_SSC_SYN_CTRL 0x840 +#define REG_A0PLL_SSC_SYN_CTRL 0x850 +#define REG_A0PLL_SSC_SYN_SET 0x854 +#define REG_A0PLL_SSC_SYN_SPAN 0x858 +#define REG_A0PLL_SSC_SYN_STEP 0x85C +#define REG_DISPPLL_SSC_SYN_CTRL 0x860 +#define REG_DISPPLL_SSC_SYN_SET 0x864 +#define REG_DISPPLL_SSC_SYN_SPAN 0x868 +#define REG_DISPPLL_SSC_SYN_STEP 0x86C +#define REG_CAM0PLL_SSC_SYN_CTRL 0x870 +#define REG_CAM0PLL_SSC_SYN_SET 0x874 +#define REG_CAM0PLL_SSC_SYN_SPAN 0x878 +#define REG_CAM0PLL_SSC_SYN_STEP 0x87C +#define REG_CAM1PLL_SSC_SYN_CTRL 0x880 +#define REG_CAM1PLL_SSC_SYN_SET 0x884 +#define REG_CAM1PLL_SSC_SYN_SPAN 0x888 +#define REG_CAM1PLL_SSC_SYN_STEP 0x88C +#define REG_APLL_FRAC_DIV_CTRL 0x890 +#define REG_APLL_FRAC_DIV_M 0x894 +#define REG_APLL_FRAC_DIV_N 0x898 +#define REG_MIPIMPLL_CLK_CSR 0x8A0 +#define REG_A0PLL_CLK_CSR 0x8A4 +#define REG_DISPPLL_CLK_CSR 0x8A8 +#define REG_CAM0PLL_CLK_CSR 0x8AC +#define REG_CAM1PLL_CLK_CSR 0x8B0 +#define REG_CLK_CAM0_SRC_DIV 0x8C0 +#define REG_CLK_CAM1_SRC_DIV 0x8C4 + +/* top_pll_g6 */ +#define REG_PLL_G6_CTRL 0x900 +#define REG_PLL_G6_STATUS 0x904 +#define REG_MPLL_CSR 0x908 +#define REG_TPLL_CSR 0x90C +#define REG_FPLL_CSR 0x910 +#define REG_PLL_G6_SSC_SYN_CTRL 0x940 +#define REG_DPLL_SSC_SYN_CTRL 0x950 +#define REG_DPLL_SSC_SYN_SET 0x954 +#define REG_DPLL_SSC_SYN_SPAN 0x958 +#define REG_DPLL_SSC_SYN_STEP 0x95C +#define REG_MPLL_SSC_SYN_CTRL 0x960 +#define REG_MPLL_SSC_SYN_SET 0x964 +#define REG_MPLL_SSC_SYN_SPAN 0x968 +#define REG_MPLL_SSC_SYN_STEP 0x96C +#define REG_TPLL_SSC_SYN_CTRL 0x970 +#define REG_TPLL_SSC_SYN_SET 0x974 +#define REG_TPLL_SSC_SYN_SPAN 0x978 +#define REG_TPLL_SSC_SYN_STEP 0x97C + +/* clkgen */ +#define REG_CLK_EN_0 0x000 +#define REG_CLK_EN_1 0x004 +#define REG_CLK_EN_2 0x008 +#define REG_CLK_EN_3 0x00C +#define REG_CLK_EN_4 0x010 +#define REG_CLK_SEL_0 0x020 +#define 
REG_CLK_BYP_0 0x030 +#define REG_CLK_BYP_1 0x034 + +#define REG_DIV_CLK_A53_0 0x040 +#define REG_DIV_CLK_A53_1 0x044 +#define REG_DIV_CLK_CPU_AXI0 0x048 +#define REG_DIV_CLK_CPU_GIC 0x050 +#define REG_DIV_CLK_TPU 0x054 +#define REG_DIV_CLK_EMMC 0x064 +#define REG_DIV_CLK_EMMC_100K 0x06C +#define REG_DIV_CLK_SD0 0x070 +#define REG_DIV_CLK_SD0_100K 0x078 +#define REG_DIV_CLK_SD1 0x07C +#define REG_DIV_CLK_SD1_100K 0x084 +#define REG_DIV_CLK_SPI_NAND 0x088 +#define REG_DIV_CLK_ETH0_500M 0x08C +#define REG_DIV_CLK_ETH1_500M 0x090 +#define REG_DIV_CLK_GPIO_DB 0x094 +#define REG_DIV_CLK_SDMA_AUD0 0x098 +#define REG_DIV_CLK_SDMA_AUD1 0x09C +#define REG_DIV_CLK_SDMA_AUD2 0x0A0 +#define REG_DIV_CLK_SDMA_AUD3 0x0A4 +#define REG_DIV_CLK_CAM0_200 0x0A8 +#define REG_DIV_CLK_AXI4 0x0B8 +#define REG_DIV_CLK_AXI6 0x0BC +#define REG_DIV_CLK_DSI_ESC 0x0C4 +#define REG_DIV_CLK_AXI_VIP 0x0C8 +#define REG_DIV_CLK_SRC_VIP_SYS_0 0x0D0 +#define REG_DIV_CLK_SRC_VIP_SYS_1 0x0D8 +#define REG_DIV_CLK_DISP_SRC_VIP 0x0E0 +#define REG_DIV_CLK_AXI_VIDEO_CODEC 0x0E4 +#define REG_DIV_CLK_VC_SRC0 0x0EC +#define REG_DIV_CLK_1M 0x0FC +#define REG_DIV_CLK_SPI 0x100 +#define REG_DIV_CLK_I2C 0x104 +#define REG_DIV_CLK_SRC_VIP_SYS_2 0x110 +#define REG_DIV_CLK_AUDSRC 0x118 +#define REG_DIV_CLK_PWM_SRC_0 0x120 +#define REG_DIV_CLK_AP_DEBUG 0x128 +#define REG_DIV_CLK_RTCSYS_SRC_0 0x12C +#define REG_DIV_CLK_C906_0_0 0x130 +#define REG_DIV_CLK_C906_0_1 0x134 +#define REG_DIV_CLK_C906_1_0 0x138 +#define REG_DIV_CLK_C906_1_1 0x13C +#define REG_DIV_CLK_SRC_VIP_SYS_3 0x140 +#define REG_DIV_CLK_SRC_VIP_SYS_4 0x144 + +#endif /* _CLK_SOPHGO_CV1800_H_ */ diff --git a/drivers/clk/sophgo/clk-cv18xx-common.c b/drivers/clk/sophgo/clk-cv18xx-common.c new file mode 100644 index 0000000000..cbcdd88f0e --- /dev/null +++ b/drivers/clk/sophgo/clk-cv18xx-common.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2023 Inochi Amaoto + */ + +#include +#include +#include +#include + +#include "clk-cv18xx-common.h" + +int cv1800_clk_setbit(struct cv1800_clk_common *common, + struct cv1800_clk_regbit *field) +{ + u32 mask = BIT(field->shift); + u32 value; + unsigned long flags; + + spin_lock_irqsave(common->lock, flags); + + value = readl(common->base + field->reg); + writel(value | mask, common->base + field->reg); + + spin_unlock_irqrestore(common->lock, flags); + + return 0; +} + +int cv1800_clk_clearbit(struct cv1800_clk_common *common, + struct cv1800_clk_regbit *field) +{ + u32 mask = BIT(field->shift); + u32 value; + unsigned long flags; + + spin_lock_irqsave(common->lock, flags); + + value = readl(common->base + field->reg); + writel(value & ~mask, common->base + field->reg); + + spin_unlock_irqrestore(common->lock, flags); + + return 0; +} + +int cv1800_clk_checkbit(struct cv1800_clk_common *common, + struct cv1800_clk_regbit *field) +{ + return readl(common->base + field->reg) & BIT(field->shift); +} + +#define PLL_LOCK_TIMEOUT_US (200 * 1000) + +void cv1800_clk_wait_for_lock(struct cv1800_clk_common *common, + u32 reg, u32 lock) +{ + void __iomem *addr = common->base + reg; + u32 regval; + + if (!lock) + return; + + WARN_ON(readl_relaxed_poll_timeout(addr, regval, regval & lock, + 100, PLL_LOCK_TIMEOUT_US)); +} diff --git a/drivers/clk/sophgo/clk-cv18xx-common.h b/drivers/clk/sophgo/clk-cv18xx-common.h new file mode 100644 index 0000000000..2bfda02b20 --- /dev/null +++ b/drivers/clk/sophgo/clk-cv18xx-common.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Inochi Amaoto + */ + +#ifndef 
_CLK_SOPHGO_CV18XX_IP_H_ +#define _CLK_SOPHGO_CV18XX_IP_H_ + +#include +#include +#include + +struct cv1800_clk_common { + void __iomem *base; + spinlock_t *lock; + struct clk_hw hw; + unsigned long features; +}; + +#define CV1800_CLK_COMMON(_name, _parents, _op, _flags) \ + { \ + .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \ + _op, _flags), \ + } + +static inline struct cv1800_clk_common * +hw_to_cv1800_clk_common(struct clk_hw *hw) +{ + return container_of(hw, struct cv1800_clk_common, hw); +} + +struct cv1800_clk_regbit { + u16 reg; + s8 shift; +}; + +struct cv1800_clk_regfield { + u16 reg; + u8 shift; + u8 width; + s16 initval; + unsigned long flags; +}; + +#define CV1800_CLK_BIT(_reg, _shift) \ + { \ + .reg = _reg, \ + .shift = _shift, \ + } + +#define CV1800_CLK_REG(_reg, _shift, _width, _initval, _flags) \ + { \ + .reg = _reg, \ + .shift = _shift, \ + .width = _width, \ + .initval = _initval, \ + .flags = _flags, \ + } + +#define cv1800_clk_regfield_genmask(_reg) \ + GENMASK((_reg)->shift + (_reg)->width - 1, (_reg)->shift) +#define cv1800_clk_regfield_get(_val, _reg) \ + (((_val) >> (_reg)->shift) & GENMASK((_reg)->width - 1, 0)) +#define cv1800_clk_regfield_set(_val, _new, _reg) \ + (((_val) & ~cv1800_clk_regfield_genmask((_reg))) | \ + (((_new) & GENMASK((_reg)->width - 1, 0)) << (_reg)->shift)) + +#define _CV1800_SET_FIELD(_reg, _val, _field) \ + (((_reg) & ~(_field)) | FIELD_PREP((_field), (_val))) + +int cv1800_clk_setbit(struct cv1800_clk_common *common, + struct cv1800_clk_regbit *field); +int cv1800_clk_clearbit(struct cv1800_clk_common *common, + struct cv1800_clk_regbit *field); +int cv1800_clk_checkbit(struct cv1800_clk_common *common, + struct cv1800_clk_regbit *field); + +void cv1800_clk_wait_for_lock(struct cv1800_clk_common *common, + u32 reg, u32 lock); + +#endif // _CLK_SOPHGO_CV18XX_IP_H_ diff --git a/drivers/clk/sophgo/clk-cv18xx-ip.c b/drivers/clk/sophgo/clk-cv18xx-ip.c new file mode 100644 index 0000000000..805f561725 --- /dev/null +++ b/drivers/clk/sophgo/clk-cv18xx-ip.c @@ -0,0 +1,887 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2023 Inochi Amaoto + */ + +#include +#include +#include +#include + +#include "clk-cv18xx-ip.h" + +/* GATE */ +static inline struct cv1800_clk_gate *hw_to_cv1800_clk_gate(struct clk_hw *hw) +{ + struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw); + + return container_of(common, struct cv1800_clk_gate, common); +} + +static int gate_enable(struct clk_hw *hw) +{ + struct cv1800_clk_gate *gate = hw_to_cv1800_clk_gate(hw); + + return cv1800_clk_setbit(&gate->common, &gate->gate); +} + +static void gate_disable(struct clk_hw *hw) +{ + struct cv1800_clk_gate *gate = hw_to_cv1800_clk_gate(hw); + + cv1800_clk_clearbit(&gate->common, &gate->gate); +} + +static int gate_is_enabled(struct clk_hw *hw) +{ + struct cv1800_clk_gate *gate = hw_to_cv1800_clk_gate(hw); + + return cv1800_clk_checkbit(&gate->common, &gate->gate); +} + +static unsigned long gate_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return parent_rate; +} + +static long gate_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + return *parent_rate; +} + +static int gate_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + return 0; +} + +const struct clk_ops cv1800_clk_gate_ops = { + .disable = gate_disable, + .enable = gate_enable, + .is_enabled = gate_is_enabled, + + .recalc_rate = gate_recalc_rate, + .round_rate = gate_round_rate, + .set_rate = gate_set_rate, +}; 
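/*
 * Illustrative sketch, not part of the upstream patch: how a consumer
 * driver would typically drive one of the gate clocks defined above
 * through the generic clk API.  The device and function names below are
 * hypothetical; the point is only that clk_prepare_enable() ends up in
 * gate_enable() -> cv1800_clk_setbit(), clk_disable_unprepare() ends up
 * in gate_disable() -> cv1800_clk_clearbit(), and the gate's rate ops
 * simply pass the parent rate through unchanged.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_gate_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* Look up a gate clock exported through clk_hw_onecell_data. */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Sets the single enable bit in the matching REG_CLK_EN_x register. */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* A plain gate has no divider, so this reports the parent's rate. */
	dev_info(&pdev->dev, "gate clock rate: %lu Hz\n", clk_get_rate(clk));

	clk_disable_unprepare(clk);
	return 0;
}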
+ +/* DIV */ +#define _DIV_EN_CLK_DIV_FACTOR_FIELD BIT(3) + +#define DIV_GET_EN_CLK_DIV_FACTOR(_reg) \ + FIELD_GET(_DIV_EN_CLK_DIV_FACTOR_FIELD, _reg) + +#define DIV_SET_EN_DIV_FACTOR(_reg) \ + _CV1800_SET_FIELD(_reg, 1, _DIV_EN_CLK_DIV_FACTOR_FIELD) + +static inline struct cv1800_clk_div *hw_to_cv1800_clk_div(struct clk_hw *hw) +{ + struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw); + + return container_of(common, struct cv1800_clk_div, common); +} + +static int div_enable(struct clk_hw *hw) +{ + struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw); + + return cv1800_clk_setbit(&div->common, &div->gate); +} + +static void div_disable(struct clk_hw *hw) +{ + struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw); + + cv1800_clk_clearbit(&div->common, &div->gate); +} + +static int div_is_enabled(struct clk_hw *hw) +{ + struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw); + + return cv1800_clk_checkbit(&div->common, &div->gate); +} + +static int div_helper_set_rate(struct cv1800_clk_common *common, + struct cv1800_clk_regfield *div, + unsigned long val) +{ + unsigned long flags; + u32 reg; + + if (div->width == 0) + return 0; + + spin_lock_irqsave(common->lock, flags); + + reg = readl(common->base + div->reg); + reg = cv1800_clk_regfield_set(reg, val, div); + if (div->initval > 0) + reg = DIV_SET_EN_DIV_FACTOR(reg); + + writel(reg, common->base + div->reg); + + spin_unlock_irqrestore(common->lock, flags); + + return 0; +} + +static u32 div_helper_get_clockdiv(struct cv1800_clk_common *common, + struct cv1800_clk_regfield *div) +{ + u32 clockdiv = 1; + u32 reg; + + if (!div || div->initval < 0 || (div->width == 0 && div->initval <= 0)) + return 1; + + if (div->width == 0 && div->initval > 0) + return div->initval; + + reg = readl(common->base + div->reg); + + if (div->initval == 0 || DIV_GET_EN_CLK_DIV_FACTOR(reg)) + clockdiv = cv1800_clk_regfield_get(reg, div); + else if (div->initval > 0) + clockdiv = div->initval; + + return clockdiv; +} + +static u32 div_helper_round_rate(struct cv1800_clk_regfield *div, + struct clk_hw *hw, struct clk_hw *parent, + unsigned long rate, unsigned long *prate) +{ + if (div->width == 0) { + if (div->initval <= 0) + return DIV_ROUND_UP_ULL(*prate, 1); + else + return DIV_ROUND_UP_ULL(*prate, div->initval); + } + + return divider_round_rate_parent(hw, parent, rate, prate, NULL, + div->width, div->flags); +} + +static long div_round_rate(struct clk_hw *parent, unsigned long *parent_rate, + unsigned long rate, int id, void *data) +{ + struct cv1800_clk_div *div = data; + + return div_helper_round_rate(&div->div, &div->common.hw, parent, + rate, parent_rate); +} + +static bool div_is_better_rate(struct cv1800_clk_common *common, + unsigned long target, unsigned long now, + unsigned long best) +{ + if (common->features & CLK_DIVIDER_ROUND_CLOSEST) + return abs_diff(target, now) < abs_diff(target, best); + + return now <= target && now > best; +} + +static int mux_helper_determine_rate(struct cv1800_clk_common *common, + struct clk_rate_request *req, + long (*round)(struct clk_hw *, + unsigned long *, + unsigned long, + int, + void *), + void *data) +{ + unsigned long best_parent_rate = 0, best_rate = 0; + struct clk_hw *best_parent, *hw = &common->hw; + unsigned int i; + + if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) { + unsigned long adj_parent_rate; + + best_parent = clk_hw_get_parent(hw); + best_parent_rate = clk_hw_get_rate(best_parent); + + best_rate = round(best_parent, &adj_parent_rate, + req->rate, -1, data); + + goto find; + } + + 
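	/*
	 * Reparenting is allowed: walk every parent, round the requested
	 * rate against each one and keep the best candidate according to
	 * div_is_better_rate() (closest match when CLK_DIVIDER_ROUND_CLOSEST
	 * is set, otherwise the highest rate not above the request).  An
	 * exact match short-circuits the search.
	 */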
for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + unsigned long tmp_rate, parent_rate; + struct clk_hw *parent; + + parent = clk_hw_get_parent_by_index(hw, i); + if (!parent) + continue; + + parent_rate = clk_hw_get_rate(parent); + + tmp_rate = round(parent, &parent_rate, req->rate, i, data); + + if (tmp_rate == req->rate) { + best_parent = parent; + best_parent_rate = parent_rate; + best_rate = tmp_rate; + goto find; + } + + if (div_is_better_rate(common, req->rate, + tmp_rate, best_rate)) { + best_parent = parent; + best_parent_rate = parent_rate; + best_rate = tmp_rate; + } + } + + if (best_rate == 0) + return -EINVAL; + +find: + req->best_parent_hw = best_parent; + req->best_parent_rate = best_parent_rate; + req->rate = best_rate; + return 0; +} + +static int div_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw); + + return mux_helper_determine_rate(&div->common, req, + div_round_rate, div); +} + +static unsigned long div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw); + unsigned long val; + + val = div_helper_get_clockdiv(&div->common, &div->div); + if (val == 0) + return 0; + + return divider_recalc_rate(hw, parent_rate, val, NULL, + div->div.flags, div->div.width); +} + +static int div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw); + unsigned long val; + + val = divider_get_val(rate, parent_rate, NULL, + div->div.width, div->div.flags); + + return div_helper_set_rate(&div->common, &div->div, val); +} + +const struct clk_ops cv1800_clk_div_ops = { + .disable = div_disable, + .enable = div_enable, + .is_enabled = div_is_enabled, + + .determine_rate = div_determine_rate, + .recalc_rate = div_recalc_rate, + .set_rate = div_set_rate, +}; + +static inline struct cv1800_clk_bypass_div * +hw_to_cv1800_clk_bypass_div(struct clk_hw *hw) +{ + struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw); + + return container_of(div, struct cv1800_clk_bypass_div, div); +} + +static long bypass_div_round_rate(struct clk_hw *parent, + unsigned long *parent_rate, + unsigned long rate, int id, void *data) +{ + struct cv1800_clk_bypass_div *div = data; + + if (id == -1) { + if (cv1800_clk_checkbit(&div->div.common, &div->bypass)) + return *parent_rate; + else + return div_round_rate(parent, parent_rate, rate, + -1, &div->div); + } + + if (id == 0) + return *parent_rate; + + return div_round_rate(parent, parent_rate, rate, id - 1, &div->div); +} + +static int bypass_div_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw); + + return mux_helper_determine_rate(&div->div.common, req, + bypass_div_round_rate, div); +} + +static unsigned long bypass_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw); + + if (cv1800_clk_checkbit(&div->div.common, &div->bypass)) + return parent_rate; + + return div_recalc_rate(hw, parent_rate); +} + +static int bypass_div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw); + + if (cv1800_clk_checkbit(&div->div.common, &div->bypass)) + return 0; + + return div_set_rate(hw, rate, parent_rate); +} + +static u8 bypass_div_get_parent(struct clk_hw *hw) +{ + struct 
cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw); + + if (cv1800_clk_checkbit(&div->div.common, &div->bypass)) + return 0; + + return 1; +} + +static int bypass_div_set_parent(struct clk_hw *hw, u8 index) +{ + struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw); + + if (index) + return cv1800_clk_clearbit(&div->div.common, &div->bypass); + + return cv1800_clk_setbit(&div->div.common, &div->bypass); +} + +const struct clk_ops cv1800_clk_bypass_div_ops = { + .disable = div_disable, + .enable = div_enable, + .is_enabled = div_is_enabled, + + .determine_rate = bypass_div_determine_rate, + .recalc_rate = bypass_div_recalc_rate, + .set_rate = bypass_div_set_rate, + + .set_parent = bypass_div_set_parent, + .get_parent = bypass_div_get_parent, +}; + +/* MUX */ +static inline struct cv1800_clk_mux *hw_to_cv1800_clk_mux(struct clk_hw *hw) +{ + struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw); + + return container_of(common, struct cv1800_clk_mux, common); +} + +static int mux_enable(struct clk_hw *hw) +{ + struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw); + + return cv1800_clk_setbit(&mux->common, &mux->gate); +} + +static void mux_disable(struct clk_hw *hw) +{ + struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw); + + cv1800_clk_clearbit(&mux->common, &mux->gate); +} + +static int mux_is_enabled(struct clk_hw *hw) +{ + struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw); + + return cv1800_clk_checkbit(&mux->common, &mux->gate); +} + +static long mux_round_rate(struct clk_hw *parent, unsigned long *parent_rate, + unsigned long rate, int id, void *data) +{ + struct cv1800_clk_mux *mux = data; + + return div_helper_round_rate(&mux->div, &mux->common.hw, parent, + rate, parent_rate); +} + +static int mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw); + + return mux_helper_determine_rate(&mux->common, req, + mux_round_rate, mux); +} + +static unsigned long mux_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw); + unsigned long val; + + val = div_helper_get_clockdiv(&mux->common, &mux->div); + if (val == 0) + return 0; + + return divider_recalc_rate(hw, parent_rate, val, NULL, + mux->div.flags, mux->div.width); +} + +static int mux_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw); + unsigned long val; + + val = divider_get_val(rate, parent_rate, NULL, + mux->div.width, mux->div.flags); + + return div_helper_set_rate(&mux->common, &mux->div, val); +} + +static u8 mux_get_parent(struct clk_hw *hw) +{ + struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw); + u32 reg = readl(mux->common.base + mux->mux.reg); + + return cv1800_clk_regfield_get(reg, &mux->mux); +} + +static int _mux_set_parent(struct cv1800_clk_mux *mux, u8 index) +{ + u32 reg; + + reg = readl(mux->common.base + mux->mux.reg); + reg = cv1800_clk_regfield_set(reg, index, &mux->mux); + writel(reg, mux->common.base + mux->mux.reg); + + return 0; +} + +static int mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw); + unsigned long flags; + + spin_lock_irqsave(mux->common.lock, flags); + + _mux_set_parent(mux, index); + + spin_unlock_irqrestore(mux->common.lock, flags); + + return 0; +} + +const struct clk_ops cv1800_clk_mux_ops = { + .disable = mux_disable, + .enable = mux_enable, + .is_enabled = mux_is_enabled, + + 
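	/* Rate handling reuses the shared divider helpers (div_helper_*()). */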
.determine_rate = mux_determine_rate, + .recalc_rate = mux_recalc_rate, + .set_rate = mux_set_rate, + + .set_parent = mux_set_parent, + .get_parent = mux_get_parent, +}; + +static inline struct cv1800_clk_bypass_mux * +hw_to_cv1800_clk_bypass_mux(struct clk_hw *hw) +{ + struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw); + + return container_of(mux, struct cv1800_clk_bypass_mux, mux); +} + +static long bypass_mux_round_rate(struct clk_hw *parent, + unsigned long *parent_rate, + unsigned long rate, int id, void *data) +{ + struct cv1800_clk_bypass_mux *mux = data; + + if (id == -1) { + if (cv1800_clk_checkbit(&mux->mux.common, &mux->bypass)) + return *parent_rate; + else + return mux_round_rate(parent, parent_rate, rate, + -1, &mux->mux); + } + + if (id == 0) + return *parent_rate; + + return mux_round_rate(parent, parent_rate, rate, id - 1, &mux->mux); +} + +static int bypass_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw); + + return mux_helper_determine_rate(&mux->mux.common, req, + bypass_mux_round_rate, mux); +} + +static unsigned long bypass_mux_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw); + + if (cv1800_clk_checkbit(&mux->mux.common, &mux->bypass)) + return parent_rate; + + return mux_recalc_rate(hw, parent_rate); +} + +static int bypass_mux_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw); + + if (cv1800_clk_checkbit(&mux->mux.common, &mux->bypass)) + return 0; + + return mux_set_rate(hw, rate, parent_rate); +} + +static u8 bypass_mux_get_parent(struct clk_hw *hw) +{ + struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw); + + if (cv1800_clk_checkbit(&mux->mux.common, &mux->bypass)) + return 0; + + return mux_get_parent(hw) + 1; +} + +static int bypass_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw); + + if (index == 0) + return cv1800_clk_setbit(&mux->mux.common, &mux->bypass); + + return cv1800_clk_clearbit(&mux->mux.common, &mux->bypass); +} + +const struct clk_ops cv1800_clk_bypass_mux_ops = { + .disable = mux_disable, + .enable = mux_enable, + .is_enabled = mux_is_enabled, + + .determine_rate = bypass_mux_determine_rate, + .recalc_rate = bypass_mux_recalc_rate, + .set_rate = bypass_mux_set_rate, + + .set_parent = bypass_mux_set_parent, + .get_parent = bypass_mux_get_parent, +}; + +/* MMUX */ +static inline struct cv1800_clk_mmux *hw_to_cv1800_clk_mmux(struct clk_hw *hw) +{ + struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw); + + return container_of(common, struct cv1800_clk_mmux, common); +} + +static u8 mmux_get_parent_id(struct cv1800_clk_mmux *mmux) +{ + struct clk_hw *hw = &mmux->common.hw; + struct clk_hw *parent = clk_hw_get_parent(hw); + unsigned int i; + + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + if (parent == clk_hw_get_parent_by_index(hw, i)) + return i; + } + + unreachable(); +} + +static int mmux_enable(struct clk_hw *hw) +{ + struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw); + + return cv1800_clk_setbit(&mmux->common, &mmux->gate); +} + +static void mmux_disable(struct clk_hw *hw) +{ + struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw); + + cv1800_clk_clearbit(&mmux->common, &mmux->gate); +} + +static int mmux_is_enabled(struct clk_hw *hw) +{ + struct 
cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw); + + return cv1800_clk_checkbit(&mmux->common, &mmux->gate); +} + +static long mmux_round_rate(struct clk_hw *parent, unsigned long *parent_rate, + unsigned long rate, int id, void *data) +{ + struct cv1800_clk_mmux *mmux = data; + s8 div_id; + + if (id == -1) { + if (cv1800_clk_checkbit(&mmux->common, &mmux->bypass)) + return *parent_rate; + + id = mmux_get_parent_id(mmux); + } + + div_id = mmux->parent2sel[id]; + + if (div_id < 0) + return *parent_rate; + + return div_helper_round_rate(&mmux->div[div_id], + &mmux->common.hw, parent, + rate, parent_rate); +} + +static int mmux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw); + + return mux_helper_determine_rate(&mmux->common, req, + mmux_round_rate, mmux); +} + +static unsigned long mmux_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw); + unsigned long val; + struct cv1800_clk_regfield *div; + + if (cv1800_clk_checkbit(&mmux->common, &mmux->bypass)) + return parent_rate; + + if (cv1800_clk_checkbit(&mmux->common, &mmux->clk_sel)) + div = &mmux->div[0]; + else + div = &mmux->div[1]; + + val = div_helper_get_clockdiv(&mmux->common, div); + if (val == 0) + return 0; + + return divider_recalc_rate(hw, parent_rate, val, NULL, + div->flags, div->width); +} + +static int mmux_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw); + struct cv1800_clk_regfield *div; + unsigned long val; + + if (cv1800_clk_checkbit(&mmux->common, &mmux->bypass)) + return parent_rate; + + if (cv1800_clk_checkbit(&mmux->common, &mmux->clk_sel)) + div = &mmux->div[0]; + else + div = &mmux->div[1]; + + val = divider_get_val(rate, parent_rate, NULL, + div->width, div->flags); + + return div_helper_set_rate(&mmux->common, div, val); +} + +static u8 mmux_get_parent(struct clk_hw *hw) +{ + struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw); + struct cv1800_clk_regfield *mux; + u32 reg; + s8 clk_sel; + + if (cv1800_clk_checkbit(&mmux->common, &mmux->bypass)) + return 0; + + if (cv1800_clk_checkbit(&mmux->common, &mmux->clk_sel)) + clk_sel = 0; + else + clk_sel = 1; + mux = &mmux->mux[clk_sel]; + + reg = readl(mmux->common.base + mux->reg); + + return mmux->sel2parent[clk_sel][cv1800_clk_regfield_get(reg, mux)]; +} + +static int mmux_set_parent(struct clk_hw *hw, u8 index) +{ + struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw); + struct cv1800_clk_regfield *mux; + unsigned long flags; + u32 reg; + s8 clk_sel = mmux->parent2sel[index]; + + if (index == 0 || clk_sel == -1) { + cv1800_clk_setbit(&mmux->common, &mmux->bypass); + goto release; + } + + cv1800_clk_clearbit(&mmux->common, &mmux->bypass); + + if (clk_sel) + cv1800_clk_clearbit(&mmux->common, &mmux->clk_sel); + else + cv1800_clk_setbit(&mmux->common, &mmux->clk_sel); + + spin_lock_irqsave(mmux->common.lock, flags); + + mux = &mmux->mux[clk_sel]; + reg = readl(mmux->common.base + mux->reg); + reg = cv1800_clk_regfield_set(reg, index, mux); + + writel(reg, mmux->common.base + mux->reg); + + spin_unlock_irqrestore(mmux->common.lock, flags); + +release: + return 0; +} + +const struct clk_ops cv1800_clk_mmux_ops = { + .disable = mmux_disable, + .enable = mmux_enable, + .is_enabled = mmux_is_enabled, + + .determine_rate = mmux_determine_rate, + .recalc_rate = mmux_recalc_rate, + .set_rate = mmux_set_rate, + + .set_parent = 
mmux_set_parent, + .get_parent = mmux_get_parent, +}; + +/* AUDIO CLK */ +static inline struct cv1800_clk_audio * +hw_to_cv1800_clk_audio(struct clk_hw *hw) +{ + struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw); + + return container_of(common, struct cv1800_clk_audio, common); +} + +static int aclk_enable(struct clk_hw *hw) +{ + struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw); + + cv1800_clk_setbit(&aclk->common, &aclk->src_en); + return cv1800_clk_setbit(&aclk->common, &aclk->output_en); +} + +static void aclk_disable(struct clk_hw *hw) +{ + struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw); + + cv1800_clk_clearbit(&aclk->common, &aclk->output_en); + cv1800_clk_clearbit(&aclk->common, &aclk->src_en); +} + +static int aclk_is_enabled(struct clk_hw *hw) +{ + struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw); + + return cv1800_clk_checkbit(&aclk->common, &aclk->output_en); +} + +static int aclk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw); + + req->rate = aclk->target_rate; + + return 0; +} + +static unsigned long aclk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw); + u64 rate = parent_rate; + u64 factor = 2; + u32 regval; + + if (!cv1800_clk_checkbit(&aclk->common, &aclk->div_en)) + return 0; + + regval = readl(aclk->common.base + aclk->m.reg); + factor *= cv1800_clk_regfield_get(regval, &aclk->m); + + regval = readl(aclk->common.base + aclk->n.reg); + rate *= cv1800_clk_regfield_get(regval, &aclk->n); + + return DIV64_U64_ROUND_UP(rate, factor); +} + +static void aclk_determine_mn(unsigned long parent_rate, unsigned long rate, + u32 *m, u32 *n) +{ + u32 tm = parent_rate / 2; + u32 tn = rate; + u32 tcommon = gcd(tm, tn); + *m = tm / tcommon; + *n = tn / tcommon; +} + +static int aclk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw); + unsigned long flags; + u32 m, n; + + aclk_determine_mn(parent_rate, rate, + &m, &n); + + spin_lock_irqsave(aclk->common.lock, flags); + + writel(m, aclk->common.base + aclk->m.reg); + writel(n, aclk->common.base + aclk->n.reg); + + cv1800_clk_setbit(&aclk->common, &aclk->div_en); + cv1800_clk_setbit(&aclk->common, &aclk->div_up); + + spin_unlock_irqrestore(aclk->common.lock, flags); + + return 0; +} + +const struct clk_ops cv1800_clk_audio_ops = { + .disable = aclk_disable, + .enable = aclk_enable, + .is_enabled = aclk_is_enabled, + + .determine_rate = aclk_determine_rate, + .recalc_rate = aclk_recalc_rate, + .set_rate = aclk_set_rate, +}; diff --git a/drivers/clk/sophgo/clk-cv18xx-ip.h b/drivers/clk/sophgo/clk-cv18xx-ip.h new file mode 100644 index 0000000000..b37ba42bfd --- /dev/null +++ b/drivers/clk/sophgo/clk-cv18xx-ip.h @@ -0,0 +1,261 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Inochi Amaoto + */ + +#ifndef _CLK_SOPHGO_CV1800_IP_H_ +#define _CLK_SOPHGO_CV1800_IP_H_ + +#include "clk-cv18xx-common.h" + +struct cv1800_clk_gate { + struct cv1800_clk_common common; + struct cv1800_clk_regbit gate; +}; + +struct cv1800_clk_div_data { + u32 reg; + u32 mask; + u32 width; + u32 init; + u32 flags; +}; + +struct cv1800_clk_div { + struct cv1800_clk_common common; + struct cv1800_clk_regbit gate; + struct cv1800_clk_regfield div; +}; + +struct cv1800_clk_bypass_div { + struct cv1800_clk_div div; + struct cv1800_clk_regbit bypass; +}; + +struct 
cv1800_clk_mux { + struct cv1800_clk_common common; + struct cv1800_clk_regbit gate; + struct cv1800_clk_regfield div; + struct cv1800_clk_regfield mux; +}; + +struct cv1800_clk_bypass_mux { + struct cv1800_clk_mux mux; + struct cv1800_clk_regbit bypass; +}; + +struct cv1800_clk_mmux { + struct cv1800_clk_common common; + struct cv1800_clk_regbit gate; + struct cv1800_clk_regfield div[2]; + struct cv1800_clk_regfield mux[2]; + struct cv1800_clk_regbit bypass; + struct cv1800_clk_regbit clk_sel; + const s8 *parent2sel; + const u8 *sel2parent[2]; +}; + +struct cv1800_clk_audio { + struct cv1800_clk_common common; + struct cv1800_clk_regbit src_en; + struct cv1800_clk_regbit output_en; + struct cv1800_clk_regbit div_en; + struct cv1800_clk_regbit div_up; + struct cv1800_clk_regfield m; + struct cv1800_clk_regfield n; + u32 target_rate; +}; + +#define CV1800_GATE(_name, _parent, _gate_reg, _gate_shift, _flags) \ + struct cv1800_clk_gate _name = { \ + .common = CV1800_CLK_COMMON(#_name, _parent, \ + &cv1800_clk_gate_ops, \ + _flags), \ + .gate = CV1800_CLK_BIT(_gate_reg, _gate_shift), \ + } + +#define _CV1800_DIV(_name, _parent, _gate_reg, _gate_shift, \ + _div_reg, _div_shift, _div_width, _div_init, \ + _div_flag, _ops, _flags) \ + { \ + .common = CV1800_CLK_COMMON(#_name, _parent, \ + _ops, _flags), \ + .gate = CV1800_CLK_BIT(_gate_reg, \ + _gate_shift), \ + .div = CV1800_CLK_REG(_div_reg, _div_shift, \ + _div_width, _div_init, \ + _div_flag), \ + } + +#define _CV1800_FIXED_DIV_FLAG \ + (CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST) + +#define _CV1800_FIXED_DIV(_name, _parent, _gate_reg, _gate_shift, \ + _fix_div, _ops, _flags) \ + { \ + .common = CV1800_CLK_COMMON(#_name, _parent, \ + _ops, _flags), \ + .gate = CV1800_CLK_BIT(_gate_reg, \ + _gate_shift), \ + .div = CV1800_CLK_REG(0, 0, 0, \ + _fix_div, \ + _CV1800_FIXED_DIV_FLAG),\ + } + +#define CV1800_DIV(_name, _parent, _gate_reg, _gate_shift, \ + _div_reg, _div_shift, _div_width, _div_init, \ + _div_flag, _flags) \ + struct cv1800_clk_div _name = \ + _CV1800_DIV(_name, _parent, _gate_reg, _gate_shift, \ + _div_reg, _div_shift, _div_width, _div_init,\ + _div_flag, &cv1800_clk_div_ops, _flags) + +#define CV1800_BYPASS_DIV(_name, _parent, _gate_reg, _gate_shift, \ + _div_reg, _div_shift, _div_width, _div_init, \ + _div_flag, _bypass_reg, _bypass_shift, _flags)\ + struct cv1800_clk_bypass_div _name = { \ + .div = _CV1800_DIV(_name, _parent, \ + _gate_reg, _gate_shift, \ + _div_reg, _div_shift, \ + _div_width, _div_init, _div_flag, \ + &cv1800_clk_bypass_div_ops, \ + _flags), \ + .bypass = CV1800_CLK_BIT(_bypass_reg, _bypass_shift), \ + } + +#define CV1800_FIXED_DIV(_name, _parent, _gate_reg, _gate_shift, \ + _fix_div, _flags) \ + struct cv1800_clk_div _name = \ + _CV1800_FIXED_DIV(_name, _parent, \ + _gate_reg, _gate_shift, \ + _fix_div, \ + &cv1800_clk_div_ops, _flags) \ + +#define CV1800_BYPASS_FIXED_DIV(_name, _parent, _gate_reg, _gate_shift, \ + _fix_div, _bypass_reg, _bypass_shift, \ + _flags) \ + struct cv1800_clk_bypass_div _name = { \ + .div = _CV1800_FIXED_DIV(_name, _parent, \ + _gate_reg, _gate_shift, \ + _fix_div, \ + &cv1800_clk_bypass_div_ops, \ + _flags), \ + .bypass = CV1800_CLK_BIT(_bypass_reg, _bypass_shift), \ + } + +#define _CV1800_MUX(_name, _parent, _gate_reg, _gate_shift, \ + _div_reg, _div_shift, _div_width, _div_init, \ + _div_flag, \ + _mux_reg, _mux_shift, _mux_width, \ + _ops, _flags) \ + { \ + .common = CV1800_CLK_COMMON(#_name, _parent, \ + _ops, _flags), \ + .gate = CV1800_CLK_BIT(_gate_reg, \ + 
_gate_shift), \ + .div = CV1800_CLK_REG(_div_reg, _div_shift, \ + _div_width, _div_init, \ + _div_flag), \ + .mux = CV1800_CLK_REG(_mux_reg, _mux_shift, \ + _mux_width, 0, 0), \ + } + +#define CV1800_MUX(_name, _parent, _gate_reg, _gate_shift, \ + _div_reg, _div_shift, _div_width, _div_init, \ + _div_flag, \ + _mux_reg, _mux_shift, _mux_width, _flags) \ + struct cv1800_clk_mux _name = \ + _CV1800_MUX(_name, _parent, _gate_reg, _gate_shift, \ + _div_reg, _div_shift, _div_width, _div_init,\ + _div_flag, _mux_reg, _mux_shift, _mux_width,\ + &cv1800_clk_mux_ops, _flags) + +#define CV1800_BYPASS_MUX(_name, _parent, _gate_reg, _gate_shift, \ + _div_reg, _div_shift, _div_width, _div_init, \ + _div_flag, \ + _mux_reg, _mux_shift, _mux_width, \ + _bypass_reg, _bypass_shift, _flags) \ + struct cv1800_clk_bypass_mux _name = { \ + .mux = _CV1800_MUX(_name, _parent, \ + _gate_reg, _gate_shift, \ + _div_reg, _div_shift, _div_width, \ + _div_init, _div_flag, \ + _mux_reg, _mux_shift, _mux_width, \ + &cv1800_clk_bypass_mux_ops, \ + _flags), \ + .bypass = CV1800_CLK_BIT(_bypass_reg, _bypass_shift), \ + } + +#define CV1800_MMUX(_name, _parent, _gate_reg, _gate_shift, \ + _div0_reg, _div0_shift, _div0_width, _div0_init, \ + _div0_flag, \ + _div1_reg, _div1_shift, _div1_width, _div1_init, \ + _div1_flag, \ + _mux0_reg, _mux0_shift, _mux0_width, \ + _mux1_reg, _mux1_shift, _mux1_width, \ + _bypass_reg, _bypass_shift, \ + _clk_sel_reg, _clk_sel_shift, \ + _parent2sel, _sel2parent0, _sel2parent1, _flags) \ + struct cv1800_clk_mmux _name = { \ + .common = CV1800_CLK_COMMON(#_name, _parent, \ + &cv1800_clk_mmux_ops,\ + _flags), \ + .gate = CV1800_CLK_BIT(_gate_reg, _gate_shift),\ + .div = { \ + CV1800_CLK_REG(_div0_reg, _div0_shift, \ + _div0_width, _div0_init, \ + _div0_flag), \ + CV1800_CLK_REG(_div1_reg, _div1_shift, \ + _div1_width, _div1_init, \ + _div1_flag), \ + }, \ + .mux = { \ + CV1800_CLK_REG(_mux0_reg, _mux0_shift, \ + _mux0_width, 0, 0), \ + CV1800_CLK_REG(_mux1_reg, _mux1_shift, \ + _mux1_width, 0, 0), \ + }, \ + .bypass = CV1800_CLK_BIT(_bypass_reg, \ + _bypass_shift), \ + .clk_sel = CV1800_CLK_BIT(_clk_sel_reg, \ + _clk_sel_shift), \ + .parent2sel = _parent2sel, \ + .sel2parent = { _sel2parent0, _sel2parent1 }, \ + } + +#define CV1800_ACLK(_name, _parent, \ + _src_en_reg, _src_en_reg_shift, \ + _output_en_reg, _output_en_shift, \ + _div_en_reg, _div_en_reg_shift, \ + _div_up_reg, _div_up_reg_shift, \ + _m_reg, _m_shift, _m_width, _m_flag, \ + _n_reg, _n_shift, _n_width, _n_flag, \ + _target_rate, _flags) \ + struct cv1800_clk_audio _name = { \ + .common = CV1800_CLK_COMMON(#_name, _parent, \ + &cv1800_clk_audio_ops,\ + _flags), \ + .src_en = CV1800_CLK_BIT(_src_en_reg, \ + _src_en_reg_shift), \ + .output_en = CV1800_CLK_BIT(_output_en_reg, \ + _output_en_shift), \ + .div_en = CV1800_CLK_BIT(_div_en_reg, \ + _div_en_reg_shift), \ + .div_up = CV1800_CLK_BIT(_div_up_reg, \ + _div_up_reg_shift), \ + .m = CV1800_CLK_REG(_m_reg, _m_shift, \ + _m_width, 0, _m_flag), \ + .n = CV1800_CLK_REG(_n_reg, _n_shift, \ + _n_width, 0, _n_flag), \ + .target_rate = _target_rate, \ + } + +extern const struct clk_ops cv1800_clk_gate_ops; +extern const struct clk_ops cv1800_clk_div_ops; +extern const struct clk_ops cv1800_clk_bypass_div_ops; +extern const struct clk_ops cv1800_clk_mux_ops; +extern const struct clk_ops cv1800_clk_bypass_mux_ops; +extern const struct clk_ops cv1800_clk_mmux_ops; +extern const struct clk_ops cv1800_clk_audio_ops; + +#endif // _CLK_SOPHGO_CV1800_IP_H_ diff --git 
a/drivers/clk/sophgo/clk-cv18xx-pll.c b/drivers/clk/sophgo/clk-cv18xx-pll.c new file mode 100644 index 0000000000..29e24098bf --- /dev/null +++ b/drivers/clk/sophgo/clk-cv18xx-pll.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2023 Inochi Amaoto + */ + +#include +#include +#include +#include + +#include "clk-cv18xx-pll.h" + +static inline struct cv1800_clk_pll *hw_to_cv1800_clk_pll(struct clk_hw *hw) +{ + struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw); + + return container_of(common, struct cv1800_clk_pll, common); +} + +static unsigned long ipll_calc_rate(unsigned long parent_rate, + unsigned long pre_div_sel, + unsigned long div_sel, + unsigned long post_div_sel) +{ + uint64_t rate = parent_rate; + + rate *= div_sel; + do_div(rate, pre_div_sel * post_div_sel); + + return rate; +} + +static unsigned long ipll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw); + u32 value; + + value = readl(pll->common.base + pll->pll_reg); + + return ipll_calc_rate(parent_rate, + PLL_GET_PRE_DIV_SEL(value), + PLL_GET_DIV_SEL(value), + PLL_GET_POST_DIV_SEL(value)); +} + +static int ipll_find_rate(const struct cv1800_clk_pll_limit *limit, + unsigned long prate, unsigned long *rate, + u32 *value) +{ + unsigned long best_rate = 0; + unsigned long trate = *rate; + unsigned long pre_div_sel = 0, div_sel = 0, post_div_sel = 0; + unsigned long pre, div, post; + u32 detected = *value; + unsigned long tmp; + + for_each_pll_limit_range(pre, &limit->pre_div) { + for_each_pll_limit_range(div, &limit->div) { + for_each_pll_limit_range(post, &limit->post_div) { + tmp = ipll_calc_rate(prate, pre, div, post); + + if (tmp > trate) + continue; + + if ((trate - tmp) < (trate - best_rate)) { + best_rate = tmp; + pre_div_sel = pre; + div_sel = div; + post_div_sel = post; + } + } + } + } + + if (best_rate) { + detected = PLL_SET_PRE_DIV_SEL(detected, pre_div_sel); + detected = PLL_SET_POST_DIV_SEL(detected, post_div_sel); + detected = PLL_SET_DIV_SEL(detected, div_sel); + *value = detected; + *rate = best_rate; + return 0; + } + + return -EINVAL; +} + +static int ipll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) +{ + u32 val; + struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw); + + return ipll_find_rate(pll->pll_limit, req->best_parent_rate, + &req->rate, &val); +} + +static void pll_get_mode_ctrl(unsigned long div_sel, + bool (*mode_ctrl_check)(unsigned long, + unsigned long, + unsigned long), + const struct cv1800_clk_pll_limit *limit, + u32 *value) +{ + unsigned long ictrl = 0, mode = 0; + u32 detected = *value; + + for_each_pll_limit_range(mode, &limit->mode) { + for_each_pll_limit_range(ictrl, &limit->ictrl) { + if (mode_ctrl_check(div_sel, ictrl, mode)) { + detected = PLL_SET_SEL_MODE(detected, mode); + detected = PLL_SET_ICTRL(detected, ictrl); + *value = detected; + return; + } + } + } +} + +static bool ipll_check_mode_ctrl_restrict(unsigned long div_sel, + unsigned long ictrl, + unsigned long mode) +{ + unsigned long left_rest = 20 * div_sel; + unsigned long right_rest = 35 * div_sel; + unsigned long test = 184 * (1 + mode) * (1 + ictrl) / 2; + + return test > left_rest && test <= right_rest; +} + +static int ipll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + u32 regval, detected = 0; + unsigned long flags; + struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw); + + ipll_find_rate(pll->pll_limit, parent_rate, &rate, &detected); + 
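/*
 * Note on the flow here: ipll_find_rate() above walks the pre_div/div/post_div
 * ranges given in pll_limit and keeps the combination whose output rate is
 * closest to the request without exceeding it. pll_get_mode_ctrl() below then
 * picks SEL_MODE/ICTRL values that satisfy the restriction check for the
 * chosen DIV_SEL, after which the PLL register is rewritten under the common
 * lock and cv1800_clk_wait_for_lock() waits on the PLL status bit.
 */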
pll_get_mode_ctrl(PLL_GET_DIV_SEL(detected), + ipll_check_mode_ctrl_restrict, + pll->pll_limit, &detected); + + spin_lock_irqsave(pll->common.lock, flags); + + regval = readl(pll->common.base + pll->pll_reg); + regval = PLL_COPY_REG(regval, detected); + + writel(regval, pll->common.base + pll->pll_reg); + + spin_unlock_irqrestore(pll->common.lock, flags); + + cv1800_clk_wait_for_lock(&pll->common, pll->pll_status.reg, + BIT(pll->pll_status.shift)); + + return 0; +} + +static int pll_enable(struct clk_hw *hw) +{ + struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw); + + return cv1800_clk_clearbit(&pll->common, &pll->pll_pwd); +} + +static void pll_disable(struct clk_hw *hw) +{ + struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw); + + cv1800_clk_setbit(&pll->common, &pll->pll_pwd); +} + +static int pll_is_enable(struct clk_hw *hw) +{ + struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw); + + return cv1800_clk_checkbit(&pll->common, &pll->pll_pwd) == 0; +} + +const struct clk_ops cv1800_clk_ipll_ops = { + .disable = pll_disable, + .enable = pll_enable, + .is_enabled = pll_is_enable, + + .recalc_rate = ipll_recalc_rate, + .determine_rate = ipll_determine_rate, + .set_rate = ipll_set_rate, +}; + +#define PLL_SYN_FACTOR_DOT_POS 26 +#define PLL_SYN_FACTOR_MINIMUM ((4 << PLL_SYN_FACTOR_DOT_POS) + 1) + +static bool fpll_is_factional_mode(struct cv1800_clk_pll *pll) +{ + return cv1800_clk_checkbit(&pll->common, &pll->pll_syn->en); +} + +static unsigned long fpll_calc_rate(unsigned long parent_rate, + unsigned long pre_div_sel, + unsigned long div_sel, + unsigned long post_div_sel, + unsigned long ssc_syn_set, + bool is_full_parent) +{ + u64 dividend = parent_rate * div_sel; + u64 factor = ssc_syn_set * pre_div_sel * post_div_sel; + unsigned long rate; + + dividend <<= PLL_SYN_FACTOR_DOT_POS - 1; + rate = div64_u64_rem(dividend, factor, &dividend); + + if (is_full_parent) { + dividend <<= 1; + rate <<= 1; + } + + rate += DIV64_U64_ROUND_CLOSEST(dividend, factor); + + return rate; +} + +static unsigned long fpll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw); + u32 value; + bool clk_full; + u32 syn_set; + + if (!fpll_is_factional_mode(pll)) + return ipll_recalc_rate(hw, parent_rate); + + syn_set = readl(pll->common.base + pll->pll_syn->set); + + if (syn_set == 0) + return 0; + + clk_full = cv1800_clk_checkbit(&pll->common, + &pll->pll_syn->clk_half); + + value = readl(pll->common.base + pll->pll_reg); + + return fpll_calc_rate(parent_rate, + PLL_GET_PRE_DIV_SEL(value), + PLL_GET_DIV_SEL(value), + PLL_GET_POST_DIV_SEL(value), + syn_set, clk_full); +} + +static unsigned long fpll_find_synthesizer(unsigned long parent, + unsigned long rate, + unsigned long pre_div, + unsigned long div, + unsigned long post_div, + bool is_full_parent, + u32 *ssc_syn_set) +{ + u32 test_max = U32_MAX, test_min = PLL_SYN_FACTOR_MINIMUM; + unsigned long trate; + + while (test_min < test_max) { + u32 tssc = (test_max + test_min) / 2; + + trate = fpll_calc_rate(parent, pre_div, div, post_div, + tssc, is_full_parent); + + if (trate == rate) { + test_min = tssc; + break; + } + + if (trate > rate) + test_min = tssc + 1; + else + test_max = tssc - 1; + } + + if (trate != 0) + *ssc_syn_set = test_min; + + return trate; +} + +static int fpll_find_rate(struct cv1800_clk_pll *pll, + const struct cv1800_clk_pll_limit *limit, + unsigned long prate, + unsigned long *rate, + u32 *value, u32 *ssc_syn_set) +{ + unsigned long best_rate = 0; + unsigned long pre_div_sel = 
0, div_sel = 0, post_div_sel = 0; + unsigned long pre, div, post; + unsigned long trate = *rate; + u32 detected = *value; + unsigned long tmp; + bool clk_full = cv1800_clk_checkbit(&pll->common, + &pll->pll_syn->clk_half); + + for_each_pll_limit_range(pre, &limit->pre_div) { + for_each_pll_limit_range(post, &limit->post_div) { + for_each_pll_limit_range(div, &limit->div) { + tmp = fpll_find_synthesizer(prate, trate, + pre, div, post, + clk_full, + ssc_syn_set); + + if ((trate - tmp) < (trate - best_rate)) { + best_rate = tmp; + pre_div_sel = pre; + div_sel = div; + post_div_sel = post; + } + } + } + } + + if (best_rate) { + detected = PLL_SET_PRE_DIV_SEL(detected, pre_div_sel); + detected = PLL_SET_POST_DIV_SEL(detected, post_div_sel); + detected = PLL_SET_DIV_SEL(detected, div_sel); + *value = detected; + *rate = best_rate; + return 0; + } + + return -EINVAL; +} + +static int fpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) +{ + struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw); + u32 val, ssc_syn_set; + + if (!fpll_is_factional_mode(pll)) + return ipll_determine_rate(hw, req); + + fpll_find_rate(pll, &pll->pll_limit[2], req->best_parent_rate, + &req->rate, &val, &ssc_syn_set); + + return 0; +} + +static bool fpll_check_mode_ctrl_restrict(unsigned long div_sel, + unsigned long ictrl, + unsigned long mode) +{ + unsigned long left_rest = 10 * div_sel; + unsigned long right_rest = 24 * div_sel; + unsigned long test = 184 * (1 + mode) * (1 + ictrl) / 2; + + return test > left_rest && test <= right_rest; +} + +static int fpll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + u32 regval; + u32 detected = 0, detected_ssc = 0; + unsigned long flags; + struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw); + + if (!fpll_is_factional_mode(pll)) + return ipll_set_rate(hw, rate, parent_rate); + + fpll_find_rate(pll, &pll->pll_limit[2], parent_rate, + &rate, &detected, &detected_ssc); + pll_get_mode_ctrl(PLL_GET_DIV_SEL(detected), + fpll_check_mode_ctrl_restrict, + pll->pll_limit, &detected); + + spin_lock_irqsave(pll->common.lock, flags); + + writel(detected_ssc, pll->common.base + pll->pll_syn->set); + + regval = readl(pll->common.base + pll->pll_reg); + regval = PLL_COPY_REG(regval, detected); + + writel(regval, pll->common.base + pll->pll_reg); + + spin_unlock_irqrestore(pll->common.lock, flags); + + cv1800_clk_wait_for_lock(&pll->common, pll->pll_status.reg, + BIT(pll->pll_status.shift)); + + return 0; +} + +static u8 fpll_get_parent(struct clk_hw *hw) +{ + struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw); + + if (fpll_is_factional_mode(pll)) + return 1; + + return 0; +} + +static int fpll_set_parent(struct clk_hw *hw, u8 index) +{ + struct cv1800_clk_pll *pll = hw_to_cv1800_clk_pll(hw); + + if (index) + cv1800_clk_setbit(&pll->common, &pll->pll_syn->en); + else + cv1800_clk_clearbit(&pll->common, &pll->pll_syn->en); + + return 0; +} + +const struct clk_ops cv1800_clk_fpll_ops = { + .disable = pll_disable, + .enable = pll_enable, + .is_enabled = pll_is_enable, + + .recalc_rate = fpll_recalc_rate, + .determine_rate = fpll_determine_rate, + .set_rate = fpll_set_rate, + + .set_parent = fpll_set_parent, + .get_parent = fpll_get_parent, +}; diff --git a/drivers/clk/sophgo/clk-cv18xx-pll.h b/drivers/clk/sophgo/clk-cv18xx-pll.h new file mode 100644 index 0000000000..7a33f3da2d --- /dev/null +++ b/drivers/clk/sophgo/clk-cv18xx-pll.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Inochi Amaoto + */ + 
+#ifndef _CLK_SOPHGO_CV1800_PLL_H_ +#define _CLK_SOPHGO_CV1800_PLL_H_ + +#include "clk-cv18xx-common.h" + +struct cv1800_clk_pll_limit { + struct { + u8 min; + u8 max; + } pre_div, div, post_div, ictrl, mode; +}; + +#define _CV1800_PLL_LIMIT(_min, _max) \ + { \ + .min = _min, \ + .max = _max, \ + } \ + +#define for_each_pll_limit_range(_var, _restrict) \ + for (_var = (_restrict)->min; _var <= (_restrict)->max; _var++) + +struct cv1800_clk_pll_synthesizer { + struct cv1800_clk_regbit en; + struct cv1800_clk_regbit clk_half; + u32 ctrl; + u32 set; +}; + +#define _PLL_PRE_DIV_SEL_FIELD GENMASK(6, 0) +#define _PLL_POST_DIV_SEL_FIELD GENMASK(14, 8) +#define _PLL_SEL_MODE_FIELD GENMASK(16, 15) +#define _PLL_DIV_SEL_FIELD GENMASK(23, 17) +#define _PLL_ICTRL_FIELD GENMASK(26, 24) + +#define _PLL_ALL_FIELD_MASK \ + (_PLL_PRE_DIV_SEL_FIELD | \ + _PLL_POST_DIV_SEL_FIELD | \ + _PLL_SEL_MODE_FIELD | \ + _PLL_DIV_SEL_FIELD | \ + _PLL_ICTRL_FIELD) + +#define PLL_COPY_REG(_dest, _src) \ + (((_dest) & (~_PLL_ALL_FIELD_MASK)) | ((_src) & _PLL_ALL_FIELD_MASK)) + +#define PLL_GET_PRE_DIV_SEL(_reg) \ + FIELD_GET(_PLL_PRE_DIV_SEL_FIELD, (_reg)) +#define PLL_GET_POST_DIV_SEL(_reg) \ + FIELD_GET(_PLL_POST_DIV_SEL_FIELD, (_reg)) +#define PLL_GET_SEL_MODE(_reg) \ + FIELD_GET(_PLL_SEL_MODE_FIELD, (_reg)) +#define PLL_GET_DIV_SEL(_reg) \ + FIELD_GET(_PLL_DIV_SEL_FIELD, (_reg)) +#define PLL_GET_ICTRL(_reg) \ + FIELD_GET(_PLL_ICTRL_FIELD, (_reg)) + +#define PLL_SET_PRE_DIV_SEL(_reg, _val) \ + _CV1800_SET_FIELD((_reg), (_val), _PLL_PRE_DIV_SEL_FIELD) +#define PLL_SET_POST_DIV_SEL(_reg, _val) \ + _CV1800_SET_FIELD((_reg), (_val), _PLL_POST_DIV_SEL_FIELD) +#define PLL_SET_SEL_MODE(_reg, _val) \ + _CV1800_SET_FIELD((_reg), (_val), _PLL_SEL_MODE_FIELD) +#define PLL_SET_DIV_SEL(_reg, _val) \ + _CV1800_SET_FIELD((_reg), (_val), _PLL_DIV_SEL_FIELD) +#define PLL_SET_ICTRL(_reg, _val) \ + _CV1800_SET_FIELD((_reg), (_val), _PLL_ICTRL_FIELD) + +struct cv1800_clk_pll { + struct cv1800_clk_common common; + u32 pll_reg; + struct cv1800_clk_regbit pll_pwd; + struct cv1800_clk_regbit pll_status; + const struct cv1800_clk_pll_limit *pll_limit; + struct cv1800_clk_pll_synthesizer *pll_syn; +}; + +#define CV1800_INTEGRAL_PLL(_name, _parent, _pll_reg, \ + _pll_pwd_reg, _pll_pwd_shift, \ + _pll_status_reg, _pll_status_shift, \ + _pll_limit, _flags) \ + struct cv1800_clk_pll _name = { \ + .common = CV1800_CLK_COMMON(#_name, _parent, \ + &cv1800_clk_ipll_ops,\ + _flags), \ + .pll_reg = _pll_reg, \ + .pll_pwd = CV1800_CLK_BIT(_pll_pwd_reg, \ + _pll_pwd_shift), \ + .pll_status = CV1800_CLK_BIT(_pll_status_reg, \ + _pll_status_shift), \ + .pll_limit = _pll_limit, \ + .pll_syn = NULL, \ + } + +#define CV1800_FACTIONAL_PLL(_name, _parent, _pll_reg, \ + _pll_pwd_reg, _pll_pwd_shift, \ + _pll_status_reg, _pll_status_shift, \ + _pll_limit, _pll_syn, _flags) \ + struct cv1800_clk_pll _name = { \ + .common = CV1800_CLK_COMMON(#_name, _parent, \ + &cv1800_clk_fpll_ops,\ + _flags), \ + .pll_reg = _pll_reg, \ + .pll_pwd = CV1800_CLK_BIT(_pll_pwd_reg, \ + _pll_pwd_shift), \ + .pll_status = CV1800_CLK_BIT(_pll_status_reg, \ + _pll_status_shift), \ + .pll_limit = _pll_limit, \ + .pll_syn = _pll_syn, \ + } + +extern const struct clk_ops cv1800_clk_ipll_ops; +extern const struct clk_ops cv1800_clk_fpll_ops; + +#endif // _CLK_SOPHGO_CV1800_PLL_H_ diff --git a/drivers/clk/stm32/Kconfig b/drivers/clk/stm32/Kconfig index 3c8493a94a..dca409d526 100644 --- a/drivers/clk/stm32/Kconfig +++ b/drivers/clk/stm32/Kconfig @@ -25,5 +25,12 @@ config COMMON_CLK_STM32MP157 
help Support for stm32mp15x SoC family clocks. +config COMMON_CLK_STM32MP257 + bool "Clock driver for stm32mp25x clocks" + depends on ARM64 || COMPILE_TEST + default y + help + Support for stm32mp25x SoC family clocks. + endif diff --git a/drivers/clk/stm32/Makefile b/drivers/clk/stm32/Makefile index 5ced7fe3dd..0a627164fc 100644 --- a/drivers/clk/stm32/Makefile +++ b/drivers/clk/stm32/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_COMMON_CLK_STM32MP135) += clk-stm32mp13.o clk-stm32-core.o reset-stm32.o obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o reset-stm32.o +obj-$(CONFIG_COMMON_CLK_STM32MP257) += clk-stm32mp25.o clk-stm32-core.o reset-stm32.o diff --git a/drivers/clk/stm32/clk-stm32-core.c b/drivers/clk/stm32/clk-stm32-core.c index 58705fcad3..1721a3ed73 100644 --- a/drivers/clk/stm32/clk-stm32-core.c +++ b/drivers/clk/stm32/clk-stm32-core.c @@ -25,7 +25,6 @@ static int stm32_rcc_clock_init(struct device *dev, { const struct stm32_rcc_match_data *data = match->data; struct clk_hw_onecell_data *clk_data = data->hw_clks; - struct device_node *np = dev_of_node(dev); struct clk_hw **hws; int n, max_binding; @@ -64,7 +63,7 @@ static int stm32_rcc_clock_init(struct device *dev, hws[cfg_clock->id] = hw; } - return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); + return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data); } int stm32_rcc_init(struct device *dev, const struct of_device_id *match_data, @@ -638,7 +637,7 @@ struct clk_hw *clk_stm32_mux_register(struct device *dev, mux->lock = lock; mux->clock_data = data->clock_data; - err = clk_hw_register(dev, hw); + err = devm_clk_hw_register(dev, hw); if (err) return ERR_PTR(err); @@ -659,7 +658,7 @@ struct clk_hw *clk_stm32_gate_register(struct device *dev, gate->lock = lock; gate->clock_data = data->clock_data; - err = clk_hw_register(dev, hw); + err = devm_clk_hw_register(dev, hw); if (err) return ERR_PTR(err); @@ -680,7 +679,7 @@ struct clk_hw *clk_stm32_div_register(struct device *dev, div->lock = lock; div->clock_data = data->clock_data; - err = clk_hw_register(dev, hw); + err = devm_clk_hw_register(dev, hw); if (err) return ERR_PTR(err); @@ -701,7 +700,7 @@ struct clk_hw *clk_stm32_composite_register(struct device *dev, composite->lock = lock; composite->clock_data = data->clock_data; - err = clk_hw_register(dev, hw); + err = devm_clk_hw_register(dev, hw); if (err) return ERR_PTR(err); diff --git a/drivers/clk/stm32/clk-stm32mp13.c b/drivers/clk/stm32/clk-stm32mp13.c index d4ecb3c34a..bf81d74917 100644 --- a/drivers/clk/stm32/clk-stm32mp13.c +++ b/drivers/clk/stm32/clk-stm32mp13.c @@ -1536,77 +1536,16 @@ static const struct of_device_id stm32mp13_match_data[] = { }; MODULE_DEVICE_TABLE(of, stm32mp13_match_data); -static int stm32mp1_rcc_init(struct device *dev) -{ - void __iomem *rcc_base; - int ret = -ENOMEM; - - rcc_base = of_iomap(dev_of_node(dev), 0); - if (!rcc_base) { - dev_err(dev, "%pOFn: unable to map resource", dev_of_node(dev)); - goto out; - } - - ret = stm32_rcc_init(dev, stm32mp13_match_data, rcc_base); -out: - if (ret) { - if (rcc_base) - iounmap(rcc_base); - - of_node_put(dev_of_node(dev)); - } - - return ret; -} - -static int get_clock_deps(struct device *dev) -{ - static const char * const clock_deps_name[] = { - "hsi", "hse", "csi", "lsi", "lse", - }; - size_t deps_size = sizeof(struct clk *) * ARRAY_SIZE(clock_deps_name); - struct clk **clk_deps; - int i; - - clk_deps = devm_kzalloc(dev, deps_size, GFP_KERNEL); - if (!clk_deps) - return -ENOMEM; - - for (i = 0; i < 
ARRAY_SIZE(clock_deps_name); i++) { - struct clk *clk = of_clk_get_by_name(dev_of_node(dev), - clock_deps_name[i]); - - if (IS_ERR(clk)) { - if (PTR_ERR(clk) != -EINVAL && PTR_ERR(clk) != -ENOENT) - return PTR_ERR(clk); - } else { - /* Device gets a reference count on the clock */ - clk_deps[i] = devm_clk_get(dev, __clk_get_name(clk)); - clk_put(clk); - } - } - - return 0; -} - static int stm32mp1_rcc_clocks_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - int ret = get_clock_deps(dev); + void __iomem *base; - if (!ret) - ret = stm32mp1_rcc_init(dev); - - return ret; -} - -static void stm32mp1_rcc_clocks_remove(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct device_node *child, *np = dev_of_node(dev); + base = devm_platform_ioremap_resource(pdev, 0); + if (WARN_ON(IS_ERR(base))) + return PTR_ERR(base); - for_each_available_child_of_node(np, child) - of_clk_del_provider(child); + return stm32_rcc_init(dev, stm32mp13_match_data, base); } static struct platform_driver stm32mp13_rcc_clocks_driver = { @@ -1615,7 +1554,6 @@ static struct platform_driver stm32mp13_rcc_clocks_driver = { .of_match_table = stm32mp13_match_data, }, .probe = stm32mp1_rcc_clocks_probe, - .remove_new = stm32mp1_rcc_clocks_remove, }; static int __init stm32mp13_clocks_init(void) diff --git a/drivers/clk/stm32/clk-stm32mp25.c b/drivers/clk/stm32/clk-stm32mp25.c new file mode 100644 index 0000000000..210b75b39e --- /dev/null +++ b/drivers/clk/stm32/clk-stm32mp25.c @@ -0,0 +1,1875 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) STMicroelectronics 2023 - All Rights Reserved + * Author: Gabriel Fernandez for STMicroelectronics. + */ + +#include +#include + +#include "clk-stm32-core.h" +#include "reset-stm32.h" +#include "stm32mp25_rcc.h" + +#include +#include + +enum { + HSE, + HSI, + MSI, + LSE, + LSI, + HSE_DIV2, + ICN_HS_MCU, + ICN_LS_MCU, + ICN_SDMMC, + ICN_DDR, + ICN_DISPLAY, + ICN_HSL, + ICN_NIC, + ICN_VID, + FLEXGEN_07, + FLEXGEN_08, + FLEXGEN_09, + FLEXGEN_10, + FLEXGEN_11, + FLEXGEN_12, + FLEXGEN_13, + FLEXGEN_14, + FLEXGEN_15, + FLEXGEN_16, + FLEXGEN_17, + FLEXGEN_18, + FLEXGEN_19, + FLEXGEN_20, + FLEXGEN_21, + FLEXGEN_22, + FLEXGEN_23, + FLEXGEN_24, + FLEXGEN_25, + FLEXGEN_26, + FLEXGEN_27, + FLEXGEN_28, + FLEXGEN_29, + FLEXGEN_30, + FLEXGEN_31, + FLEXGEN_32, + FLEXGEN_33, + FLEXGEN_34, + FLEXGEN_35, + FLEXGEN_36, + FLEXGEN_37, + FLEXGEN_38, + FLEXGEN_39, + FLEXGEN_40, + FLEXGEN_41, + FLEXGEN_42, + FLEXGEN_43, + FLEXGEN_44, + FLEXGEN_45, + FLEXGEN_46, + FLEXGEN_47, + FLEXGEN_48, + FLEXGEN_49, + FLEXGEN_50, + FLEXGEN_51, + FLEXGEN_52, + FLEXGEN_53, + FLEXGEN_54, + FLEXGEN_55, + FLEXGEN_56, + FLEXGEN_57, + FLEXGEN_58, + FLEXGEN_59, + FLEXGEN_60, + FLEXGEN_61, + FLEXGEN_62, + FLEXGEN_63, + ICN_APB1, + ICN_APB2, + ICN_APB3, + ICN_APB4, + ICN_APBDBG, + TIMG1, + TIMG2, + PLL3, + DSI_TXBYTE, +}; + +static const struct clk_parent_data adc12_src[] = { + { .index = FLEXGEN_46 }, + { .index = ICN_LS_MCU }, +}; + +static const struct clk_parent_data adc3_src[] = { + { .index = FLEXGEN_47 }, + { .index = ICN_LS_MCU }, + { .index = FLEXGEN_46 }, +}; + +static const struct clk_parent_data usb2phy1_src[] = { + { .index = FLEXGEN_57 }, + { .index = HSE_DIV2 }, +}; + +static const struct clk_parent_data usb2phy2_src[] = { + { .index = FLEXGEN_58 }, + { .index = HSE_DIV2 }, +}; + +static const struct clk_parent_data usb3pciphy_src[] = { + { .index = FLEXGEN_34 }, + { .index = HSE_DIV2 }, +}; + +static struct clk_stm32_gate ck_ker_ltdc; + +static const struct 
clk_parent_data dsiblane_src[] = { + { .index = DSI_TXBYTE }, + { .hw = &ck_ker_ltdc.hw }, +}; + +static const struct clk_parent_data dsiphy_src[] = { + { .index = FLEXGEN_28 }, + { .index = HSE }, +}; + +static const struct clk_parent_data lvdsphy_src[] = { + { .index = FLEXGEN_32 }, + { .index = HSE }, +}; + +static const struct clk_parent_data dts_src[] = { + { .index = HSI }, + { .index = HSE }, + { .index = MSI }, +}; + +static const struct clk_parent_data mco1_src[] = { + { .index = FLEXGEN_61 }, +}; + +static const struct clk_parent_data mco2_src[] = { + { .index = FLEXGEN_62 }, +}; + +enum enum_mux_cfg { + MUX_ADC12, + MUX_ADC3, + MUX_DSIBLANE, + MUX_DSIPHY, + MUX_DTS, + MUX_LVDSPHY, + MUX_MCO1, + MUX_MCO2, + MUX_USB2PHY1, + MUX_USB2PHY2, + MUX_USB3PCIEPHY, + MUX_NB +}; + +#define MUX_CFG(id, _offset, _shift, _witdh) \ + [id] = { \ + .offset = (_offset), \ + .shift = (_shift), \ + .width = (_witdh), \ + } + +static const struct stm32_mux_cfg stm32mp25_muxes[MUX_NB] = { + MUX_CFG(MUX_ADC12, RCC_ADC12CFGR, 12, 1), + MUX_CFG(MUX_ADC3, RCC_ADC3CFGR, 12, 2), + MUX_CFG(MUX_DSIBLANE, RCC_DSICFGR, 12, 1), + MUX_CFG(MUX_DSIPHY, RCC_DSICFGR, 15, 1), + MUX_CFG(MUX_DTS, RCC_DTSCFGR, 12, 2), + MUX_CFG(MUX_LVDSPHY, RCC_LVDSCFGR, 15, 1), + MUX_CFG(MUX_MCO1, RCC_MCO1CFGR, 0, 1), + MUX_CFG(MUX_MCO2, RCC_MCO2CFGR, 0, 1), + MUX_CFG(MUX_USB2PHY1, RCC_USB2PHY1CFGR, 15, 1), + MUX_CFG(MUX_USB2PHY2, RCC_USB2PHY2CFGR, 15, 1), + MUX_CFG(MUX_USB3PCIEPHY, RCC_USB3PCIEPHYCFGR, 15, 1), +}; + +enum enum_gate_cfg { + GATE_ADC12, + GATE_ADC3, + GATE_ADF1, + GATE_CCI, + GATE_CRC, + GATE_CRYP1, + GATE_CRYP2, + GATE_CSI, + GATE_DCMIPP, + GATE_DSI, + GATE_DTS, + GATE_ETH1, + GATE_ETH1MAC, + GATE_ETH1RX, + GATE_ETH1STP, + GATE_ETH1TX, + GATE_ETH2, + GATE_ETH2MAC, + GATE_ETH2RX, + GATE_ETH2STP, + GATE_ETH2TX, + GATE_ETHSW, + GATE_ETHSWACMCFG, + GATE_ETHSWACMMSG, + GATE_ETHSWMAC, + GATE_ETHSWREF, + GATE_FDCAN, + GATE_GPU, + GATE_HASH, + GATE_HDP, + GATE_I2C1, + GATE_I2C2, + GATE_I2C3, + GATE_I2C4, + GATE_I2C5, + GATE_I2C6, + GATE_I2C7, + GATE_I2C8, + GATE_I3C1, + GATE_I3C2, + GATE_I3C3, + GATE_I3C4, + GATE_IS2M, + GATE_IWDG1, + GATE_IWDG2, + GATE_IWDG3, + GATE_IWDG4, + GATE_IWDG5, + GATE_LPTIM1, + GATE_LPTIM2, + GATE_LPTIM3, + GATE_LPTIM4, + GATE_LPTIM5, + GATE_LPUART1, + GATE_LTDC, + GATE_LVDS, + GATE_MCO1, + GATE_MCO2, + GATE_MDF1, + GATE_OSPIIOM, + GATE_PCIE, + GATE_PKA, + GATE_RNG, + GATE_SAES, + GATE_SAI1, + GATE_SAI2, + GATE_SAI3, + GATE_SAI4, + GATE_SDMMC1, + GATE_SDMMC2, + GATE_SDMMC3, + GATE_SERC, + GATE_SPDIFRX, + GATE_SPI1, + GATE_SPI2, + GATE_SPI3, + GATE_SPI4, + GATE_SPI5, + GATE_SPI6, + GATE_SPI7, + GATE_SPI8, + GATE_TIM1, + GATE_TIM10, + GATE_TIM11, + GATE_TIM12, + GATE_TIM13, + GATE_TIM14, + GATE_TIM15, + GATE_TIM16, + GATE_TIM17, + GATE_TIM2, + GATE_TIM20, + GATE_TIM3, + GATE_TIM4, + GATE_TIM5, + GATE_TIM6, + GATE_TIM7, + GATE_TIM8, + GATE_UART4, + GATE_UART5, + GATE_UART7, + GATE_UART8, + GATE_UART9, + GATE_USART1, + GATE_USART2, + GATE_USART3, + GATE_USART6, + GATE_USBH, + GATE_USB2PHY1, + GATE_USB2PHY2, + GATE_USB3DR, + GATE_USB3PCIEPHY, + GATE_USBTC, + GATE_VDEC, + GATE_VENC, + GATE_VREF, + GATE_WWDG1, + GATE_WWDG2, + GATE_NB +}; + +#define GATE_CFG(id, _offset, _bit_idx, _offset_clr) \ + [id] = { \ + .offset = (_offset), \ + .bit_idx = (_bit_idx), \ + .set_clr = (_offset_clr), \ + } + +static const struct stm32_gate_cfg stm32mp25_gates[GATE_NB] = { + GATE_CFG(GATE_ADC12, RCC_ADC12CFGR, 1, 0), + GATE_CFG(GATE_ADC3, RCC_ADC3CFGR, 1, 0), + GATE_CFG(GATE_ADF1, RCC_ADF1CFGR, 1, 0), + GATE_CFG(GATE_CCI, 
RCC_CCICFGR, 1, 0), + GATE_CFG(GATE_CRC, RCC_CRCCFGR, 1, 0), + GATE_CFG(GATE_CRYP1, RCC_CRYP1CFGR, 1, 0), + GATE_CFG(GATE_CRYP2, RCC_CRYP2CFGR, 1, 0), + GATE_CFG(GATE_CSI, RCC_CSICFGR, 1, 0), + GATE_CFG(GATE_DCMIPP, RCC_DCMIPPCFGR, 1, 0), + GATE_CFG(GATE_DSI, RCC_DSICFGR, 1, 0), + GATE_CFG(GATE_DTS, RCC_DTSCFGR, 1, 0), + GATE_CFG(GATE_ETH1, RCC_ETH1CFGR, 5, 0), + GATE_CFG(GATE_ETH1MAC, RCC_ETH1CFGR, 1, 0), + GATE_CFG(GATE_ETH1RX, RCC_ETH1CFGR, 10, 0), + GATE_CFG(GATE_ETH1STP, RCC_ETH1CFGR, 4, 0), + GATE_CFG(GATE_ETH1TX, RCC_ETH1CFGR, 8, 0), + GATE_CFG(GATE_ETH2, RCC_ETH2CFGR, 5, 0), + GATE_CFG(GATE_ETH2MAC, RCC_ETH2CFGR, 1, 0), + GATE_CFG(GATE_ETH2RX, RCC_ETH2CFGR, 10, 0), + GATE_CFG(GATE_ETH2STP, RCC_ETH2CFGR, 4, 0), + GATE_CFG(GATE_ETH2TX, RCC_ETH2CFGR, 8, 0), + GATE_CFG(GATE_ETHSW, RCC_ETHSWCFGR, 5, 0), + GATE_CFG(GATE_ETHSWACMCFG, RCC_ETHSWACMCFGR, 1, 0), + GATE_CFG(GATE_ETHSWACMMSG, RCC_ETHSWACMMSGCFGR, 1, 0), + GATE_CFG(GATE_ETHSWMAC, RCC_ETHSWCFGR, 1, 0), + GATE_CFG(GATE_ETHSWREF, RCC_ETHSWCFGR, 21, 0), + GATE_CFG(GATE_FDCAN, RCC_FDCANCFGR, 1, 0), + GATE_CFG(GATE_GPU, RCC_GPUCFGR, 1, 0), + GATE_CFG(GATE_HASH, RCC_HASHCFGR, 1, 0), + GATE_CFG(GATE_HDP, RCC_HDPCFGR, 1, 0), + GATE_CFG(GATE_I2C1, RCC_I2C1CFGR, 1, 0), + GATE_CFG(GATE_I2C2, RCC_I2C2CFGR, 1, 0), + GATE_CFG(GATE_I2C3, RCC_I2C3CFGR, 1, 0), + GATE_CFG(GATE_I2C4, RCC_I2C4CFGR, 1, 0), + GATE_CFG(GATE_I2C5, RCC_I2C5CFGR, 1, 0), + GATE_CFG(GATE_I2C6, RCC_I2C6CFGR, 1, 0), + GATE_CFG(GATE_I2C7, RCC_I2C7CFGR, 1, 0), + GATE_CFG(GATE_I2C8, RCC_I2C8CFGR, 1, 0), + GATE_CFG(GATE_I3C1, RCC_I3C1CFGR, 1, 0), + GATE_CFG(GATE_I3C2, RCC_I3C2CFGR, 1, 0), + GATE_CFG(GATE_I3C3, RCC_I3C3CFGR, 1, 0), + GATE_CFG(GATE_I3C4, RCC_I3C4CFGR, 1, 0), + GATE_CFG(GATE_IS2M, RCC_IS2MCFGR, 1, 0), + GATE_CFG(GATE_IWDG1, RCC_IWDG1CFGR, 1, 0), + GATE_CFG(GATE_IWDG2, RCC_IWDG2CFGR, 1, 0), + GATE_CFG(GATE_IWDG3, RCC_IWDG3CFGR, 1, 0), + GATE_CFG(GATE_IWDG4, RCC_IWDG4CFGR, 1, 0), + GATE_CFG(GATE_IWDG5, RCC_IWDG5CFGR, 1, 0), + GATE_CFG(GATE_LPTIM1, RCC_LPTIM1CFGR, 1, 0), + GATE_CFG(GATE_LPTIM2, RCC_LPTIM2CFGR, 1, 0), + GATE_CFG(GATE_LPTIM3, RCC_LPTIM3CFGR, 1, 0), + GATE_CFG(GATE_LPTIM4, RCC_LPTIM4CFGR, 1, 0), + GATE_CFG(GATE_LPTIM5, RCC_LPTIM5CFGR, 1, 0), + GATE_CFG(GATE_LPUART1, RCC_LPUART1CFGR, 1, 0), + GATE_CFG(GATE_LTDC, RCC_LTDCCFGR, 1, 0), + GATE_CFG(GATE_LVDS, RCC_LVDSCFGR, 1, 0), + GATE_CFG(GATE_MCO1, RCC_MCO1CFGR, 8, 0), + GATE_CFG(GATE_MCO2, RCC_MCO2CFGR, 8, 0), + GATE_CFG(GATE_MDF1, RCC_MDF1CFGR, 1, 0), + GATE_CFG(GATE_OSPIIOM, RCC_OSPIIOMCFGR, 1, 0), + GATE_CFG(GATE_PCIE, RCC_PCIECFGR, 1, 0), + GATE_CFG(GATE_PKA, RCC_PKACFGR, 1, 0), + GATE_CFG(GATE_RNG, RCC_RNGCFGR, 1, 0), + GATE_CFG(GATE_SAES, RCC_SAESCFGR, 1, 0), + GATE_CFG(GATE_SAI1, RCC_SAI1CFGR, 1, 0), + GATE_CFG(GATE_SAI2, RCC_SAI2CFGR, 1, 0), + GATE_CFG(GATE_SAI3, RCC_SAI3CFGR, 1, 0), + GATE_CFG(GATE_SAI4, RCC_SAI4CFGR, 1, 0), + GATE_CFG(GATE_SDMMC1, RCC_SDMMC1CFGR, 1, 0), + GATE_CFG(GATE_SDMMC2, RCC_SDMMC2CFGR, 1, 0), + GATE_CFG(GATE_SDMMC3, RCC_SDMMC3CFGR, 1, 0), + GATE_CFG(GATE_SERC, RCC_SERCCFGR, 1, 0), + GATE_CFG(GATE_SPDIFRX, RCC_SPDIFRXCFGR, 1, 0), + GATE_CFG(GATE_SPI1, RCC_SPI1CFGR, 1, 0), + GATE_CFG(GATE_SPI2, RCC_SPI2CFGR, 1, 0), + GATE_CFG(GATE_SPI3, RCC_SPI3CFGR, 1, 0), + GATE_CFG(GATE_SPI4, RCC_SPI4CFGR, 1, 0), + GATE_CFG(GATE_SPI5, RCC_SPI5CFGR, 1, 0), + GATE_CFG(GATE_SPI6, RCC_SPI6CFGR, 1, 0), + GATE_CFG(GATE_SPI7, RCC_SPI7CFGR, 1, 0), + GATE_CFG(GATE_SPI8, RCC_SPI8CFGR, 1, 0), + GATE_CFG(GATE_TIM1, RCC_TIM1CFGR, 1, 0), + GATE_CFG(GATE_TIM10, RCC_TIM10CFGR, 1, 0), + 
GATE_CFG(GATE_TIM11, RCC_TIM11CFGR, 1, 0), + GATE_CFG(GATE_TIM12, RCC_TIM12CFGR, 1, 0), + GATE_CFG(GATE_TIM13, RCC_TIM13CFGR, 1, 0), + GATE_CFG(GATE_TIM14, RCC_TIM14CFGR, 1, 0), + GATE_CFG(GATE_TIM15, RCC_TIM15CFGR, 1, 0), + GATE_CFG(GATE_TIM16, RCC_TIM16CFGR, 1, 0), + GATE_CFG(GATE_TIM17, RCC_TIM17CFGR, 1, 0), + GATE_CFG(GATE_TIM2, RCC_TIM2CFGR, 1, 0), + GATE_CFG(GATE_TIM20, RCC_TIM20CFGR, 1, 0), + GATE_CFG(GATE_TIM3, RCC_TIM3CFGR, 1, 0), + GATE_CFG(GATE_TIM4, RCC_TIM4CFGR, 1, 0), + GATE_CFG(GATE_TIM5, RCC_TIM5CFGR, 1, 0), + GATE_CFG(GATE_TIM6, RCC_TIM6CFGR, 1, 0), + GATE_CFG(GATE_TIM7, RCC_TIM7CFGR, 1, 0), + GATE_CFG(GATE_TIM8, RCC_TIM8CFGR, 1, 0), + GATE_CFG(GATE_UART4, RCC_UART4CFGR, 1, 0), + GATE_CFG(GATE_UART5, RCC_UART5CFGR, 1, 0), + GATE_CFG(GATE_UART7, RCC_UART7CFGR, 1, 0), + GATE_CFG(GATE_UART8, RCC_UART8CFGR, 1, 0), + GATE_CFG(GATE_UART9, RCC_UART9CFGR, 1, 0), + GATE_CFG(GATE_USART1, RCC_USART1CFGR, 1, 0), + GATE_CFG(GATE_USART2, RCC_USART2CFGR, 1, 0), + GATE_CFG(GATE_USART3, RCC_USART3CFGR, 1, 0), + GATE_CFG(GATE_USART6, RCC_USART6CFGR, 1, 0), + GATE_CFG(GATE_USBH, RCC_USBHCFGR, 1, 0), + GATE_CFG(GATE_USB2PHY1, RCC_USB2PHY1CFGR, 1, 0), + GATE_CFG(GATE_USB2PHY2, RCC_USB2PHY2CFGR, 1, 0), + GATE_CFG(GATE_USB3DR, RCC_USB3DRCFGR, 1, 0), + GATE_CFG(GATE_USB3PCIEPHY, RCC_USB3PCIEPHYCFGR, 1, 0), + GATE_CFG(GATE_USBTC, RCC_USBTCCFGR, 1, 0), + GATE_CFG(GATE_VDEC, RCC_VDECCFGR, 1, 0), + GATE_CFG(GATE_VENC, RCC_VENCCFGR, 1, 0), + GATE_CFG(GATE_VREF, RCC_VREFCFGR, 1, 0), + GATE_CFG(GATE_WWDG1, RCC_WWDG1CFGR, 1, 0), + GATE_CFG(GATE_WWDG2, RCC_WWDG2CFGR, 1, 0), +}; + +#define CLK_HW_INIT_INDEX(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_data = (const struct clk_parent_data[]) { \ + { .index = _parent }, \ + }, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + +/* ADC */ +static struct clk_stm32_gate ck_icn_p_adc12 = { + .gate_id = GATE_ADC12, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adc12", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_composite ck_ker_adc12 = { + .gate_id = GATE_ADC12, + .mux_id = MUX_ADC12, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_adc12", adc12_src, &clk_stm32_composite_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_adc3 = { + .gate_id = GATE_ADC3, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adc3", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_composite ck_ker_adc3 = { + .gate_id = GATE_ADC3, + .mux_id = MUX_ADC3, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_adc3", adc3_src, &clk_stm32_composite_ops, 0), +}; + +/* ADF */ +static struct clk_stm32_gate ck_icn_p_adf1 = { + .gate_id = GATE_ADF1, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adf1", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_adf1 = { + .gate_id = GATE_ADF1, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_adf1", FLEXGEN_42, &clk_stm32_gate_ops, 0), +}; + +/* DCMI */ +static struct clk_stm32_gate ck_icn_p_cci = { + .gate_id = GATE_CCI, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cci", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +/* CSI-HOST */ +static struct clk_stm32_gate ck_icn_p_csi = { + .gate_id = GATE_CSI, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_csi", ICN_APB4, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_csi = { + .gate_id = GATE_CSI, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_csi", FLEXGEN_29, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_csitxesc = { + .gate_id = GATE_CSI, + 
.hw.init = CLK_HW_INIT_INDEX("ck_ker_csitxesc", FLEXGEN_30, &clk_stm32_gate_ops, 0), +}; + +/* CSI-PHY */ +static struct clk_stm32_gate ck_ker_csiphy = { + .gate_id = GATE_CSI, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_csiphy", FLEXGEN_31, &clk_stm32_gate_ops, 0), +}; + +/* DCMIPP */ +static struct clk_stm32_gate ck_icn_p_dcmipp = { + .gate_id = GATE_DCMIPP, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_dcmipp", ICN_APB4, &clk_stm32_gate_ops, 0), +}; + +/* CRC */ +static struct clk_stm32_gate ck_icn_p_crc = { + .gate_id = GATE_CRC, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_crc", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +/* CRYP */ +static struct clk_stm32_gate ck_icn_p_cryp1 = { + .gate_id = GATE_CRYP1, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cryp1", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_cryp2 = { + .gate_id = GATE_CRYP2, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cryp2", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +/* DBG & TRACE*/ +/* Trace and debug clocks are managed by SCMI */ + +/* LTDC */ +static struct clk_stm32_gate ck_icn_p_ltdc = { + .gate_id = GATE_LTDC, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ltdc", ICN_APB4, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_ltdc = { + .gate_id = GATE_LTDC, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_ltdc", FLEXGEN_27, &clk_stm32_gate_ops, + CLK_SET_RATE_PARENT), +}; + +/* DSI */ +static struct clk_stm32_gate ck_icn_p_dsi = { + .gate_id = GATE_DSI, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_dsi", ICN_APB4, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_composite clk_lanebyte = { + .gate_id = GATE_DSI, + .mux_id = MUX_DSIBLANE, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS_DATA("clk_lanebyte", dsiblane_src, + &clk_stm32_composite_ops, 0), +}; + +/* LVDS */ +static struct clk_stm32_gate ck_icn_p_lvds = { + .gate_id = GATE_LVDS, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lvds", ICN_APB4, &clk_stm32_gate_ops, 0), +}; + +/* DSI PHY */ +static struct clk_stm32_composite clk_phy_dsi = { + .gate_id = GATE_DSI, + .mux_id = MUX_DSIPHY, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS_DATA("clk_phy_dsi", dsiphy_src, + &clk_stm32_composite_ops, 0), +}; + +/* LVDS PHY */ +static struct clk_stm32_composite ck_ker_lvdsphy = { + .gate_id = GATE_LVDS, + .mux_id = MUX_LVDSPHY, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_lvdsphy", lvdsphy_src, + &clk_stm32_composite_ops, 0), +}; + +/* DTS */ +static struct clk_stm32_composite ck_ker_dts = { + .gate_id = GATE_DTS, + .mux_id = MUX_DTS, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_dts", dts_src, + &clk_stm32_composite_ops, 0), +}; + +/* ETHERNET */ +static struct clk_stm32_gate ck_icn_p_eth1 = { + .gate_id = GATE_ETH1, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_eth1", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_eth1stp = { + .gate_id = GATE_ETH1STP, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1stp", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_eth1 = { + .gate_id = GATE_ETH1, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1", FLEXGEN_54, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_eth1ptp = { + .gate_id = GATE_ETH1, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1ptp", FLEXGEN_56, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_eth1mac = { + .gate_id = GATE_ETH1MAC, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1mac", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate 
ck_ker_eth1tx = { + .gate_id = GATE_ETH1TX, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1tx", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_eth1rx = { + .gate_id = GATE_ETH1RX, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1rx", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_eth2 = { + .gate_id = GATE_ETH2, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_eth2", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_eth2stp = { + .gate_id = GATE_ETH2STP, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2stp", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_eth2 = { + .gate_id = GATE_ETH2, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2", FLEXGEN_55, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_eth2ptp = { + .gate_id = GATE_ETH2, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2ptp", FLEXGEN_56, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_eth2mac = { + .gate_id = GATE_ETH2MAC, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2mac", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_eth2tx = { + .gate_id = GATE_ETH2TX, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2tx", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_eth2rx = { + .gate_id = GATE_ETH2RX, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2rx", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_ethsw = { + .gate_id = GATE_ETHSWMAC, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ethsw", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_ethsw = { + .gate_id = GATE_ETHSW, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_ethsw", FLEXGEN_54, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_ethswref = { + .gate_id = GATE_ETHSWREF, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_ethswref", FLEXGEN_60, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_ethsw_acm_cfg = { + .gate_id = GATE_ETHSWACMCFG, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ethsw_acm_cfg", ICN_LS_MCU, + &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_ethsw_acm_msg = { + .gate_id = GATE_ETHSWACMMSG, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ethsw_acm_msg", ICN_LS_MCU, + &clk_stm32_gate_ops, 0), +}; + +/* FDCAN */ +static struct clk_stm32_gate ck_icn_p_fdcan = { + .gate_id = GATE_FDCAN, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_fdcan", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_fdcan = { + .gate_id = GATE_FDCAN, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_fdcan", FLEXGEN_26, &clk_stm32_gate_ops, 0), +}; + +/* GPU */ +static struct clk_stm32_gate ck_icn_m_gpu = { + .gate_id = GATE_GPU, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_gpu", FLEXGEN_59, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_gpu = { + .gate_id = GATE_GPU, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_gpu", PLL3, &clk_stm32_gate_ops, 0), +}; + +/* HASH */ +static struct clk_stm32_gate ck_icn_p_hash = { + .gate_id = GATE_HASH, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hash", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +/* HDP */ +static struct clk_stm32_gate ck_icn_p_hdp = { + .gate_id = GATE_HDP, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hdp", ICN_APB3, &clk_stm32_gate_ops, 0), +}; + +/* I2C */ +static struct clk_stm32_gate ck_icn_p_i2c8 = { + .gate_id = GATE_I2C8, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c8", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate 
ck_icn_p_i2c1 = { + .gate_id = GATE_I2C1, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c1", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_i2c2 = { + .gate_id = GATE_I2C2, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c2", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_i2c3 = { + .gate_id = GATE_I2C3, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c3", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_i2c4 = { + .gate_id = GATE_I2C4, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c4", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_i2c5 = { + .gate_id = GATE_I2C5, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c5", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_i2c6 = { + .gate_id = GATE_I2C6, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c6", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_i2c7 = { + .gate_id = GATE_I2C7, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c7", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_i2c1 = { + .gate_id = GATE_I2C1, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c1", FLEXGEN_12, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_i2c2 = { + .gate_id = GATE_I2C2, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c2", FLEXGEN_12, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_i2c3 = { + .gate_id = GATE_I2C3, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c3", FLEXGEN_13, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_i2c5 = { + .gate_id = GATE_I2C5, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c5", FLEXGEN_13, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_i2c4 = { + .gate_id = GATE_I2C4, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c4", FLEXGEN_14, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_i2c6 = { + .gate_id = GATE_I2C6, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c6", FLEXGEN_14, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_i2c7 = { + .gate_id = GATE_I2C7, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c7", FLEXGEN_15, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_i2c8 = { + .gate_id = GATE_I2C8, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c8", FLEXGEN_38, &clk_stm32_gate_ops, 0), +}; + +/* I3C */ +static struct clk_stm32_gate ck_icn_p_i3c1 = { + .gate_id = GATE_I3C1, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c1", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_i3c2 = { + .gate_id = GATE_I3C2, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c2", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_i3c3 = { + .gate_id = GATE_I3C3, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c3", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_i3c4 = { + .gate_id = GATE_I3C4, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c4", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_i3c1 = { + .gate_id = GATE_I3C1, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c1", FLEXGEN_12, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_i3c2 = { + .gate_id = GATE_I3C2, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c2", FLEXGEN_12, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_i3c3 = { + .gate_id = GATE_I3C3, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c3", FLEXGEN_13, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_i3c4 = { + 
.gate_id = GATE_I3C4, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c4", FLEXGEN_36, &clk_stm32_gate_ops, 0), +}; + +/* I2S */ +static struct clk_stm32_gate ck_icn_p_is2m = { + .gate_id = GATE_IS2M, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_is2m", ICN_APB3, &clk_stm32_gate_ops, 0), +}; + +/* IWDG */ +static struct clk_stm32_gate ck_icn_p_iwdg2 = { + .gate_id = GATE_IWDG2, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg2", ICN_APB3, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_iwdg3 = { + .gate_id = GATE_IWDG3, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg3", ICN_APB3, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_iwdg4 = { + .gate_id = GATE_IWDG4, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg4", ICN_APB3, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_iwdg5 = { + .gate_id = GATE_IWDG5, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg5", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +/* LPTIM */ +static struct clk_stm32_gate ck_icn_p_lptim1 = { + .gate_id = GATE_LPTIM1, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim1", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_lptim2 = { + .gate_id = GATE_LPTIM2, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim2", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_lptim3 = { + .gate_id = GATE_LPTIM3, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim3", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_lptim4 = { + .gate_id = GATE_LPTIM4, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim4", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_lptim5 = { + .gate_id = GATE_LPTIM5, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim5", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_lptim1 = { + .gate_id = GATE_LPTIM1, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim1", FLEXGEN_07, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_lptim2 = { + .gate_id = GATE_LPTIM2, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim2", FLEXGEN_07, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_lptim3 = { + .gate_id = GATE_LPTIM3, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim3", FLEXGEN_40, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_lptim4 = { + .gate_id = GATE_LPTIM4, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim4", FLEXGEN_41, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_lptim5 = { + .gate_id = GATE_LPTIM5, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim5", FLEXGEN_41, &clk_stm32_gate_ops, 0), +}; + +/* LPUART */ +static struct clk_stm32_gate ck_icn_p_lpuart1 = { + .gate_id = GATE_LPUART1, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lpuart1", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_lpuart1 = { + .gate_id = GATE_LPUART1, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_lpuart1", FLEXGEN_39, &clk_stm32_gate_ops, 0), +}; + +/* MCO1 & MCO2 */ +static struct clk_stm32_composite ck_mco1 = { + .gate_id = GATE_MCO1, + .mux_id = MUX_MCO1, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_mco1", mco1_src, &clk_stm32_composite_ops, 0), +}; + +static struct clk_stm32_composite ck_mco2 = { + .gate_id = GATE_MCO2, + .mux_id = MUX_MCO2, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_mco2", mco2_src, &clk_stm32_composite_ops, 0), +}; + +/* MDF */ +static struct clk_stm32_gate ck_icn_p_mdf1 = { + .gate_id = GATE_MDF1, + .hw.init = 
CLK_HW_INIT_INDEX("ck_icn_p_mdf1", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_mdf1 = { + .gate_id = GATE_MDF1, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_mdf1", FLEXGEN_23, &clk_stm32_gate_ops, 0), +}; + +/* OSPI */ +static struct clk_stm32_gate ck_icn_p_ospiiom = { + .gate_id = GATE_OSPIIOM, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ospiiom", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +/* PCIE */ +static struct clk_stm32_gate ck_icn_p_pcie = { + .gate_id = GATE_PCIE, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_pcie", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +/* SAI */ +static struct clk_stm32_gate ck_icn_p_sai1 = { + .gate_id = GATE_SAI1, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai1", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_sai2 = { + .gate_id = GATE_SAI2, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai2", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_sai3 = { + .gate_id = GATE_SAI3, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai3", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_sai4 = { + .gate_id = GATE_SAI4, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai4", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_sai1 = { + .gate_id = GATE_SAI1, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai1", FLEXGEN_23, &clk_stm32_gate_ops, + CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate ck_ker_sai2 = { + .gate_id = GATE_SAI2, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai2", FLEXGEN_24, &clk_stm32_gate_ops, + CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate ck_ker_sai3 = { + .gate_id = GATE_SAI3, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai3", FLEXGEN_25, &clk_stm32_gate_ops, + CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate ck_ker_sai4 = { + .gate_id = GATE_SAI4, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai4", FLEXGEN_25, &clk_stm32_gate_ops, + CLK_SET_RATE_PARENT), +}; + +/* SDMMC */ +static struct clk_stm32_gate ck_icn_m_sdmmc1 = { + .gate_id = GATE_SDMMC1, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc1", ICN_SDMMC, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_m_sdmmc2 = { + .gate_id = GATE_SDMMC2, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc2", ICN_SDMMC, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_m_sdmmc3 = { + .gate_id = GATE_SDMMC3, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc3", ICN_SDMMC, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_sdmmc1 = { + .gate_id = GATE_SDMMC1, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc1", FLEXGEN_51, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_sdmmc2 = { + .gate_id = GATE_SDMMC2, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc2", FLEXGEN_52, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_sdmmc3 = { + .gate_id = GATE_SDMMC3, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc3", FLEXGEN_53, &clk_stm32_gate_ops, 0), +}; + +/* SPDIF */ +static struct clk_stm32_gate ck_icn_p_spdifrx = { + .gate_id = GATE_SPDIFRX, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spdifrx", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_spdifrx = { + .gate_id = GATE_SPDIFRX, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_spdifrx", FLEXGEN_11, &clk_stm32_gate_ops, 0), +}; + +/* SPI */ +static struct clk_stm32_gate ck_icn_p_spi1 = { + .gate_id = GATE_SPI1, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi1", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_spi2 = { + 
.gate_id = GATE_SPI2, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi2", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_spi3 = { + .gate_id = GATE_SPI3, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi3", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_spi4 = { + .gate_id = GATE_SPI4, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi4", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_spi5 = { + .gate_id = GATE_SPI5, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi5", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_spi6 = { + .gate_id = GATE_SPI6, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi6", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_spi7 = { + .gate_id = GATE_SPI7, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi7", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_spi8 = { + .gate_id = GATE_SPI8, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi8", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_spi1 = { + .gate_id = GATE_SPI1, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi1", FLEXGEN_16, &clk_stm32_gate_ops, + CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate ck_ker_spi2 = { + .gate_id = GATE_SPI2, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi2", FLEXGEN_10, &clk_stm32_gate_ops, + CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate ck_ker_spi3 = { + .gate_id = GATE_SPI3, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi3", FLEXGEN_10, &clk_stm32_gate_ops, + CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate ck_ker_spi4 = { + .gate_id = GATE_SPI4, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi4", FLEXGEN_17, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_spi5 = { + .gate_id = GATE_SPI5, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi5", FLEXGEN_17, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_spi6 = { + .gate_id = GATE_SPI6, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi6", FLEXGEN_18, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_spi7 = { + .gate_id = GATE_SPI7, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi7", FLEXGEN_18, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_spi8 = { + .gate_id = GATE_SPI8, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi8", FLEXGEN_37, &clk_stm32_gate_ops, 0), +}; + +/* Timers */ +static struct clk_stm32_gate ck_icn_p_tim2 = { + .gate_id = GATE_TIM2, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim2", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim3 = { + .gate_id = GATE_TIM3, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim3", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim4 = { + .gate_id = GATE_TIM4, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim4", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim5 = { + .gate_id = GATE_TIM5, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim5", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim6 = { + .gate_id = GATE_TIM6, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim6", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim7 = { + .gate_id = GATE_TIM7, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim7", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim10 = { + .gate_id = GATE_TIM10, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim10", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + 
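For reference, the bus ("ck_icn_p_*") and kernel ("ck_ker_*") gates defined above are consumed by peripheral drivers through the generic clk API. The sketch below only illustrates that consumer side under the usual DT binding (a "clocks" phandle such as <&rcc CK_BUS_TIM2>); the example_probe() name is made up here and is not part of this patch.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Illustrative consumer sketch (hypothetical driver): enable the bus gate
 * referenced by the device's first "clocks" phandle in the device tree. */
static int example_probe(struct platform_device *pdev)
{
	struct clk *bus_clk;
	int ret;

	bus_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(bus_clk))
		return PTR_ERR(bus_clk);

	ret = clk_prepare_enable(bus_clk);	/* ungates the ck_icn_p_* clock */
	if (ret)
		return ret;

	/* ... peripheral setup ... */

	clk_disable_unprepare(bus_clk);
	return 0;
}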
+static struct clk_stm32_gate ck_icn_p_tim11 = { + .gate_id = GATE_TIM11, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim11", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim12 = { + .gate_id = GATE_TIM12, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim12", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim13 = { + .gate_id = GATE_TIM13, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim13", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim14 = { + .gate_id = GATE_TIM14, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim14", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim1 = { + .gate_id = GATE_TIM1, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim1", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim8 = { + .gate_id = GATE_TIM8, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim8", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim15 = { + .gate_id = GATE_TIM15, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim15", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim16 = { + .gate_id = GATE_TIM16, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim16", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim17 = { + .gate_id = GATE_TIM17, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim17", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_tim20 = { + .gate_id = GATE_TIM20, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim20", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim2 = { + .gate_id = GATE_TIM2, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim2", TIMG1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim3 = { + .gate_id = GATE_TIM3, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim3", TIMG1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim4 = { + .gate_id = GATE_TIM4, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim4", TIMG1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim5 = { + .gate_id = GATE_TIM5, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim5", TIMG1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim6 = { + .gate_id = GATE_TIM6, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim6", TIMG1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim7 = { + .gate_id = GATE_TIM7, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim7", TIMG1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim10 = { + .gate_id = GATE_TIM10, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim10", TIMG1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim11 = { + .gate_id = GATE_TIM11, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim11", TIMG1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim12 = { + .gate_id = GATE_TIM12, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim12", TIMG1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim13 = { + .gate_id = GATE_TIM13, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim13", TIMG1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim14 = { + .gate_id = GATE_TIM14, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim14", TIMG1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim1 = { + .gate_id = GATE_TIM1, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim1", TIMG2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim8 = { + .gate_id 
= GATE_TIM8, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim8", TIMG2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim15 = { + .gate_id = GATE_TIM15, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim15", TIMG2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim16 = { + .gate_id = GATE_TIM16, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim16", TIMG2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim17 = { + .gate_id = GATE_TIM17, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim17", TIMG2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_tim20 = { + .gate_id = GATE_TIM20, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim20", TIMG2, &clk_stm32_gate_ops, 0), +}; + +/* UART/USART */ +static struct clk_stm32_gate ck_icn_p_usart2 = { + .gate_id = GATE_USART2, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart2", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_usart3 = { + .gate_id = GATE_USART3, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart3", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_uart4 = { + .gate_id = GATE_UART4, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart4", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_uart5 = { + .gate_id = GATE_UART5, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart5", ICN_APB1, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_usart1 = { + .gate_id = GATE_USART1, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart1", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_usart6 = { + .gate_id = GATE_USART6, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart6", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_uart7 = { + .gate_id = GATE_UART7, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart7", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_uart8 = { + .gate_id = GATE_UART8, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart8", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_uart9 = { + .gate_id = GATE_UART9, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart9", ICN_APB2, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_usart2 = { + .gate_id = GATE_USART2, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart2", FLEXGEN_08, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_uart4 = { + .gate_id = GATE_UART4, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart4", FLEXGEN_08, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_usart3 = { + .gate_id = GATE_USART3, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart3", FLEXGEN_09, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_uart5 = { + .gate_id = GATE_UART5, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart5", FLEXGEN_09, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_usart1 = { + .gate_id = GATE_USART1, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart1", FLEXGEN_19, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_usart6 = { + .gate_id = GATE_USART6, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart6", FLEXGEN_20, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_uart7 = { + .gate_id = GATE_UART7, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart7", FLEXGEN_21, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_uart8 = { + .gate_id = GATE_UART8, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart8", FLEXGEN_21, &clk_stm32_gate_ops, 0), +}; + +static struct 
clk_stm32_gate ck_ker_uart9 = { + .gate_id = GATE_UART9, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart9", FLEXGEN_22, &clk_stm32_gate_ops, 0), +}; + +/* USB2PHY1 */ +static struct clk_stm32_composite ck_ker_usb2phy1 = { + .gate_id = GATE_USB2PHY1, + .mux_id = MUX_USB2PHY1, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb2phy1", usb2phy1_src, + &clk_stm32_composite_ops, 0), +}; + +/* USB2H */ +static struct clk_stm32_gate ck_icn_m_usb2ehci = { + .gate_id = GATE_USBH, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usb2ehci", ICN_HSL, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_m_usb2ohci = { + .gate_id = GATE_USBH, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usb2ohci", ICN_HSL, &clk_stm32_gate_ops, 0), +}; + +/* USB2PHY2 */ +static struct clk_stm32_composite ck_ker_usb2phy2_en = { + .gate_id = GATE_USB2PHY2, + .mux_id = MUX_USB2PHY2, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb2phy2_en", usb2phy2_src, + &clk_stm32_composite_ops, 0), +}; + +/* USB3 PCIe COMBOPHY */ +static struct clk_stm32_gate ck_icn_p_usb3pciephy = { + .gate_id = GATE_USB3PCIEPHY, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usb3pciephy", ICN_APB4, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_composite ck_ker_usb3pciephy = { + .gate_id = GATE_USB3PCIEPHY, + .mux_id = MUX_USB3PCIEPHY, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb3pciephy", usb3pciphy_src, + &clk_stm32_composite_ops, 0), +}; + +/* USB3 DRD */ +static struct clk_stm32_gate ck_icn_m_usb3dr = { + .gate_id = GATE_USB3DR, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usb3dr", ICN_HSL, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_usb2phy2 = { + .gate_id = GATE_USB3DR, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_usb2phy2", FLEXGEN_58, &clk_stm32_gate_ops, 0), +}; + +/* USBTC */ +static struct clk_stm32_gate ck_icn_p_usbtc = { + .gate_id = GATE_USBTC, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usbtc", ICN_APB4, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_ker_usbtc = { + .gate_id = GATE_USBTC, + .hw.init = CLK_HW_INIT_INDEX("ck_ker_usbtc", FLEXGEN_35, &clk_stm32_gate_ops, 0), +}; + +/* VDEC / VENC */ +static struct clk_stm32_gate ck_icn_p_vdec = { + .gate_id = GATE_VDEC, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_vdec", ICN_APB4, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_venc = { + .gate_id = GATE_VENC, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_venc", ICN_APB4, &clk_stm32_gate_ops, 0), +}; + +/* VREF */ +static struct clk_stm32_gate ck_icn_p_vref = { + .gate_id = GATE_VREF, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_vref", ICN_APB3, &clk_stm32_gate_ops, 0), +}; + +/* WWDG */ +static struct clk_stm32_gate ck_icn_p_wwdg1 = { + .gate_id = GATE_WWDG1, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_wwdg1", ICN_APB3, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ck_icn_p_wwdg2 = { + .gate_id = GATE_WWDG2, + .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_wwdg2", ICN_LS_MCU, &clk_stm32_gate_ops, 0), +}; + +#define SECF_NONE -1 + +static const struct clock_config stm32mp25_clock_cfg[] = { + STM32_GATE_CFG(CK_BUS_ETH1, ck_icn_p_eth1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_ETH2, ck_icn_p_eth2, SECF_NONE), + STM32_GATE_CFG(CK_BUS_PCIE, ck_icn_p_pcie, SECF_NONE), + STM32_GATE_CFG(CK_BUS_ETHSW, ck_icn_p_ethsw, SECF_NONE), + STM32_GATE_CFG(CK_BUS_ADC12, ck_icn_p_adc12, SECF_NONE), + STM32_GATE_CFG(CK_BUS_ADC3, ck_icn_p_adc3, SECF_NONE), + STM32_GATE_CFG(CK_BUS_CCI, ck_icn_p_cci, SECF_NONE), + STM32_GATE_CFG(CK_BUS_CRC, 
ck_icn_p_crc, SECF_NONE), + STM32_GATE_CFG(CK_BUS_MDF1, ck_icn_p_mdf1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_OSPIIOM, ck_icn_p_ospiiom, SECF_NONE), + STM32_GATE_CFG(CK_BUS_HASH, ck_icn_p_hash, SECF_NONE), + STM32_GATE_CFG(CK_BUS_CRYP1, ck_icn_p_cryp1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_CRYP2, ck_icn_p_cryp2, SECF_NONE), + STM32_GATE_CFG(CK_BUS_ADF1, ck_icn_p_adf1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SPI8, ck_icn_p_spi8, SECF_NONE), + STM32_GATE_CFG(CK_BUS_LPUART1, ck_icn_p_lpuart1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_I2C8, ck_icn_p_i2c8, SECF_NONE), + STM32_GATE_CFG(CK_BUS_LPTIM3, ck_icn_p_lptim3, SECF_NONE), + STM32_GATE_CFG(CK_BUS_LPTIM4, ck_icn_p_lptim4, SECF_NONE), + STM32_GATE_CFG(CK_BUS_LPTIM5, ck_icn_p_lptim5, SECF_NONE), + STM32_GATE_CFG(CK_BUS_IWDG5, ck_icn_p_iwdg5, SECF_NONE), + STM32_GATE_CFG(CK_BUS_WWDG2, ck_icn_p_wwdg2, SECF_NONE), + STM32_GATE_CFG(CK_BUS_I3C4, ck_icn_p_i3c4, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SDMMC1, ck_icn_m_sdmmc1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SDMMC2, ck_icn_m_sdmmc2, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SDMMC3, ck_icn_m_sdmmc3, SECF_NONE), + STM32_GATE_CFG(CK_BUS_USB2OHCI, ck_icn_m_usb2ohci, SECF_NONE), + STM32_GATE_CFG(CK_BUS_USB2EHCI, ck_icn_m_usb2ehci, SECF_NONE), + STM32_GATE_CFG(CK_BUS_USB3DR, ck_icn_m_usb3dr, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM2, ck_icn_p_tim2, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM3, ck_icn_p_tim3, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM4, ck_icn_p_tim4, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM5, ck_icn_p_tim5, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM6, ck_icn_p_tim6, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM7, ck_icn_p_tim7, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM10, ck_icn_p_tim10, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM11, ck_icn_p_tim11, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM12, ck_icn_p_tim12, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM13, ck_icn_p_tim13, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM14, ck_icn_p_tim14, SECF_NONE), + STM32_GATE_CFG(CK_BUS_LPTIM1, ck_icn_p_lptim1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_LPTIM2, ck_icn_p_lptim2, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SPI2, ck_icn_p_spi2, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SPI3, ck_icn_p_spi3, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SPDIFRX, ck_icn_p_spdifrx, SECF_NONE), + STM32_GATE_CFG(CK_BUS_USART2, ck_icn_p_usart2, SECF_NONE), + STM32_GATE_CFG(CK_BUS_USART3, ck_icn_p_usart3, SECF_NONE), + STM32_GATE_CFG(CK_BUS_UART4, ck_icn_p_uart4, SECF_NONE), + STM32_GATE_CFG(CK_BUS_UART5, ck_icn_p_uart5, SECF_NONE), + STM32_GATE_CFG(CK_BUS_I2C1, ck_icn_p_i2c1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_I2C2, ck_icn_p_i2c2, SECF_NONE), + STM32_GATE_CFG(CK_BUS_I2C3, ck_icn_p_i2c3, SECF_NONE), + STM32_GATE_CFG(CK_BUS_I2C4, ck_icn_p_i2c4, SECF_NONE), + STM32_GATE_CFG(CK_BUS_I2C5, ck_icn_p_i2c5, SECF_NONE), + STM32_GATE_CFG(CK_BUS_I2C6, ck_icn_p_i2c6, SECF_NONE), + STM32_GATE_CFG(CK_BUS_I2C7, ck_icn_p_i2c7, SECF_NONE), + STM32_GATE_CFG(CK_BUS_I3C1, ck_icn_p_i3c1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_I3C2, ck_icn_p_i3c2, SECF_NONE), + STM32_GATE_CFG(CK_BUS_I3C3, ck_icn_p_i3c3, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM1, ck_icn_p_tim1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM8, ck_icn_p_tim8, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM15, ck_icn_p_tim15, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM16, ck_icn_p_tim16, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM17, ck_icn_p_tim17, SECF_NONE), + STM32_GATE_CFG(CK_BUS_TIM20, ck_icn_p_tim20, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SAI1, ck_icn_p_sai1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SAI2, ck_icn_p_sai2, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SAI3, ck_icn_p_sai3, SECF_NONE), 
+ STM32_GATE_CFG(CK_BUS_SAI4, ck_icn_p_sai4, SECF_NONE), + STM32_GATE_CFG(CK_BUS_USART1, ck_icn_p_usart1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_USART6, ck_icn_p_usart6, SECF_NONE), + STM32_GATE_CFG(CK_BUS_UART7, ck_icn_p_uart7, SECF_NONE), + STM32_GATE_CFG(CK_BUS_UART8, ck_icn_p_uart8, SECF_NONE), + STM32_GATE_CFG(CK_BUS_UART9, ck_icn_p_uart9, SECF_NONE), + STM32_GATE_CFG(CK_BUS_FDCAN, ck_icn_p_fdcan, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SPI1, ck_icn_p_spi1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SPI4, ck_icn_p_spi4, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SPI5, ck_icn_p_spi5, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SPI6, ck_icn_p_spi6, SECF_NONE), + STM32_GATE_CFG(CK_BUS_SPI7, ck_icn_p_spi7, SECF_NONE), + STM32_GATE_CFG(CK_BUS_IWDG2, ck_icn_p_iwdg2, SECF_NONE), + STM32_GATE_CFG(CK_BUS_IWDG3, ck_icn_p_iwdg3, SECF_NONE), + STM32_GATE_CFG(CK_BUS_IWDG4, ck_icn_p_iwdg4, SECF_NONE), + STM32_GATE_CFG(CK_BUS_WWDG1, ck_icn_p_wwdg1, SECF_NONE), + STM32_GATE_CFG(CK_BUS_VREF, ck_icn_p_vref, SECF_NONE), + STM32_GATE_CFG(CK_BUS_HDP, ck_icn_p_hdp, SECF_NONE), + STM32_GATE_CFG(CK_BUS_IS2M, ck_icn_p_is2m, SECF_NONE), + STM32_GATE_CFG(CK_BUS_DSI, ck_icn_p_dsi, SECF_NONE), + STM32_GATE_CFG(CK_BUS_LTDC, ck_icn_p_ltdc, SECF_NONE), + STM32_GATE_CFG(CK_BUS_CSI, ck_icn_p_csi, SECF_NONE), + STM32_GATE_CFG(CK_BUS_DCMIPP, ck_icn_p_dcmipp, SECF_NONE), + STM32_GATE_CFG(CK_BUS_LVDS, ck_icn_p_lvds, SECF_NONE), + STM32_GATE_CFG(CK_BUS_USBTC, ck_icn_p_usbtc, SECF_NONE), + STM32_GATE_CFG(CK_BUS_USB3PCIEPHY, ck_icn_p_usb3pciephy, SECF_NONE), + STM32_GATE_CFG(CK_BUS_VDEC, ck_icn_p_vdec, SECF_NONE), + STM32_GATE_CFG(CK_BUS_VENC, ck_icn_p_venc, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM2, ck_ker_tim2, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM3, ck_ker_tim3, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM4, ck_ker_tim4, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM5, ck_ker_tim5, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM6, ck_ker_tim6, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM7, ck_ker_tim7, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM10, ck_ker_tim10, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM11, ck_ker_tim11, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM12, ck_ker_tim12, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM13, ck_ker_tim13, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM14, ck_ker_tim14, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM1, ck_ker_tim1, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM8, ck_ker_tim8, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM15, ck_ker_tim15, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM16, ck_ker_tim16, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM17, ck_ker_tim17, SECF_NONE), + STM32_GATE_CFG(CK_KER_TIM20, ck_ker_tim20, SECF_NONE), + STM32_GATE_CFG(CK_KER_LPTIM1, ck_ker_lptim1, SECF_NONE), + STM32_GATE_CFG(CK_KER_LPTIM2, ck_ker_lptim2, SECF_NONE), + STM32_GATE_CFG(CK_KER_USART2, ck_ker_usart2, SECF_NONE), + STM32_GATE_CFG(CK_KER_UART4, ck_ker_uart4, SECF_NONE), + STM32_GATE_CFG(CK_KER_USART3, ck_ker_usart3, SECF_NONE), + STM32_GATE_CFG(CK_KER_UART5, ck_ker_uart5, SECF_NONE), + STM32_GATE_CFG(CK_KER_SPI2, ck_ker_spi2, SECF_NONE), + STM32_GATE_CFG(CK_KER_SPI3, ck_ker_spi3, SECF_NONE), + STM32_GATE_CFG(CK_KER_SPDIFRX, ck_ker_spdifrx, SECF_NONE), + STM32_GATE_CFG(CK_KER_I2C1, ck_ker_i2c1, SECF_NONE), + STM32_GATE_CFG(CK_KER_I2C2, ck_ker_i2c2, SECF_NONE), + STM32_GATE_CFG(CK_KER_I3C1, ck_ker_i3c1, SECF_NONE), + STM32_GATE_CFG(CK_KER_I3C2, ck_ker_i3c2, SECF_NONE), + STM32_GATE_CFG(CK_KER_I2C3, ck_ker_i2c3, SECF_NONE), + STM32_GATE_CFG(CK_KER_I2C5, ck_ker_i2c5, SECF_NONE), + STM32_GATE_CFG(CK_KER_I3C3, ck_ker_i3c3, SECF_NONE), + STM32_GATE_CFG(CK_KER_I2C4, ck_ker_i2c4, SECF_NONE), + 
STM32_GATE_CFG(CK_KER_I2C6, ck_ker_i2c6, SECF_NONE), + STM32_GATE_CFG(CK_KER_I2C7, ck_ker_i2c7, SECF_NONE), + STM32_GATE_CFG(CK_KER_SPI1, ck_ker_spi1, SECF_NONE), + STM32_GATE_CFG(CK_KER_SPI4, ck_ker_spi4, SECF_NONE), + STM32_GATE_CFG(CK_KER_SPI5, ck_ker_spi5, SECF_NONE), + STM32_GATE_CFG(CK_KER_SPI6, ck_ker_spi6, SECF_NONE), + STM32_GATE_CFG(CK_KER_SPI7, ck_ker_spi7, SECF_NONE), + STM32_GATE_CFG(CK_KER_USART1, ck_ker_usart1, SECF_NONE), + STM32_GATE_CFG(CK_KER_USART6, ck_ker_usart6, SECF_NONE), + STM32_GATE_CFG(CK_KER_UART7, ck_ker_uart7, SECF_NONE), + STM32_GATE_CFG(CK_KER_UART8, ck_ker_uart8, SECF_NONE), + STM32_GATE_CFG(CK_KER_UART9, ck_ker_uart9, SECF_NONE), + STM32_GATE_CFG(CK_KER_MDF1, ck_ker_mdf1, SECF_NONE), + STM32_GATE_CFG(CK_KER_SAI1, ck_ker_sai1, SECF_NONE), + STM32_GATE_CFG(CK_KER_SAI2, ck_ker_sai2, SECF_NONE), + STM32_GATE_CFG(CK_KER_SAI3, ck_ker_sai3, SECF_NONE), + STM32_GATE_CFG(CK_KER_SAI4, ck_ker_sai4, SECF_NONE), + STM32_GATE_CFG(CK_KER_FDCAN, ck_ker_fdcan, SECF_NONE), + STM32_GATE_CFG(CK_KER_CSI, ck_ker_csi, SECF_NONE), + STM32_GATE_CFG(CK_KER_CSITXESC, ck_ker_csitxesc, SECF_NONE), + STM32_GATE_CFG(CK_KER_CSIPHY, ck_ker_csiphy, SECF_NONE), + STM32_GATE_CFG(CK_KER_USBTC, ck_ker_usbtc, SECF_NONE), + STM32_GATE_CFG(CK_KER_I3C4, ck_ker_i3c4, SECF_NONE), + STM32_GATE_CFG(CK_KER_SPI8, ck_ker_spi8, SECF_NONE), + STM32_GATE_CFG(CK_KER_I2C8, ck_ker_i2c8, SECF_NONE), + STM32_GATE_CFG(CK_KER_LPUART1, ck_ker_lpuart1, SECF_NONE), + STM32_GATE_CFG(CK_KER_LPTIM3, ck_ker_lptim3, SECF_NONE), + STM32_GATE_CFG(CK_KER_LPTIM4, ck_ker_lptim4, SECF_NONE), + STM32_GATE_CFG(CK_KER_LPTIM5, ck_ker_lptim5, SECF_NONE), + STM32_GATE_CFG(CK_KER_ADF1, ck_ker_adf1, SECF_NONE), + STM32_GATE_CFG(CK_KER_SDMMC1, ck_ker_sdmmc1, SECF_NONE), + STM32_GATE_CFG(CK_KER_SDMMC2, ck_ker_sdmmc2, SECF_NONE), + STM32_GATE_CFG(CK_KER_SDMMC3, ck_ker_sdmmc3, SECF_NONE), + STM32_GATE_CFG(CK_KER_ETH1, ck_ker_eth1, SECF_NONE), + STM32_GATE_CFG(CK_ETH1_STP, ck_ker_eth1stp, SECF_NONE), + STM32_GATE_CFG(CK_KER_ETHSW, ck_ker_ethsw, SECF_NONE), + STM32_GATE_CFG(CK_KER_ETH2, ck_ker_eth2, SECF_NONE), + STM32_GATE_CFG(CK_ETH2_STP, ck_ker_eth2stp, SECF_NONE), + STM32_GATE_CFG(CK_KER_ETH1PTP, ck_ker_eth1ptp, SECF_NONE), + STM32_GATE_CFG(CK_KER_ETH2PTP, ck_ker_eth2ptp, SECF_NONE), + STM32_GATE_CFG(CK_BUS_GPU, ck_icn_m_gpu, SECF_NONE), + STM32_GATE_CFG(CK_KER_GPU, ck_ker_gpu, SECF_NONE), + STM32_GATE_CFG(CK_KER_ETHSWREF, ck_ker_ethswref, SECF_NONE), + STM32_GATE_CFG(CK_BUS_ETHSWACMCFG, ck_icn_p_ethsw_acm_cfg, SECF_NONE), + STM32_GATE_CFG(CK_BUS_ETHSWACMMSG, ck_icn_p_ethsw_acm_msg, SECF_NONE), + STM32_GATE_CFG(CK_ETH1_MAC, ck_ker_eth1mac, SECF_NONE), + STM32_GATE_CFG(CK_ETH1_TX, ck_ker_eth1tx, SECF_NONE), + STM32_GATE_CFG(CK_ETH1_RX, ck_ker_eth1rx, SECF_NONE), + STM32_GATE_CFG(CK_ETH2_MAC, ck_ker_eth2mac, SECF_NONE), + STM32_GATE_CFG(CK_ETH2_TX, ck_ker_eth2tx, SECF_NONE), + STM32_GATE_CFG(CK_ETH2_RX, ck_ker_eth2rx, SECF_NONE), + STM32_COMPOSITE_CFG(CK_MCO1, ck_mco1, SECF_NONE), + STM32_COMPOSITE_CFG(CK_MCO2, ck_mco2, SECF_NONE), + STM32_COMPOSITE_CFG(CK_KER_ADC12, ck_ker_adc12, SECF_NONE), + STM32_COMPOSITE_CFG(CK_KER_ADC3, ck_ker_adc3, SECF_NONE), + STM32_COMPOSITE_CFG(CK_KER_USB2PHY1, ck_ker_usb2phy1, SECF_NONE), + STM32_GATE_CFG(CK_KER_USB2PHY2, ck_ker_usb2phy2, SECF_NONE), + STM32_COMPOSITE_CFG(CK_KER_USB2PHY2EN, ck_ker_usb2phy2_en, SECF_NONE), + STM32_COMPOSITE_CFG(CK_KER_USB3PCIEPHY, ck_ker_usb3pciephy, SECF_NONE), + STM32_COMPOSITE_CFG(CK_KER_DSIBLANE, clk_lanebyte, SECF_NONE), + STM32_COMPOSITE_CFG(CK_KER_DSIPHY, clk_phy_dsi, 
SECF_NONE), + STM32_COMPOSITE_CFG(CK_KER_LVDSPHY, ck_ker_lvdsphy, SECF_NONE), + STM32_COMPOSITE_CFG(CK_KER_DTS, ck_ker_dts, SECF_NONE), + STM32_GATE_CFG(CK_KER_LTDC, ck_ker_ltdc, SECF_NONE), +}; + +#define RESET_MP25(id, _offset, _bit_idx, _set_clr) \ + [id] = &(struct stm32_reset_cfg){ \ + .offset = (_offset), \ + .bit_idx = (_bit_idx), \ + .set_clr = (_set_clr), \ + } + +static const struct stm32_reset_cfg *stm32mp25_reset_cfg[STM32MP25_LAST_RESET] = { + RESET_MP25(TIM1_R, RCC_TIM1CFGR, 0, 0), + RESET_MP25(TIM2_R, RCC_TIM2CFGR, 0, 0), + RESET_MP25(TIM3_R, RCC_TIM3CFGR, 0, 0), + RESET_MP25(TIM4_R, RCC_TIM4CFGR, 0, 0), + RESET_MP25(TIM5_R, RCC_TIM5CFGR, 0, 0), + RESET_MP25(TIM6_R, RCC_TIM6CFGR, 0, 0), + RESET_MP25(TIM7_R, RCC_TIM7CFGR, 0, 0), + RESET_MP25(TIM8_R, RCC_TIM8CFGR, 0, 0), + RESET_MP25(TIM10_R, RCC_TIM10CFGR, 0, 0), + RESET_MP25(TIM11_R, RCC_TIM11CFGR, 0, 0), + RESET_MP25(TIM12_R, RCC_TIM12CFGR, 0, 0), + RESET_MP25(TIM13_R, RCC_TIM13CFGR, 0, 0), + RESET_MP25(TIM14_R, RCC_TIM14CFGR, 0, 0), + RESET_MP25(TIM15_R, RCC_TIM15CFGR, 0, 0), + RESET_MP25(TIM16_R, RCC_TIM16CFGR, 0, 0), + RESET_MP25(TIM17_R, RCC_TIM17CFGR, 0, 0), + RESET_MP25(TIM20_R, RCC_TIM20CFGR, 0, 0), + RESET_MP25(LPTIM1_R, RCC_LPTIM1CFGR, 0, 0), + RESET_MP25(LPTIM2_R, RCC_LPTIM2CFGR, 0, 0), + RESET_MP25(LPTIM3_R, RCC_LPTIM3CFGR, 0, 0), + RESET_MP25(LPTIM4_R, RCC_LPTIM4CFGR, 0, 0), + RESET_MP25(LPTIM5_R, RCC_LPTIM5CFGR, 0, 0), + RESET_MP25(SPI1_R, RCC_SPI1CFGR, 0, 0), + RESET_MP25(SPI2_R, RCC_SPI2CFGR, 0, 0), + RESET_MP25(SPI3_R, RCC_SPI3CFGR, 0, 0), + RESET_MP25(SPI4_R, RCC_SPI4CFGR, 0, 0), + RESET_MP25(SPI5_R, RCC_SPI5CFGR, 0, 0), + RESET_MP25(SPI6_R, RCC_SPI6CFGR, 0, 0), + RESET_MP25(SPI7_R, RCC_SPI7CFGR, 0, 0), + RESET_MP25(SPI8_R, RCC_SPI8CFGR, 0, 0), + RESET_MP25(SPDIFRX_R, RCC_SPDIFRXCFGR, 0, 0), + RESET_MP25(USART1_R, RCC_USART1CFGR, 0, 0), + RESET_MP25(USART2_R, RCC_USART2CFGR, 0, 0), + RESET_MP25(USART3_R, RCC_USART3CFGR, 0, 0), + RESET_MP25(UART4_R, RCC_UART4CFGR, 0, 0), + RESET_MP25(UART5_R, RCC_UART5CFGR, 0, 0), + RESET_MP25(USART6_R, RCC_USART6CFGR, 0, 0), + RESET_MP25(UART7_R, RCC_UART7CFGR, 0, 0), + RESET_MP25(UART8_R, RCC_UART8CFGR, 0, 0), + RESET_MP25(UART9_R, RCC_UART9CFGR, 0, 0), + RESET_MP25(LPUART1_R, RCC_LPUART1CFGR, 0, 0), + RESET_MP25(IS2M_R, RCC_IS2MCFGR, 0, 0), + RESET_MP25(I2C1_R, RCC_I2C1CFGR, 0, 0), + RESET_MP25(I2C2_R, RCC_I2C2CFGR, 0, 0), + RESET_MP25(I2C3_R, RCC_I2C3CFGR, 0, 0), + RESET_MP25(I2C4_R, RCC_I2C4CFGR, 0, 0), + RESET_MP25(I2C5_R, RCC_I2C5CFGR, 0, 0), + RESET_MP25(I2C6_R, RCC_I2C6CFGR, 0, 0), + RESET_MP25(I2C7_R, RCC_I2C7CFGR, 0, 0), + RESET_MP25(I2C8_R, RCC_I2C8CFGR, 0, 0), + RESET_MP25(SAI1_R, RCC_SAI1CFGR, 0, 0), + RESET_MP25(SAI2_R, RCC_SAI2CFGR, 0, 0), + RESET_MP25(SAI3_R, RCC_SAI3CFGR, 0, 0), + RESET_MP25(SAI4_R, RCC_SAI4CFGR, 0, 0), + RESET_MP25(MDF1_R, RCC_MDF1CFGR, 0, 0), + RESET_MP25(MDF2_R, RCC_ADF1CFGR, 0, 0), + RESET_MP25(FDCAN_R, RCC_FDCANCFGR, 0, 0), + RESET_MP25(HDP_R, RCC_HDPCFGR, 0, 0), + RESET_MP25(ADC12_R, RCC_ADC12CFGR, 0, 0), + RESET_MP25(ADC3_R, RCC_ADC3CFGR, 0, 0), + RESET_MP25(ETH1_R, RCC_ETH1CFGR, 0, 0), + RESET_MP25(ETH2_R, RCC_ETH2CFGR, 0, 0), + RESET_MP25(USBH_R, RCC_USBHCFGR, 0, 0), + RESET_MP25(USB2PHY1_R, RCC_USB2PHY1CFGR, 0, 0), + RESET_MP25(USB2PHY2_R, RCC_USB2PHY2CFGR, 0, 0), + RESET_MP25(USB3DR_R, RCC_USB3DRCFGR, 0, 0), + RESET_MP25(USB3PCIEPHY_R, RCC_USB3PCIEPHYCFGR, 0, 0), + RESET_MP25(USBTC_R, RCC_USBTCCFGR, 0, 0), + RESET_MP25(ETHSW_R, RCC_ETHSWCFGR, 0, 0), + RESET_MP25(SDMMC1_R, RCC_SDMMC1CFGR, 0, 0), + RESET_MP25(SDMMC1DLL_R, 
RCC_SDMMC1CFGR, 16, 0), + RESET_MP25(SDMMC2_R, RCC_SDMMC2CFGR, 0, 0), + RESET_MP25(SDMMC2DLL_R, RCC_SDMMC2CFGR, 16, 0), + RESET_MP25(SDMMC3_R, RCC_SDMMC3CFGR, 0, 0), + RESET_MP25(SDMMC3DLL_R, RCC_SDMMC3CFGR, 16, 0), + RESET_MP25(GPU_R, RCC_GPUCFGR, 0, 0), + RESET_MP25(LTDC_R, RCC_LTDCCFGR, 0, 0), + RESET_MP25(DSI_R, RCC_DSICFGR, 0, 0), + RESET_MP25(LVDS_R, RCC_LVDSCFGR, 0, 0), + RESET_MP25(CSI_R, RCC_CSICFGR, 0, 0), + RESET_MP25(DCMIPP_R, RCC_DCMIPPCFGR, 0, 0), + RESET_MP25(CCI_R, RCC_CCICFGR, 0, 0), + RESET_MP25(VDEC_R, RCC_VDECCFGR, 0, 0), + RESET_MP25(VENC_R, RCC_VENCCFGR, 0, 0), + RESET_MP25(WWDG1_R, RCC_WWDG1CFGR, 0, 0), + RESET_MP25(WWDG2_R, RCC_WWDG2CFGR, 0, 0), + RESET_MP25(VREF_R, RCC_VREFCFGR, 0, 0), + RESET_MP25(DTS_R, RCC_DTSCFGR, 0, 0), + RESET_MP25(CRC_R, RCC_CRCCFGR, 0, 0), + RESET_MP25(SERC_R, RCC_SERCCFGR, 0, 0), + RESET_MP25(OSPIIOM_R, RCC_OSPIIOMCFGR, 0, 0), + RESET_MP25(I3C1_R, RCC_I3C1CFGR, 0, 0), + RESET_MP25(I3C2_R, RCC_I3C2CFGR, 0, 0), + RESET_MP25(I3C3_R, RCC_I3C3CFGR, 0, 0), + RESET_MP25(I3C4_R, RCC_I3C4CFGR, 0, 0), + RESET_MP25(IWDG2_KER_R, RCC_IWDGC1CFGSETR, 18, 1), + RESET_MP25(IWDG4_KER_R, RCC_IWDGC2CFGSETR, 18, 1), + RESET_MP25(RNG_R, RCC_RNGCFGR, 0, 0), + RESET_MP25(PKA_R, RCC_PKACFGR, 0, 0), + RESET_MP25(SAES_R, RCC_SAESCFGR, 0, 0), + RESET_MP25(HASH_R, RCC_HASHCFGR, 0, 0), + RESET_MP25(CRYP1_R, RCC_CRYP1CFGR, 0, 0), + RESET_MP25(CRYP2_R, RCC_CRYP2CFGR, 0, 0), + RESET_MP25(PCIE_R, RCC_PCIECFGR, 0, 0), +}; + +static u16 stm32mp25_cpt_gate[GATE_NB]; + +static struct clk_stm32_clock_data stm32mp25_clock_data = { + .gate_cpt = stm32mp25_cpt_gate, + .gates = stm32mp25_gates, + .muxes = stm32mp25_muxes, +}; + +static struct clk_stm32_reset_data stm32mp25_reset_data = { + .reset_lines = stm32mp25_reset_cfg, + .nr_lines = ARRAY_SIZE(stm32mp25_reset_cfg), +}; + +static const struct stm32_rcc_match_data stm32mp25_data = { + .tab_clocks = stm32mp25_clock_cfg, + .num_clocks = ARRAY_SIZE(stm32mp25_clock_cfg), + .maxbinding = STM32MP25_LAST_CLK, + .clock_data = &stm32mp25_clock_data, + .reset_data = &stm32mp25_reset_data, +}; + +static const struct of_device_id stm32mp25_match_data[] = { + { .compatible = "st,stm32mp25-rcc", .data = &stm32mp25_data, }, + { } +}; +MODULE_DEVICE_TABLE(of, stm32mp25_match_data); + +static int stm32mp25_rcc_clocks_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + void __iomem *base; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + return stm32_rcc_init(dev, stm32mp25_match_data, base); +} + +static struct platform_driver stm32mp25_rcc_clocks_driver = { + .driver = { + .name = "stm32mp25_rcc", + .of_match_table = stm32mp25_match_data, + }, + .probe = stm32mp25_rcc_clocks_probe, +}; + +static int __init stm32mp25_clocks_init(void) +{ + return platform_driver_register(&stm32mp25_rcc_clocks_driver); +} + +core_initcall(stm32mp25_clocks_init); diff --git a/drivers/clk/stm32/reset-stm32.c b/drivers/clk/stm32/reset-stm32.c index 14c2ee1eeb..5a8f525842 100644 --- a/drivers/clk/stm32/reset-stm32.c +++ b/drivers/clk/stm32/reset-stm32.c @@ -19,6 +19,7 @@ struct stm32_reset_data { struct reset_controller_dev rcdev; void __iomem *membase; u32 clear_offset; + const struct stm32_reset_cfg **reset_lines; }; static inline struct stm32_reset_data * @@ -27,22 +28,46 @@ to_stm32_reset_data(struct reset_controller_dev *rcdev) return container_of(rcdev, struct stm32_reset_data, rcdev); } +static const struct stm32_reset_cfg *stm32_get_reset_line(struct reset_controller_dev *rcdev, + 
unsigned long id, + struct stm32_reset_cfg *line) +{ + struct stm32_reset_data *data = to_stm32_reset_data(rcdev); + + if (!data->reset_lines) { + int reg_width = sizeof(u32); + int bank = id / (reg_width * BITS_PER_BYTE); + int offset = id % (reg_width * BITS_PER_BYTE); + + line->offset = bank * reg_width; + line->bit_idx = offset; + line->set_clr = (data->clear_offset ? true : false); + + return line; + } + + return data->reset_lines[id]; +} + static int stm32_reset_update(struct reset_controller_dev *rcdev, unsigned long id, bool assert) { struct stm32_reset_data *data = to_stm32_reset_data(rcdev); - int reg_width = sizeof(u32); - int bank = id / (reg_width * BITS_PER_BYTE); - int offset = id % (reg_width * BITS_PER_BYTE); + struct stm32_reset_cfg line_reset; + const struct stm32_reset_cfg *ptr_line; - if (data->clear_offset) { + ptr_line = stm32_get_reset_line(rcdev, id, &line_reset); + if (!ptr_line) + return -EPERM; + + if (ptr_line->set_clr) { void __iomem *addr; - addr = data->membase + (bank * reg_width); + addr = data->membase + ptr_line->offset; if (!assert) addr += data->clear_offset; - writel(BIT(offset), addr); + writel(BIT(ptr_line->bit_idx), addr); } else { unsigned long flags; @@ -50,14 +75,14 @@ static int stm32_reset_update(struct reset_controller_dev *rcdev, spin_lock_irqsave(&data->lock, flags); - reg = readl(data->membase + (bank * reg_width)); + reg = readl(data->membase + ptr_line->offset); if (assert) - reg |= BIT(offset); + reg |= BIT(ptr_line->bit_idx); else - reg &= ~BIT(offset); + reg &= ~BIT(ptr_line->bit_idx); - writel(reg, data->membase + (bank * reg_width)); + writel(reg, data->membase + ptr_line->offset); spin_unlock_irqrestore(&data->lock, flags); } @@ -81,14 +106,17 @@ static int stm32_reset_status(struct reset_controller_dev *rcdev, unsigned long id) { struct stm32_reset_data *data = to_stm32_reset_data(rcdev); - int reg_width = sizeof(u32); - int bank = id / (reg_width * BITS_PER_BYTE); - int offset = id % (reg_width * BITS_PER_BYTE); + struct stm32_reset_cfg line_reset; + const struct stm32_reset_cfg *ptr_line; u32 reg; - reg = readl(data->membase + (bank * reg_width)); + ptr_line = stm32_get_reset_line(rcdev, id, &line_reset); + if (!ptr_line) + return -EPERM; + + reg = readl(data->membase + ptr_line->offset); - return !!(reg & BIT(offset)); + return !!(reg & BIT(ptr_line->bit_idx)); } static const struct reset_control_ops stm32_reset_ops = { @@ -113,6 +141,7 @@ int stm32_rcc_reset_init(struct device *dev, struct clk_stm32_reset_data *data, reset_data->rcdev.ops = &stm32_reset_ops; reset_data->rcdev.of_node = dev_of_node(dev); reset_data->rcdev.nr_resets = data->nr_lines; + reset_data->reset_lines = data->reset_lines; reset_data->clear_offset = data->clear_offset; return reset_controller_register(&reset_data->rcdev); diff --git a/drivers/clk/stm32/reset-stm32.h b/drivers/clk/stm32/reset-stm32.h index 8cf1cc9be4..f79cad21df 100644 --- a/drivers/clk/stm32/reset-stm32.h +++ b/drivers/clk/stm32/reset-stm32.h @@ -4,8 +4,15 @@ * Author: Gabriel Fernandez for STMicroelectronics. 
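The reset-stm32.c change above keeps the legacy behaviour as a fallback: when a platform registers no reset_lines table, stm32_get_reset_line() still decodes the reset id as a bit position in a flat array of 32-bit RCC registers, and set_clr mirrors whether the controller uses a separate clear register (clear_offset != 0). The helper below only restates that fallback arithmetic for readability; the function name is made up and nothing here is part of the patch.

#include <linux/types.h>

/* Fallback mapping used when data->reset_lines is NULL: 32 reset bits per
 * 32-bit register, packed back to back from the RCC base. */
static void example_decode_reset_id(unsigned long id, u16 *offset, u8 *bit_idx)
{
	*offset = (id / 32) * 4;	/* register byte offset */
	*bit_idx = id % 32;		/* bit inside that register */
}
/* e.g. id 70 -> register offset 0x08, bit 6 */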
*/ +struct stm32_reset_cfg { + u16 offset; + u8 bit_idx; + bool set_clr; +}; + struct clk_stm32_reset_data { const struct reset_control_ops *ops; + const struct stm32_reset_cfg **reset_lines; unsigned int nr_lines; u32 clear_offset; }; diff --git a/drivers/clk/stm32/stm32mp25_rcc.h b/drivers/clk/stm32/stm32mp25_rcc.h new file mode 100644 index 0000000000..687bc6a786 --- /dev/null +++ b/drivers/clk/stm32/stm32mp25_rcc.h @@ -0,0 +1,712 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) STMicroelectronics 2023 - All Rights Reserved + * Author: Gabriel Fernandez for STMicroelectronics. + */ + +#ifndef STM32MP25_RCC_H +#define STM32MP25_RCC_H + +#define RCC_SECCFGR0 0x0 +#define RCC_SECCFGR1 0x4 +#define RCC_SECCFGR2 0x8 +#define RCC_SECCFGR3 0xC +#define RCC_PRIVCFGR0 0x10 +#define RCC_PRIVCFGR1 0x14 +#define RCC_PRIVCFGR2 0x18 +#define RCC_PRIVCFGR3 0x1C +#define RCC_RCFGLOCKR0 0x20 +#define RCC_RCFGLOCKR1 0x24 +#define RCC_RCFGLOCKR2 0x28 +#define RCC_RCFGLOCKR3 0x2C +#define RCC_R0CIDCFGR 0x30 +#define RCC_R0SEMCR 0x34 +#define RCC_R1CIDCFGR 0x38 +#define RCC_R1SEMCR 0x3C +#define RCC_R2CIDCFGR 0x40 +#define RCC_R2SEMCR 0x44 +#define RCC_R3CIDCFGR 0x48 +#define RCC_R3SEMCR 0x4C +#define RCC_R4CIDCFGR 0x50 +#define RCC_R4SEMCR 0x54 +#define RCC_R5CIDCFGR 0x58 +#define RCC_R5SEMCR 0x5C +#define RCC_R6CIDCFGR 0x60 +#define RCC_R6SEMCR 0x64 +#define RCC_R7CIDCFGR 0x68 +#define RCC_R7SEMCR 0x6C +#define RCC_R8CIDCFGR 0x70 +#define RCC_R8SEMCR 0x74 +#define RCC_R9CIDCFGR 0x78 +#define RCC_R9SEMCR 0x7C +#define RCC_R10CIDCFGR 0x80 +#define RCC_R10SEMCR 0x84 +#define RCC_R11CIDCFGR 0x88 +#define RCC_R11SEMCR 0x8C +#define RCC_R12CIDCFGR 0x90 +#define RCC_R12SEMCR 0x94 +#define RCC_R13CIDCFGR 0x98 +#define RCC_R13SEMCR 0x9C +#define RCC_R14CIDCFGR 0xA0 +#define RCC_R14SEMCR 0xA4 +#define RCC_R15CIDCFGR 0xA8 +#define RCC_R15SEMCR 0xAC +#define RCC_R16CIDCFGR 0xB0 +#define RCC_R16SEMCR 0xB4 +#define RCC_R17CIDCFGR 0xB8 +#define RCC_R17SEMCR 0xBC +#define RCC_R18CIDCFGR 0xC0 +#define RCC_R18SEMCR 0xC4 +#define RCC_R19CIDCFGR 0xC8 +#define RCC_R19SEMCR 0xCC +#define RCC_R20CIDCFGR 0xD0 +#define RCC_R20SEMCR 0xD4 +#define RCC_R21CIDCFGR 0xD8 +#define RCC_R21SEMCR 0xDC +#define RCC_R22CIDCFGR 0xE0 +#define RCC_R22SEMCR 0xE4 +#define RCC_R23CIDCFGR 0xE8 +#define RCC_R23SEMCR 0xEC +#define RCC_R24CIDCFGR 0xF0 +#define RCC_R24SEMCR 0xF4 +#define RCC_R25CIDCFGR 0xF8 +#define RCC_R25SEMCR 0xFC +#define RCC_R26CIDCFGR 0x100 +#define RCC_R26SEMCR 0x104 +#define RCC_R27CIDCFGR 0x108 +#define RCC_R27SEMCR 0x10C +#define RCC_R28CIDCFGR 0x110 +#define RCC_R28SEMCR 0x114 +#define RCC_R29CIDCFGR 0x118 +#define RCC_R29SEMCR 0x11C +#define RCC_R30CIDCFGR 0x120 +#define RCC_R30SEMCR 0x124 +#define RCC_R31CIDCFGR 0x128 +#define RCC_R31SEMCR 0x12C +#define RCC_R32CIDCFGR 0x130 +#define RCC_R32SEMCR 0x134 +#define RCC_R33CIDCFGR 0x138 +#define RCC_R33SEMCR 0x13C +#define RCC_R34CIDCFGR 0x140 +#define RCC_R34SEMCR 0x144 +#define RCC_R35CIDCFGR 0x148 +#define RCC_R35SEMCR 0x14C +#define RCC_R36CIDCFGR 0x150 +#define RCC_R36SEMCR 0x154 +#define RCC_R37CIDCFGR 0x158 +#define RCC_R37SEMCR 0x15C +#define RCC_R38CIDCFGR 0x160 +#define RCC_R38SEMCR 0x164 +#define RCC_R39CIDCFGR 0x168 +#define RCC_R39SEMCR 0x16C +#define RCC_R40CIDCFGR 0x170 +#define RCC_R40SEMCR 0x174 +#define RCC_R41CIDCFGR 0x178 +#define RCC_R41SEMCR 0x17C +#define RCC_R42CIDCFGR 0x180 +#define RCC_R42SEMCR 0x184 +#define RCC_R43CIDCFGR 0x188 +#define RCC_R43SEMCR 0x18C +#define RCC_R44CIDCFGR 0x190 +#define RCC_R44SEMCR 0x194 +#define 
RCC_R45CIDCFGR 0x198 +#define RCC_R45SEMCR 0x19C +#define RCC_R46CIDCFGR 0x1A0 +#define RCC_R46SEMCR 0x1A4 +#define RCC_R47CIDCFGR 0x1A8 +#define RCC_R47SEMCR 0x1AC +#define RCC_R48CIDCFGR 0x1B0 +#define RCC_R48SEMCR 0x1B4 +#define RCC_R49CIDCFGR 0x1B8 +#define RCC_R49SEMCR 0x1BC +#define RCC_R50CIDCFGR 0x1C0 +#define RCC_R50SEMCR 0x1C4 +#define RCC_R51CIDCFGR 0x1C8 +#define RCC_R51SEMCR 0x1CC +#define RCC_R52CIDCFGR 0x1D0 +#define RCC_R52SEMCR 0x1D4 +#define RCC_R53CIDCFGR 0x1D8 +#define RCC_R53SEMCR 0x1DC +#define RCC_R54CIDCFGR 0x1E0 +#define RCC_R54SEMCR 0x1E4 +#define RCC_R55CIDCFGR 0x1E8 +#define RCC_R55SEMCR 0x1EC +#define RCC_R56CIDCFGR 0x1F0 +#define RCC_R56SEMCR 0x1F4 +#define RCC_R57CIDCFGR 0x1F8 +#define RCC_R57SEMCR 0x1FC +#define RCC_R58CIDCFGR 0x200 +#define RCC_R58SEMCR 0x204 +#define RCC_R59CIDCFGR 0x208 +#define RCC_R59SEMCR 0x20C +#define RCC_R60CIDCFGR 0x210 +#define RCC_R60SEMCR 0x214 +#define RCC_R61CIDCFGR 0x218 +#define RCC_R61SEMCR 0x21C +#define RCC_R62CIDCFGR 0x220 +#define RCC_R62SEMCR 0x224 +#define RCC_R63CIDCFGR 0x228 +#define RCC_R63SEMCR 0x22C +#define RCC_R64CIDCFGR 0x230 +#define RCC_R64SEMCR 0x234 +#define RCC_R65CIDCFGR 0x238 +#define RCC_R65SEMCR 0x23C +#define RCC_R66CIDCFGR 0x240 +#define RCC_R66SEMCR 0x244 +#define RCC_R67CIDCFGR 0x248 +#define RCC_R67SEMCR 0x24C +#define RCC_R68CIDCFGR 0x250 +#define RCC_R68SEMCR 0x254 +#define RCC_R69CIDCFGR 0x258 +#define RCC_R69SEMCR 0x25C +#define RCC_R70CIDCFGR 0x260 +#define RCC_R70SEMCR 0x264 +#define RCC_R71CIDCFGR 0x268 +#define RCC_R71SEMCR 0x26C +#define RCC_R72CIDCFGR 0x270 +#define RCC_R72SEMCR 0x274 +#define RCC_R73CIDCFGR 0x278 +#define RCC_R73SEMCR 0x27C +#define RCC_R74CIDCFGR 0x280 +#define RCC_R74SEMCR 0x284 +#define RCC_R75CIDCFGR 0x288 +#define RCC_R75SEMCR 0x28C +#define RCC_R76CIDCFGR 0x290 +#define RCC_R76SEMCR 0x294 +#define RCC_R77CIDCFGR 0x298 +#define RCC_R77SEMCR 0x29C +#define RCC_R78CIDCFGR 0x2A0 +#define RCC_R78SEMCR 0x2A4 +#define RCC_R79CIDCFGR 0x2A8 +#define RCC_R79SEMCR 0x2AC +#define RCC_R80CIDCFGR 0x2B0 +#define RCC_R80SEMCR 0x2B4 +#define RCC_R81CIDCFGR 0x2B8 +#define RCC_R81SEMCR 0x2BC +#define RCC_R82CIDCFGR 0x2C0 +#define RCC_R82SEMCR 0x2C4 +#define RCC_R83CIDCFGR 0x2C8 +#define RCC_R83SEMCR 0x2CC +#define RCC_R84CIDCFGR 0x2D0 +#define RCC_R84SEMCR 0x2D4 +#define RCC_R85CIDCFGR 0x2D8 +#define RCC_R85SEMCR 0x2DC +#define RCC_R86CIDCFGR 0x2E0 +#define RCC_R86SEMCR 0x2E4 +#define RCC_R87CIDCFGR 0x2E8 +#define RCC_R87SEMCR 0x2EC +#define RCC_R88CIDCFGR 0x2F0 +#define RCC_R88SEMCR 0x2F4 +#define RCC_R89CIDCFGR 0x2F8 +#define RCC_R89SEMCR 0x2FC +#define RCC_R90CIDCFGR 0x300 +#define RCC_R90SEMCR 0x304 +#define RCC_R91CIDCFGR 0x308 +#define RCC_R91SEMCR 0x30C +#define RCC_R92CIDCFGR 0x310 +#define RCC_R92SEMCR 0x314 +#define RCC_R93CIDCFGR 0x318 +#define RCC_R93SEMCR 0x31C +#define RCC_R94CIDCFGR 0x320 +#define RCC_R94SEMCR 0x324 +#define RCC_R95CIDCFGR 0x328 +#define RCC_R95SEMCR 0x32C +#define RCC_R96CIDCFGR 0x330 +#define RCC_R96SEMCR 0x334 +#define RCC_R97CIDCFGR 0x338 +#define RCC_R97SEMCR 0x33C +#define RCC_R98CIDCFGR 0x340 +#define RCC_R98SEMCR 0x344 +#define RCC_R99CIDCFGR 0x348 +#define RCC_R99SEMCR 0x34C +#define RCC_R100CIDCFGR 0x350 +#define RCC_R100SEMCR 0x354 +#define RCC_R101CIDCFGR 0x358 +#define RCC_R101SEMCR 0x35C +#define RCC_R102CIDCFGR 0x360 +#define RCC_R102SEMCR 0x364 +#define RCC_R103CIDCFGR 0x368 +#define RCC_R103SEMCR 0x36C +#define RCC_R104CIDCFGR 0x370 +#define RCC_R104SEMCR 0x374 +#define RCC_R105CIDCFGR 0x378 +#define RCC_R105SEMCR 0x37C +#define 
RCC_R106CIDCFGR 0x380 +#define RCC_R106SEMCR 0x384 +#define RCC_R107CIDCFGR 0x388 +#define RCC_R107SEMCR 0x38C +#define RCC_R108CIDCFGR 0x390 +#define RCC_R108SEMCR 0x394 +#define RCC_R109CIDCFGR 0x398 +#define RCC_R109SEMCR 0x39C +#define RCC_R110CIDCFGR 0x3A0 +#define RCC_R110SEMCR 0x3A4 +#define RCC_R111CIDCFGR 0x3A8 +#define RCC_R111SEMCR 0x3AC +#define RCC_R112CIDCFGR 0x3B0 +#define RCC_R112SEMCR 0x3B4 +#define RCC_R113CIDCFGR 0x3B8 +#define RCC_R113SEMCR 0x3BC +#define RCC_GRSTCSETR 0x400 +#define RCC_C1RSTCSETR 0x404 +#define RCC_C1P1RSTCSETR 0x408 +#define RCC_C2RSTCSETR 0x40C +#define RCC_HWRSTSCLRR 0x410 +#define RCC_C1HWRSTSCLRR 0x414 +#define RCC_C2HWRSTSCLRR 0x418 +#define RCC_C1BOOTRSTSSETR 0x41C +#define RCC_C1BOOTRSTSCLRR 0x420 +#define RCC_C2BOOTRSTSSETR 0x424 +#define RCC_C2BOOTRSTSCLRR 0x428 +#define RCC_C1SREQSETR 0x42C +#define RCC_C1SREQCLRR 0x430 +#define RCC_CPUBOOTCR 0x434 +#define RCC_STBYBOOTCR 0x438 +#define RCC_LEGBOOTCR 0x43C +#define RCC_BDCR 0x440 +#define RCC_D3DCR 0x444 +#define RCC_D3DSR 0x448 +#define RCC_RDCR 0x44C +#define RCC_C1MSRDCR 0x450 +#define RCC_PWRLPDLYCR 0x454 +#define RCC_C1CIESETR 0x458 +#define RCC_C1CIFCLRR 0x45C +#define RCC_C2CIESETR 0x460 +#define RCC_C2CIFCLRR 0x464 +#define RCC_IWDGC1FZSETR 0x468 +#define RCC_IWDGC1FZCLRR 0x46C +#define RCC_IWDGC1CFGSETR 0x470 +#define RCC_IWDGC1CFGCLRR 0x474 +#define RCC_IWDGC2FZSETR 0x478 +#define RCC_IWDGC2FZCLRR 0x47C +#define RCC_IWDGC2CFGSETR 0x480 +#define RCC_IWDGC2CFGCLRR 0x484 +#define RCC_IWDGC3CFGSETR 0x488 +#define RCC_IWDGC3CFGCLRR 0x48C +#define RCC_C3CFGR 0x490 +#define RCC_MCO1CFGR 0x494 +#define RCC_MCO2CFGR 0x498 +#define RCC_OCENSETR 0x49C +#define RCC_OCENCLRR 0x4A0 +#define RCC_OCRDYR 0x4A4 +#define RCC_HSICFGR 0x4A8 +#define RCC_MSICFGR 0x4AC +#define RCC_RTCDIVR 0x4B0 +#define RCC_APB1DIVR 0x4B4 +#define RCC_APB2DIVR 0x4B8 +#define RCC_APB3DIVR 0x4BC +#define RCC_APB4DIVR 0x4C0 +#define RCC_APBDBGDIVR 0x4C4 +#define RCC_TIMG1PRER 0x4C8 +#define RCC_TIMG2PRER 0x4CC +#define RCC_LSMCUDIVR 0x4D0 +#define RCC_DDRCPCFGR 0x4D4 +#define RCC_DDRCAPBCFGR 0x4D8 +#define RCC_DDRPHYCAPBCFGR 0x4DC +#define RCC_DDRPHYCCFGR 0x4E0 +#define RCC_DDRCFGR 0x4E4 +#define RCC_DDRITFCFGR 0x4E8 +#define RCC_SYSRAMCFGR 0x4F0 +#define RCC_VDERAMCFGR 0x4F4 +#define RCC_SRAM1CFGR 0x4F8 +#define RCC_SRAM2CFGR 0x4FC +#define RCC_RETRAMCFGR 0x500 +#define RCC_BKPSRAMCFGR 0x504 +#define RCC_LPSRAM1CFGR 0x508 +#define RCC_LPSRAM2CFGR 0x50C +#define RCC_LPSRAM3CFGR 0x510 +#define RCC_OSPI1CFGR 0x514 +#define RCC_OSPI2CFGR 0x518 +#define RCC_FMCCFGR 0x51C +#define RCC_DBGCFGR 0x520 +#define RCC_STM500CFGR 0x524 +#define RCC_ETRCFGR 0x528 +#define RCC_GPIOACFGR 0x52C +#define RCC_GPIOBCFGR 0x530 +#define RCC_GPIOCCFGR 0x534 +#define RCC_GPIODCFGR 0x538 +#define RCC_GPIOECFGR 0x53C +#define RCC_GPIOFCFGR 0x540 +#define RCC_GPIOGCFGR 0x544 +#define RCC_GPIOHCFGR 0x548 +#define RCC_GPIOICFGR 0x54C +#define RCC_GPIOJCFGR 0x550 +#define RCC_GPIOKCFGR 0x554 +#define RCC_GPIOZCFGR 0x558 +#define RCC_HPDMA1CFGR 0x55C +#define RCC_HPDMA2CFGR 0x560 +#define RCC_HPDMA3CFGR 0x564 +#define RCC_LPDMACFGR 0x568 +#define RCC_HSEMCFGR 0x56C +#define RCC_IPCC1CFGR 0x570 +#define RCC_IPCC2CFGR 0x574 +#define RCC_RTCCFGR 0x578 +#define RCC_SYSCPU1CFGR 0x580 +#define RCC_BSECCFGR 0x584 +#define RCC_IS2MCFGR 0x58C +#define RCC_PLL2CFGR1 0x590 +#define RCC_PLL2CFGR2 0x594 +#define RCC_PLL2CFGR3 0x598 +#define RCC_PLL2CFGR4 0x59C +#define RCC_PLL2CFGR5 0x5A0 +#define RCC_PLL2CFGR6 0x5A8 +#define RCC_PLL2CFGR7 0x5AC +#define 
RCC_PLL3CFGR1 0x5B8 +#define RCC_PLL3CFGR2 0x5BC +#define RCC_PLL3CFGR3 0x5C0 +#define RCC_PLL3CFGR4 0x5C4 +#define RCC_PLL3CFGR5 0x5C8 +#define RCC_PLL3CFGR6 0x5D0 +#define RCC_PLL3CFGR7 0x5D4 +#define RCC_HSIFMONCR 0x5E0 +#define RCC_HSIFVALR 0x5E4 +#define RCC_TIM1CFGR 0x700 +#define RCC_TIM2CFGR 0x704 +#define RCC_TIM3CFGR 0x708 +#define RCC_TIM4CFGR 0x70C +#define RCC_TIM5CFGR 0x710 +#define RCC_TIM6CFGR 0x714 +#define RCC_TIM7CFGR 0x718 +#define RCC_TIM8CFGR 0x71C +#define RCC_TIM10CFGR 0x720 +#define RCC_TIM11CFGR 0x724 +#define RCC_TIM12CFGR 0x728 +#define RCC_TIM13CFGR 0x72C +#define RCC_TIM14CFGR 0x730 +#define RCC_TIM15CFGR 0x734 +#define RCC_TIM16CFGR 0x738 +#define RCC_TIM17CFGR 0x73C +#define RCC_TIM20CFGR 0x740 +#define RCC_LPTIM1CFGR 0x744 +#define RCC_LPTIM2CFGR 0x748 +#define RCC_LPTIM3CFGR 0x74C +#define RCC_LPTIM4CFGR 0x750 +#define RCC_LPTIM5CFGR 0x754 +#define RCC_SPI1CFGR 0x758 +#define RCC_SPI2CFGR 0x75C +#define RCC_SPI3CFGR 0x760 +#define RCC_SPI4CFGR 0x764 +#define RCC_SPI5CFGR 0x768 +#define RCC_SPI6CFGR 0x76C +#define RCC_SPI7CFGR 0x770 +#define RCC_SPI8CFGR 0x774 +#define RCC_SPDIFRXCFGR 0x778 +#define RCC_USART1CFGR 0x77C +#define RCC_USART2CFGR 0x780 +#define RCC_USART3CFGR 0x784 +#define RCC_UART4CFGR 0x788 +#define RCC_UART5CFGR 0x78C +#define RCC_USART6CFGR 0x790 +#define RCC_UART7CFGR 0x794 +#define RCC_UART8CFGR 0x798 +#define RCC_UART9CFGR 0x79C +#define RCC_LPUART1CFGR 0x7A0 +#define RCC_I2C1CFGR 0x7A4 +#define RCC_I2C2CFGR 0x7A8 +#define RCC_I2C3CFGR 0x7AC +#define RCC_I2C4CFGR 0x7B0 +#define RCC_I2C5CFGR 0x7B4 +#define RCC_I2C6CFGR 0x7B8 +#define RCC_I2C7CFGR 0x7BC +#define RCC_I2C8CFGR 0x7C0 +#define RCC_SAI1CFGR 0x7C4 +#define RCC_SAI2CFGR 0x7C8 +#define RCC_SAI3CFGR 0x7CC +#define RCC_SAI4CFGR 0x7D0 +#define RCC_MDF1CFGR 0x7D8 +#define RCC_ADF1CFGR 0x7DC +#define RCC_FDCANCFGR 0x7E0 +#define RCC_HDPCFGR 0x7E4 +#define RCC_ADC12CFGR 0x7E8 +#define RCC_ADC3CFGR 0x7EC +#define RCC_ETH1CFGR 0x7F0 +#define RCC_ETH2CFGR 0x7F4 +#define RCC_USBHCFGR 0x7FC +#define RCC_USB2PHY1CFGR 0x800 +#define RCC_USB2PHY2CFGR 0x804 +#define RCC_USB3DRCFGR 0x808 +#define RCC_USB3PCIEPHYCFGR 0x80C +#define RCC_PCIECFGR 0x810 +#define RCC_USBTCCFGR 0x814 +#define RCC_ETHSWCFGR 0x818 +#define RCC_ETHSWACMCFGR 0x81C +#define RCC_ETHSWACMMSGCFGR 0x820 +#define RCC_STGENCFGR 0x824 +#define RCC_SDMMC1CFGR 0x830 +#define RCC_SDMMC2CFGR 0x834 +#define RCC_SDMMC3CFGR 0x838 +#define RCC_GPUCFGR 0x83C +#define RCC_LTDCCFGR 0x840 +#define RCC_DSICFGR 0x844 +#define RCC_LVDSCFGR 0x850 +#define RCC_CSICFGR 0x858 +#define RCC_DCMIPPCFGR 0x85C +#define RCC_CCICFGR 0x860 +#define RCC_VDECCFGR 0x864 +#define RCC_VENCCFGR 0x868 +#define RCC_RNGCFGR 0x870 +#define RCC_PKACFGR 0x874 +#define RCC_SAESCFGR 0x878 +#define RCC_HASHCFGR 0x87C +#define RCC_CRYP1CFGR 0x880 +#define RCC_CRYP2CFGR 0x884 +#define RCC_IWDG1CFGR 0x888 +#define RCC_IWDG2CFGR 0x88C +#define RCC_IWDG3CFGR 0x890 +#define RCC_IWDG4CFGR 0x894 +#define RCC_IWDG5CFGR 0x898 +#define RCC_WWDG1CFGR 0x89C +#define RCC_WWDG2CFGR 0x8A0 +#define RCC_VREFCFGR 0x8A8 +#define RCC_DTSCFGR 0x8AC +#define RCC_CRCCFGR 0x8B4 +#define RCC_SERCCFGR 0x8B8 +#define RCC_OSPIIOMCFGR 0x8BC +#define RCC_GICV2MCFGR 0x8C0 +#define RCC_I3C1CFGR 0x8C8 +#define RCC_I3C2CFGR 0x8CC +#define RCC_I3C3CFGR 0x8D0 +#define RCC_I3C4CFGR 0x8D4 +#define RCC_MUXSELCFGR 0x1000 +#define RCC_XBAR0CFGR 0x1018 +#define RCC_XBAR1CFGR 0x101C +#define RCC_XBAR2CFGR 0x1020 +#define RCC_XBAR3CFGR 0x1024 +#define RCC_XBAR4CFGR 0x1028 +#define RCC_XBAR5CFGR 0x102C +#define 
RCC_XBAR6CFGR 0x1030 +#define RCC_XBAR7CFGR 0x1034 +#define RCC_XBAR8CFGR 0x1038 +#define RCC_XBAR9CFGR 0x103C +#define RCC_XBAR10CFGR 0x1040 +#define RCC_XBAR11CFGR 0x1044 +#define RCC_XBAR12CFGR 0x1048 +#define RCC_XBAR13CFGR 0x104C +#define RCC_XBAR14CFGR 0x1050 +#define RCC_XBAR15CFGR 0x1054 +#define RCC_XBAR16CFGR 0x1058 +#define RCC_XBAR17CFGR 0x105C +#define RCC_XBAR18CFGR 0x1060 +#define RCC_XBAR19CFGR 0x1064 +#define RCC_XBAR20CFGR 0x1068 +#define RCC_XBAR21CFGR 0x106C +#define RCC_XBAR22CFGR 0x1070 +#define RCC_XBAR23CFGR 0x1074 +#define RCC_XBAR24CFGR 0x1078 +#define RCC_XBAR25CFGR 0x107C +#define RCC_XBAR26CFGR 0x1080 +#define RCC_XBAR27CFGR 0x1084 +#define RCC_XBAR28CFGR 0x1088 +#define RCC_XBAR29CFGR 0x108C +#define RCC_XBAR30CFGR 0x1090 +#define RCC_XBAR31CFGR 0x1094 +#define RCC_XBAR32CFGR 0x1098 +#define RCC_XBAR33CFGR 0x109C +#define RCC_XBAR34CFGR 0x10A0 +#define RCC_XBAR35CFGR 0x10A4 +#define RCC_XBAR36CFGR 0x10A8 +#define RCC_XBAR37CFGR 0x10AC +#define RCC_XBAR38CFGR 0x10B0 +#define RCC_XBAR39CFGR 0x10B4 +#define RCC_XBAR40CFGR 0x10B8 +#define RCC_XBAR41CFGR 0x10BC +#define RCC_XBAR42CFGR 0x10C0 +#define RCC_XBAR43CFGR 0x10C4 +#define RCC_XBAR44CFGR 0x10C8 +#define RCC_XBAR45CFGR 0x10CC +#define RCC_XBAR46CFGR 0x10D0 +#define RCC_XBAR47CFGR 0x10D4 +#define RCC_XBAR48CFGR 0x10D8 +#define RCC_XBAR49CFGR 0x10DC +#define RCC_XBAR50CFGR 0x10E0 +#define RCC_XBAR51CFGR 0x10E4 +#define RCC_XBAR52CFGR 0x10E8 +#define RCC_XBAR53CFGR 0x10EC +#define RCC_XBAR54CFGR 0x10F0 +#define RCC_XBAR55CFGR 0x10F4 +#define RCC_XBAR56CFGR 0x10F8 +#define RCC_XBAR57CFGR 0x10FC +#define RCC_XBAR58CFGR 0x1100 +#define RCC_XBAR59CFGR 0x1104 +#define RCC_XBAR60CFGR 0x1108 +#define RCC_XBAR61CFGR 0x110C +#define RCC_XBAR62CFGR 0x1110 +#define RCC_XBAR63CFGR 0x1114 +#define RCC_PREDIV0CFGR 0x1118 +#define RCC_PREDIV1CFGR 0x111C +#define RCC_PREDIV2CFGR 0x1120 +#define RCC_PREDIV3CFGR 0x1124 +#define RCC_PREDIV4CFGR 0x1128 +#define RCC_PREDIV5CFGR 0x112C +#define RCC_PREDIV6CFGR 0x1130 +#define RCC_PREDIV7CFGR 0x1134 +#define RCC_PREDIV8CFGR 0x1138 +#define RCC_PREDIV9CFGR 0x113C +#define RCC_PREDIV10CFGR 0x1140 +#define RCC_PREDIV11CFGR 0x1144 +#define RCC_PREDIV12CFGR 0x1148 +#define RCC_PREDIV13CFGR 0x114C +#define RCC_PREDIV14CFGR 0x1150 +#define RCC_PREDIV15CFGR 0x1154 +#define RCC_PREDIV16CFGR 0x1158 +#define RCC_PREDIV17CFGR 0x115C +#define RCC_PREDIV18CFGR 0x1160 +#define RCC_PREDIV19CFGR 0x1164 +#define RCC_PREDIV20CFGR 0x1168 +#define RCC_PREDIV21CFGR 0x116C +#define RCC_PREDIV22CFGR 0x1170 +#define RCC_PREDIV23CFGR 0x1174 +#define RCC_PREDIV24CFGR 0x1178 +#define RCC_PREDIV25CFGR 0x117C +#define RCC_PREDIV26CFGR 0x1180 +#define RCC_PREDIV27CFGR 0x1184 +#define RCC_PREDIV28CFGR 0x1188 +#define RCC_PREDIV29CFGR 0x118C +#define RCC_PREDIV30CFGR 0x1190 +#define RCC_PREDIV31CFGR 0x1194 +#define RCC_PREDIV32CFGR 0x1198 +#define RCC_PREDIV33CFGR 0x119C +#define RCC_PREDIV34CFGR 0x11A0 +#define RCC_PREDIV35CFGR 0x11A4 +#define RCC_PREDIV36CFGR 0x11A8 +#define RCC_PREDIV37CFGR 0x11AC +#define RCC_PREDIV38CFGR 0x11B0 +#define RCC_PREDIV39CFGR 0x11B4 +#define RCC_PREDIV40CFGR 0x11B8 +#define RCC_PREDIV41CFGR 0x11BC +#define RCC_PREDIV42CFGR 0x11C0 +#define RCC_PREDIV43CFGR 0x11C4 +#define RCC_PREDIV44CFGR 0x11C8 +#define RCC_PREDIV45CFGR 0x11CC +#define RCC_PREDIV46CFGR 0x11D0 +#define RCC_PREDIV47CFGR 0x11D4 +#define RCC_PREDIV48CFGR 0x11D8 +#define RCC_PREDIV49CFGR 0x11DC +#define RCC_PREDIV50CFGR 0x11E0 +#define RCC_PREDIV51CFGR 0x11E4 +#define RCC_PREDIV52CFGR 0x11E8 +#define RCC_PREDIV53CFGR 
0x11EC +#define RCC_PREDIV54CFGR 0x11F0 +#define RCC_PREDIV55CFGR 0x11F4 +#define RCC_PREDIV56CFGR 0x11F8 +#define RCC_PREDIV57CFGR 0x11FC +#define RCC_PREDIV58CFGR 0x1200 +#define RCC_PREDIV59CFGR 0x1204 +#define RCC_PREDIV60CFGR 0x1208 +#define RCC_PREDIV61CFGR 0x120C +#define RCC_PREDIV62CFGR 0x1210 +#define RCC_PREDIV63CFGR 0x1214 +#define RCC_PREDIVSR1 0x1218 +#define RCC_PREDIVSR2 0x121C +#define RCC_FINDIV0CFGR 0x1224 +#define RCC_FINDIV1CFGR 0x1228 +#define RCC_FINDIV2CFGR 0x122C +#define RCC_FINDIV3CFGR 0x1230 +#define RCC_FINDIV4CFGR 0x1234 +#define RCC_FINDIV5CFGR 0x1238 +#define RCC_FINDIV6CFGR 0x123C +#define RCC_FINDIV7CFGR 0x1240 +#define RCC_FINDIV8CFGR 0x1244 +#define RCC_FINDIV9CFGR 0x1248 +#define RCC_FINDIV10CFGR 0x124C +#define RCC_FINDIV11CFGR 0x1250 +#define RCC_FINDIV12CFGR 0x1254 +#define RCC_FINDIV13CFGR 0x1258 +#define RCC_FINDIV14CFGR 0x125C +#define RCC_FINDIV15CFGR 0x1260 +#define RCC_FINDIV16CFGR 0x1264 +#define RCC_FINDIV17CFGR 0x1268 +#define RCC_FINDIV18CFGR 0x126C +#define RCC_FINDIV19CFGR 0x1270 +#define RCC_FINDIV20CFGR 0x1274 +#define RCC_FINDIV21CFGR 0x1278 +#define RCC_FINDIV22CFGR 0x127C +#define RCC_FINDIV23CFGR 0x1280 +#define RCC_FINDIV24CFGR 0x1284 +#define RCC_FINDIV25CFGR 0x1288 +#define RCC_FINDIV26CFGR 0x128C +#define RCC_FINDIV27CFGR 0x1290 +#define RCC_FINDIV28CFGR 0x1294 +#define RCC_FINDIV29CFGR 0x1298 +#define RCC_FINDIV30CFGR 0x129C +#define RCC_FINDIV31CFGR 0x12A0 +#define RCC_FINDIV32CFGR 0x12A4 +#define RCC_FINDIV33CFGR 0x12A8 +#define RCC_FINDIV34CFGR 0x12AC +#define RCC_FINDIV35CFGR 0x12B0 +#define RCC_FINDIV36CFGR 0x12B4 +#define RCC_FINDIV37CFGR 0x12B8 +#define RCC_FINDIV38CFGR 0x12BC +#define RCC_FINDIV39CFGR 0x12C0 +#define RCC_FINDIV40CFGR 0x12C4 +#define RCC_FINDIV41CFGR 0x12C8 +#define RCC_FINDIV42CFGR 0x12CC +#define RCC_FINDIV43CFGR 0x12D0 +#define RCC_FINDIV44CFGR 0x12D4 +#define RCC_FINDIV45CFGR 0x12D8 +#define RCC_FINDIV46CFGR 0x12DC +#define RCC_FINDIV47CFGR 0x12E0 +#define RCC_FINDIV48CFGR 0x12E4 +#define RCC_FINDIV49CFGR 0x12E8 +#define RCC_FINDIV50CFGR 0x12EC +#define RCC_FINDIV51CFGR 0x12F0 +#define RCC_FINDIV52CFGR 0x12F4 +#define RCC_FINDIV53CFGR 0x12F8 +#define RCC_FINDIV54CFGR 0x12FC +#define RCC_FINDIV55CFGR 0x1300 +#define RCC_FINDIV56CFGR 0x1304 +#define RCC_FINDIV57CFGR 0x1308 +#define RCC_FINDIV58CFGR 0x130C +#define RCC_FINDIV59CFGR 0x1310 +#define RCC_FINDIV60CFGR 0x1314 +#define RCC_FINDIV61CFGR 0x1318 +#define RCC_FINDIV62CFGR 0x131C +#define RCC_FINDIV63CFGR 0x1320 +#define RCC_FINDIVSR1 0x1324 +#define RCC_FINDIVSR2 0x1328 +#define RCC_FCALCOBS0CFGR 0x1340 +#define RCC_FCALCOBS1CFGR 0x1344 +#define RCC_FCALCREFCFGR 0x1348 +#define RCC_FCALCCR1 0x134C +#define RCC_FCALCCR2 0x1354 +#define RCC_FCALCSR 0x1358 +#define RCC_PLL4CFGR1 0x1360 +#define RCC_PLL4CFGR2 0x1364 +#define RCC_PLL4CFGR3 0x1368 +#define RCC_PLL4CFGR4 0x136C +#define RCC_PLL4CFGR5 0x1370 +#define RCC_PLL4CFGR6 0x1378 +#define RCC_PLL4CFGR7 0x137C +#define RCC_PLL5CFGR1 0x1388 +#define RCC_PLL5CFGR2 0x138C +#define RCC_PLL5CFGR3 0x1390 +#define RCC_PLL5CFGR4 0x1394 +#define RCC_PLL5CFGR5 0x1398 +#define RCC_PLL5CFGR6 0x13A0 +#define RCC_PLL5CFGR7 0x13A4 +#define RCC_PLL6CFGR1 0x13B0 +#define RCC_PLL6CFGR2 0x13B4 +#define RCC_PLL6CFGR3 0x13B8 +#define RCC_PLL6CFGR4 0x13BC +#define RCC_PLL6CFGR5 0x13C0 +#define RCC_PLL6CFGR6 0x13C8 +#define RCC_PLL6CFGR7 0x13CC +#define RCC_PLL7CFGR1 0x13D8 +#define RCC_PLL7CFGR2 0x13DC +#define RCC_PLL7CFGR3 0x13E0 +#define RCC_PLL7CFGR4 0x13E4 +#define RCC_PLL7CFGR5 0x13E8 +#define RCC_PLL7CFGR6 
0x13F0 +#define RCC_PLL7CFGR7 0x13F4 +#define RCC_PLL8CFGR1 0x1400 +#define RCC_PLL8CFGR2 0x1404 +#define RCC_PLL8CFGR3 0x1408 +#define RCC_PLL8CFGR4 0x140C +#define RCC_PLL8CFGR5 0x1410 +#define RCC_PLL8CFGR6 0x1418 +#define RCC_PLL8CFGR7 0x141C +#define RCC_VERR 0xFFF4 +#define RCC_IDR 0xFFF8 +#define RCC_SIDR 0xFFFC + +#endif /* STM32MP25_RCC_H */ diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c index 9d3ffd3fb2..0736f6c812 100644 --- a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c @@ -125,6 +125,7 @@ static const struct of_device_id sun20i_d1_r_ccu_ids[] = { { .compatible = "allwinner,sun20i-d1-r-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, sun20i_d1_r_ccu_ids); static struct platform_driver sun20i_d1_r_ccu_driver = { .probe = sun20i_d1_r_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c index 48a8fb2c43..60756aadfa 100644 --- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c +++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c @@ -1394,6 +1394,7 @@ static const struct of_device_id sun20i_d1_ccu_ids[] = { { .compatible = "allwinner,sun20i-d1-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, sun20i_d1_ccu_ids); static struct platform_driver sun20i_d1_ccu_driver = { .probe = sun20i_d1_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c index 451ebb7c99..14f5c3da65 100644 --- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c +++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c @@ -1481,6 +1481,7 @@ static const struct of_device_id sun4i_a10_ccu_ids[] = { }, { } }; +MODULE_DEVICE_TABLE(of, sun4i_a10_ccu_ids); static struct platform_driver sun4i_a10_ccu_driver = { .probe = sun4i_a10_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c index fddd6c877c..3b983bb59b 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c @@ -202,6 +202,7 @@ static const struct of_device_id sun50i_a100_r_ccu_ids[] = { { .compatible = "allwinner,sun50i-a100-r-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, sun50i_a100_r_ccu_ids); static struct platform_driver sun50i_a100_r_ccu_driver = { .probe = sun50i_a100_r_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c index 5f93b5526e..38aa6e5f29 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c @@ -1264,6 +1264,7 @@ static const struct of_device_id sun50i_a100_ccu_ids[] = { { .compatible = "allwinner,sun50i-a100-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, sun50i_a100_ccu_ids); static struct platform_driver sun50i_a100_ccu_driver = { .probe = sun50i_a100_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c index 6a4b2b9ef3..491b16cfe3 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c @@ -171,11 +171,13 @@ static struct ccu_nkm pll_mipi_clk = { * user manual, and by experiments the PLL doesn't work without * these bits toggled. 
*/ - .enable = BIT(31) | BIT(23) | BIT(22), - .lock = BIT(28), - .n = _SUNXI_CCU_MULT(8, 4), - .k = _SUNXI_CCU_MULT_MIN(4, 2, 2), - .m = _SUNXI_CCU_DIV(0, 4), + .enable = BIT(31) | BIT(23) | BIT(22), + .lock = BIT(28), + .n = _SUNXI_CCU_MULT(8, 4), + .k = _SUNXI_CCU_MULT_MIN(4, 2, 2), + .m = _SUNXI_CCU_DIV(0, 4), + .max_m_n_ratio = 3, + .min_parent_m_ratio = 24000000, .common = { .reg = 0x040, .hw.init = CLK_HW_INIT("pll-mipi", "pll-video0", @@ -980,6 +982,7 @@ static const struct of_device_id sun50i_a64_ccu_ids[] = { { .compatible = "allwinner,sun50i-a64-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, sun50i_a64_ccu_ids); static struct platform_driver sun50i_a64_ccu_driver = { .probe = sun50i_a64_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c index 02b28cfc55..e2dc29fa99 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c @@ -244,6 +244,7 @@ static const struct of_device_id sun50i_h6_r_ccu_ids[] = { }, { } }; +MODULE_DEVICE_TABLE(of, sun50i_h6_r_ccu_ids); static struct platform_driver sun50i_h6_r_ccu_driver = { .probe = sun50i_h6_r_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c index 892df80727..e6421c2ba1 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c @@ -1274,6 +1274,7 @@ static const struct of_device_id sun50i_h6_ccu_ids[] = { { .compatible = "allwinner,sun50i-h6-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, sun50i_h6_ccu_ids); static struct platform_driver sun50i_h6_ccu_driver = { .probe = sun50i_h6_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c index 21e918582a..45aae1ae51 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c @@ -1154,6 +1154,7 @@ static const struct of_device_id sun50i_h616_ccu_ids[] = { { .compatible = "allwinner,sun50i-h616-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, sun50i_h616_ccu_ids); static struct platform_driver sun50i_h616_ccu_driver = { .probe = sun50i_h616_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 0762deffb3..8cb8cbbdba 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -1271,6 +1271,7 @@ static const struct of_device_id sun6i_a31_ccu_ids[] = { { .compatible = "allwinner,sun6i-a31-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, sun6i_a31_ccu_ids); static struct platform_driver sun6i_a31_ccu_driver = { .probe = sun6i_a31_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c index fdc8ccc586..5a98c4e9e6 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c @@ -336,6 +336,7 @@ static const struct of_device_id sun6i_rtc_ccu_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, sun6i_rtc_ccu_match); int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg) { diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c index e80cc3864e..e748ad612b 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c @@ -751,6 +751,7 @@ static const struct of_device_id sun8i_a23_ccu_ids[] = { { .compatible = "allwinner,sun8i-a23-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, sun8i_a23_ccu_ids); static struct platform_driver sun8i_a23_ccu_driver = { .probe = sun8i_a23_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c 
b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c index d12878a1ba..8a27a17776 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c @@ -823,6 +823,7 @@ static const struct of_device_id sun8i_a33_ccu_ids[] = { { .compatible = "allwinner,sun8i-a33-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, sun8i_a33_ccu_ids); static struct platform_driver sun8i_a33_ccu_driver = { .probe = sun8i_a33_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c index 76cbd9e9e8..93eca47935 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c @@ -911,6 +911,7 @@ static const struct of_device_id sun8i_a83t_ccu_ids[] = { { .compatible = "allwinner,sun8i-a83t-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, sun8i_a83t_ccu_ids); static struct platform_driver sun8i_a83t_ccu_driver = { .probe = sun8i_a83t_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c index 6a043a0a9d..b0b8dba239 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c @@ -337,6 +337,7 @@ static const struct of_device_id sunxi_de2_clk_ids[] = { }, { } }; +MODULE_DEVICE_TABLE(of, sunxi_de2_clk_ids); static struct platform_driver sunxi_de2_clk_driver = { .probe = sunxi_de2_clk_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c index 74274c17ef..ca5739fa04 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c @@ -1082,6 +1082,7 @@ static const struct of_device_id sun8i_h3_ccu_ids[] = { }, { } }; +MODULE_DEVICE_TABLE(of, sun8i_h3_ccu_ids); static struct platform_driver sun8i_h3_ccu_driver = { .probe = sun8i_h3_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c index 4890a976b1..bac7e737db 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c @@ -262,6 +262,7 @@ static const struct of_device_id sun8i_r_ccu_ids[] = { }, { } }; +MODULE_DEVICE_TABLE(of, sun8i_r_ccu_ids); static struct platform_driver sun8i_r_ccu_driver = { .probe = sun8i_r_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c index 31eca0d3bc..3774b293e7 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c @@ -1363,6 +1363,7 @@ static const struct of_device_id sun8i_r40_ccu_ids[] = { { .compatible = "allwinner,sun8i-r40-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, sun8i_r40_ccu_ids); static struct platform_driver sun8i_r40_ccu_driver = { .probe = sun8i_r40_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index f3ce8664b2..994258a3ad 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -768,6 +768,7 @@ static const struct of_device_id sun8i_v3s_ccu_ids[] = { }, { } }; +MODULE_DEVICE_TABLE(of, sun8i_v3s_ccu_ids); static struct platform_driver sun8i_v3s_ccu_driver = { .probe = sun8i_v3s_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c index 1d8b1ae161..ae7939d3f5 100644 --- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c @@ -254,6 +254,7 @@ static const struct of_device_id sun9i_a80_de_clk_ids[] = { { .compatible = "allwinner,sun9i-a80-de-clks" }, { } }; +MODULE_DEVICE_TABLE(of, sun9i_a80_de_clk_ids); static struct platform_driver sun9i_a80_de_clk_driver = { .probe = 
sun9i_a80_de_clk_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c index a0fb0da8f3..bfa2ff9d52 100644 --- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c @@ -127,6 +127,7 @@ static const struct of_device_id sun9i_a80_usb_clk_ids[] = { { .compatible = "allwinner,sun9i-a80-usb-clks" }, { } }; +MODULE_DEVICE_TABLE(of, sun9i_a80_usb_clk_ids); static struct platform_driver sun9i_a80_usb_clk_driver = { .probe = sun9i_a80_usb_clk_probe, diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c index 730fd8e280..c05805e4ad 100644 --- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c @@ -1236,6 +1236,7 @@ static const struct of_device_id sun9i_a80_ccu_ids[] = { { .compatible = "allwinner,sun9i-a80-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, sun9i_a80_ccu_ids); static struct platform_driver sun9i_a80_ccu_driver = { .probe = sun9i_a80_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c index 0d5b60b123..76d3d070b2 100644 --- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c +++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c @@ -565,6 +565,7 @@ static const struct of_device_id suniv_f1c100s_ccu_ids[] = { { .compatible = "allwinner,suniv-f1c100s-ccu" }, { } }; +MODULE_DEVICE_TABLE(of, suniv_f1c100s_ccu_ids); static struct platform_driver suniv_f1c100s_ccu_driver = { .probe = suniv_f1c100s_ccu_probe, diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c index 853f84398e..1168d894d6 100644 --- a/drivers/clk/sunxi-ng/ccu_nkm.c +++ b/drivers/clk/sunxi-ng/ccu_nkm.c @@ -16,6 +16,20 @@ struct _ccu_nkm { unsigned long m, min_m, max_m; }; +static bool ccu_nkm_is_valid_rate(struct ccu_common *common, unsigned long parent, + unsigned long n, unsigned long m) +{ + struct ccu_nkm *nkm = container_of(common, struct ccu_nkm, common); + + if (nkm->max_m_n_ratio && (m > nkm->max_m_n_ratio * n)) + return false; + + if (nkm->min_parent_m_ratio && (parent < nkm->min_parent_m_ratio * m)) + return false; + + return true; +} + static unsigned long ccu_nkm_find_best_with_parent_adj(struct ccu_common *common, struct clk_hw *parent_hw, unsigned long *parent, unsigned long rate, @@ -31,6 +45,10 @@ static unsigned long ccu_nkm_find_best_with_parent_adj(struct ccu_common *common unsigned long tmp_rate, tmp_parent; tmp_parent = clk_hw_round_rate(parent_hw, rate * _m / (_n * _k)); + + if (!ccu_nkm_is_valid_rate(common, tmp_parent, _n, _m)) + continue; + tmp_rate = tmp_parent * _n * _k / _m; if (ccu_is_better_rate(common, rate, tmp_rate, best_rate) || @@ -64,6 +82,9 @@ static unsigned long ccu_nkm_find_best(unsigned long parent, unsigned long rate, for (_k = nkm->min_k; _k <= nkm->max_k; _k++) { for (_n = nkm->min_n; _n <= nkm->max_n; _n++) { for (_m = nkm->min_m; _m <= nkm->max_m; _m++) { + if (!ccu_nkm_is_valid_rate(common, parent, _n, _m)) + continue; + unsigned long tmp_rate; tmp_rate = parent * _n * _k / _m; diff --git a/drivers/clk/sunxi-ng/ccu_nkm.h b/drivers/clk/sunxi-ng/ccu_nkm.h index 6601defb3f..c409212ee4 100644 --- a/drivers/clk/sunxi-ng/ccu_nkm.h +++ b/drivers/clk/sunxi-ng/ccu_nkm.h @@ -27,6 +27,8 @@ struct ccu_nkm { struct ccu_mux_internal mux; unsigned int fixed_post_div; + unsigned long max_m_n_ratio; + unsigned long min_parent_m_ratio; struct ccu_common common; }; diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index 403ec81f56..3386bd1903 100644 --- a/drivers/clk/ti/dpll.c +++ 
b/drivers/clk/ti/dpll.c @@ -34,8 +34,6 @@ static const struct clk_ops dpll_m4xen_ck_ops = { .save_context = &omap3_core_dpll_save_context, .restore_context = &omap3_core_dpll_restore_context, }; -#else -static const struct clk_ops dpll_m4xen_ck_ops = {}; #endif #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \ @@ -95,11 +93,7 @@ static const struct clk_ops omap3_dpll_core_ck_ops = { .recalc_rate = &omap3_dpll_recalc, .round_rate = &omap2_dpll_round_rate, }; -#else -static const struct clk_ops omap3_dpll_core_ck_ops = {}; -#endif -#ifdef CONFIG_ARCH_OMAP3 static const struct clk_ops omap3_dpll_ck_ops = { .enable = &omap3_noncore_dpll_enable, .disable = &omap3_noncore_dpll_disable, @@ -137,9 +131,13 @@ static const struct clk_ops omap3_dpll_per_ck_ops = { }; #endif +#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ + defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \ + defined(CONFIG_SOC_AM43XX) static const struct clk_ops dpll_x2_ck_ops = { .recalc_rate = &omap3_clkoutx2_recalc, }; +#endif /** * _register_dpll - low level registration of a DPLL clock diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 8d4a520566..5bb43cc1a8 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -331,7 +331,7 @@ static u64 notrace hisi_161010101_read_cntvct_el0(void) return __hisi_161010101_read_reg(cntvct_el0); } -static struct ate_acpi_oem_info hisi_161010101_oem_info[] = { +static const struct ate_acpi_oem_info hisi_161010101_oem_info[] = { /* * Note that trailing spaces are required to properly match * the OEM table information. diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c index 8da972dc17..3fcbd02b24 100644 --- a/drivers/clocksource/renesas-ostm.c +++ b/drivers/clocksource/renesas-ostm.c @@ -210,6 +210,7 @@ static int __init ostm_init(struct device_node *np) pr_info("%pOF: used for clock events\n", np); } + of_node_set_flag(np, OF_POPULATED); return 0; err_cleanup: @@ -224,7 +225,7 @@ err_free: TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init); -#ifdef CONFIG_ARCH_RZG2L +#if defined(CONFIG_ARCH_RZG2L) || defined(CONFIG_ARCH_R9A09G057) static int __init ostm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c index 09fd292eb8..0bdd9d7ec5 100644 --- a/drivers/clocksource/timer-clint.c +++ b/drivers/clocksource/timer-clint.c @@ -251,7 +251,7 @@ static int __init clint_timer_init_dt(struct device_node *np) } irq_set_chained_handler(clint_ipi_irq, clint_ipi_interrupt); - riscv_ipi_set_virq_range(rc, BITS_PER_BYTE, true); + riscv_ipi_set_virq_range(rc, BITS_PER_BYTE); clint_clear_ipi(); #endif diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index 56acf26172..b7a34b1a97 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -129,7 +129,6 @@ struct dmtimer { void __iomem *func_base; /* function register base */ atomic_t enabled; - unsigned long rate; unsigned reserved:1; unsigned posted:1; unsigned omap1:1; diff --git a/drivers/comedi/drivers/cb_pcidas64.c b/drivers/comedi/drivers/cb_pcidas64.c index ff19fc3859..d398c6df94 100644 --- a/drivers/comedi/drivers/cb_pcidas64.c +++ b/drivers/comedi/drivers/cb_pcidas64.c @@ -374,11 +374,6 @@ static inline u16 pipe_full_bits(u16 hw_status_bits) return (hw_status_bits >> 10) & 0x3; }; -static inline unsigned int dma_chain_flag_bits(u16 
prepost_bits) -{ - return (prepost_bits >> 6) & 0x3; -} - static inline unsigned int adc_upper_read_ptr_code(u16 prepost_bits) { return (prepost_bits >> 12) & 0x3; diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c index 3f24481fc0..893b4f0726 100644 --- a/drivers/counter/counter-core.c +++ b/drivers/counter/counter-core.c @@ -49,12 +49,12 @@ static void counter_device_release(struct device *dev) kfree(container_of(counter, struct counter_device_allochelper, counter)); } -static struct device_type counter_device_type = { +static const struct device_type counter_device_type = { .name = "counter_device", .release = counter_device_release, }; -static struct bus_type counter_bus_type = { +static const struct bus_type counter_bus_type = { .name = "counter", .dev_name = "counter", }; diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c index 6206d2dc3d..0664ef969f 100644 --- a/drivers/counter/stm32-timer-cnt.c +++ b/drivers/counter/stm32-timer-cnt.c @@ -8,9 +8,11 @@ * */ #include +#include #include #include #include +#include #include #include #include @@ -21,6 +23,12 @@ #define TIM_CCER_MASK (TIM_CCER_CC1P | TIM_CCER_CC1NP | \ TIM_CCER_CC2P | TIM_CCER_CC2NP) +#define STM32_CH1_SIG 0 +#define STM32_CH2_SIG 1 +#define STM32_CLOCK_SIG 2 +#define STM32_CH3_SIG 3 +#define STM32_CH4_SIG 4 + struct stm32_timer_regs { u32 cr1; u32 cnt; @@ -34,6 +42,11 @@ struct stm32_timer_cnt { u32 max_arr; bool enabled; struct stm32_timer_regs bak; + bool has_encoder; + unsigned int nchannels; + unsigned int nr_irqs; + spinlock_t lock; /* protects nb_ovf */ + u64 nb_ovf; }; static const enum counter_function stm32_count_functions[] = { @@ -107,12 +120,18 @@ static int stm32_count_function_write(struct counter_device *counter, sms = TIM_SMCR_SMS_SLAVE_MODE_DISABLED; break; case COUNTER_FUNCTION_QUADRATURE_X2_A: + if (!priv->has_encoder) + return -EOPNOTSUPP; sms = TIM_SMCR_SMS_ENCODER_MODE_1; break; case COUNTER_FUNCTION_QUADRATURE_X2_B: + if (!priv->has_encoder) + return -EOPNOTSUPP; sms = TIM_SMCR_SMS_ENCODER_MODE_2; break; case COUNTER_FUNCTION_QUADRATURE_X4: + if (!priv->has_encoder) + return -EOPNOTSUPP; sms = TIM_SMCR_SMS_ENCODER_MODE_3; break; default: @@ -216,11 +235,108 @@ static int stm32_count_enable_write(struct counter_device *counter, return 0; } +static int stm32_count_prescaler_read(struct counter_device *counter, + struct counter_count *count, u64 *prescaler) +{ + struct stm32_timer_cnt *const priv = counter_priv(counter); + u32 psc; + + regmap_read(priv->regmap, TIM_PSC, &psc); + + *prescaler = psc + 1; + + return 0; +} + +static int stm32_count_prescaler_write(struct counter_device *counter, + struct counter_count *count, u64 prescaler) +{ + struct stm32_timer_cnt *const priv = counter_priv(counter); + u32 psc; + + if (!prescaler || prescaler > MAX_TIM_PSC + 1) + return -ERANGE; + + psc = prescaler - 1; + + return regmap_write(priv->regmap, TIM_PSC, psc); +} + +static int stm32_count_cap_read(struct counter_device *counter, + struct counter_count *count, + size_t ch, u64 *cap) +{ + struct stm32_timer_cnt *const priv = counter_priv(counter); + u32 ccrx; + + if (ch >= priv->nchannels) + return -EOPNOTSUPP; + + switch (ch) { + case 0: + regmap_read(priv->regmap, TIM_CCR1, &ccrx); + break; + case 1: + regmap_read(priv->regmap, TIM_CCR2, &ccrx); + break; + case 2: + regmap_read(priv->regmap, TIM_CCR3, &ccrx); + break; + case 3: + regmap_read(priv->regmap, TIM_CCR4, &ccrx); + break; + default: + return -EINVAL; + } + + dev_dbg(counter->parent, 
"CCR%zu: 0x%08x\n", ch + 1, ccrx); + + *cap = ccrx; + + return 0; +} + +static int stm32_count_nb_ovf_read(struct counter_device *counter, + struct counter_count *count, u64 *val) +{ + struct stm32_timer_cnt *const priv = counter_priv(counter); + unsigned long irqflags; + + spin_lock_irqsave(&priv->lock, irqflags); + *val = priv->nb_ovf; + spin_unlock_irqrestore(&priv->lock, irqflags); + + return 0; +} + +static int stm32_count_nb_ovf_write(struct counter_device *counter, + struct counter_count *count, u64 val) +{ + struct stm32_timer_cnt *const priv = counter_priv(counter); + unsigned long irqflags; + + spin_lock_irqsave(&priv->lock, irqflags); + priv->nb_ovf = val; + spin_unlock_irqrestore(&priv->lock, irqflags); + + return 0; +} + +static DEFINE_COUNTER_ARRAY_CAPTURE(stm32_count_cap_array, 4); + static struct counter_comp stm32_count_ext[] = { COUNTER_COMP_DIRECTION(stm32_count_direction_read), COUNTER_COMP_ENABLE(stm32_count_enable_read, stm32_count_enable_write), COUNTER_COMP_CEILING(stm32_count_ceiling_read, stm32_count_ceiling_write), + COUNTER_COMP_COUNT_U64("prescaler", stm32_count_prescaler_read, + stm32_count_prescaler_write), + COUNTER_COMP_ARRAY_CAPTURE(stm32_count_cap_read, NULL, stm32_count_cap_array), + COUNTER_COMP_COUNT_U64("num_overflows", stm32_count_nb_ovf_read, stm32_count_nb_ovf_write), +}; + +static const enum counter_synapse_action stm32_clock_synapse_actions[] = { + COUNTER_SYNAPSE_ACTION_RISING_EDGE, }; static const enum counter_synapse_action stm32_synapse_actions[] = { @@ -243,25 +359,152 @@ static int stm32_action_read(struct counter_device *counter, switch (function) { case COUNTER_FUNCTION_INCREASE: /* counts on internal clock when CEN=1 */ - *action = COUNTER_SYNAPSE_ACTION_NONE; + if (synapse->signal->id == STM32_CLOCK_SIG) + *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE; + else + *action = COUNTER_SYNAPSE_ACTION_NONE; return 0; case COUNTER_FUNCTION_QUADRATURE_X2_A: /* counts up/down on TI1FP1 edge depending on TI2FP2 level */ - if (synapse->signal->id == count->synapses[0].signal->id) + if (synapse->signal->id == STM32_CH1_SIG) *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; else *action = COUNTER_SYNAPSE_ACTION_NONE; return 0; case COUNTER_FUNCTION_QUADRATURE_X2_B: /* counts up/down on TI2FP2 edge depending on TI1FP1 level */ - if (synapse->signal->id == count->synapses[1].signal->id) + if (synapse->signal->id == STM32_CH2_SIG) *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; else *action = COUNTER_SYNAPSE_ACTION_NONE; return 0; case COUNTER_FUNCTION_QUADRATURE_X4: /* counts up/down on both TI1FP1 and TI2FP2 edges */ - *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; + if (synapse->signal->id == STM32_CH1_SIG || synapse->signal->id == STM32_CH2_SIG) + *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; + else + *action = COUNTER_SYNAPSE_ACTION_NONE; + return 0; + default: + return -EINVAL; + } +} + +struct stm32_count_cc_regs { + u32 ccmr_reg; + u32 ccmr_mask; + u32 ccmr_bits; + u32 ccer_bits; +}; + +static const struct stm32_count_cc_regs stm32_cc[] = { + { TIM_CCMR1, TIM_CCMR_CC1S, TIM_CCMR_CC1S_TI1, + TIM_CCER_CC1E | TIM_CCER_CC1P | TIM_CCER_CC1NP }, + { TIM_CCMR1, TIM_CCMR_CC2S, TIM_CCMR_CC2S_TI2, + TIM_CCER_CC2E | TIM_CCER_CC2P | TIM_CCER_CC2NP }, + { TIM_CCMR2, TIM_CCMR_CC3S, TIM_CCMR_CC3S_TI3, + TIM_CCER_CC3E | TIM_CCER_CC3P | TIM_CCER_CC3NP }, + { TIM_CCMR2, TIM_CCMR_CC4S, TIM_CCMR_CC4S_TI4, + TIM_CCER_CC4E | TIM_CCER_CC4P | TIM_CCER_CC4NP }, +}; + +static int stm32_count_capture_configure(struct counter_device *counter, unsigned int ch, + bool enable) +{ + struct 
stm32_timer_cnt *const priv = counter_priv(counter); + const struct stm32_count_cc_regs *cc; + u32 ccmr, ccer; + + if (ch >= ARRAY_SIZE(stm32_cc) || ch >= priv->nchannels) { + dev_err(counter->parent, "invalid ch: %d\n", ch); + return -EINVAL; + } + + cc = &stm32_cc[ch]; + + /* + * configure channel in input capture mode, map channel 1 on TI1, channel2 on TI2... + * Select both edges / non-inverted to trigger a capture. + */ + if (enable) { + /* first clear possibly latched capture flag upon enabling */ + if (!regmap_test_bits(priv->regmap, TIM_CCER, cc->ccer_bits)) + regmap_write(priv->regmap, TIM_SR, ~TIM_SR_CC_IF(ch)); + regmap_update_bits(priv->regmap, cc->ccmr_reg, cc->ccmr_mask, + cc->ccmr_bits); + regmap_set_bits(priv->regmap, TIM_CCER, cc->ccer_bits); + } else { + regmap_clear_bits(priv->regmap, TIM_CCER, cc->ccer_bits); + regmap_clear_bits(priv->regmap, cc->ccmr_reg, cc->ccmr_mask); + } + + regmap_read(priv->regmap, cc->ccmr_reg, &ccmr); + regmap_read(priv->regmap, TIM_CCER, &ccer); + dev_dbg(counter->parent, "%s(%s) ch%d 0x%08x 0x%08x\n", __func__, enable ? "ena" : "dis", + ch, ccmr, ccer); + + return 0; +} + +static int stm32_count_events_configure(struct counter_device *counter) +{ + struct stm32_timer_cnt *const priv = counter_priv(counter); + struct counter_event_node *event_node; + u32 dier = 0; + int i, ret; + + list_for_each_entry(event_node, &counter->events_list, l) { + switch (event_node->event) { + case COUNTER_EVENT_OVERFLOW_UNDERFLOW: + /* first clear possibly latched UIF before enabling */ + if (!regmap_test_bits(priv->regmap, TIM_DIER, TIM_DIER_UIE)) + regmap_write(priv->regmap, TIM_SR, (u32)~TIM_SR_UIF); + dier |= TIM_DIER_UIE; + break; + case COUNTER_EVENT_CAPTURE: + ret = stm32_count_capture_configure(counter, event_node->channel, true); + if (ret) + return ret; + dier |= TIM_DIER_CC_IE(event_node->channel); + break; + default: + /* should never reach this path */ + return -EINVAL; + } + } + + /* Enable / disable all events at once, from events_list, so write all DIER bits */ + regmap_write(priv->regmap, TIM_DIER, dier); + + /* check for disabled capture events */ + for (i = 0 ; i < priv->nchannels; i++) { + if (!(dier & TIM_DIER_CC_IE(i))) { + ret = stm32_count_capture_configure(counter, i, false); + if (ret) + return ret; + } + } + + return 0; +} + +static int stm32_count_watch_validate(struct counter_device *counter, + const struct counter_watch *watch) +{ + struct stm32_timer_cnt *const priv = counter_priv(counter); + + /* Interrupts are optional */ + if (!priv->nr_irqs) + return -EOPNOTSUPP; + + switch (watch->event) { + case COUNTER_EVENT_CAPTURE: + if (watch->channel >= priv->nchannels) { + dev_err(counter->parent, "Invalid channel %d\n", watch->channel); + return -EINVAL; + } + return 0; + case COUNTER_EVENT_OVERFLOW_UNDERFLOW: return 0; default: return -EINVAL; @@ -274,35 +517,89 @@ static const struct counter_ops stm32_timer_cnt_ops = { .function_read = stm32_count_function_read, .function_write = stm32_count_function_write, .action_read = stm32_action_read, + .events_configure = stm32_count_events_configure, + .watch_validate = stm32_count_watch_validate, +}; + +static int stm32_count_clk_get_freq(struct counter_device *counter, + struct counter_signal *signal, u64 *freq) +{ + struct stm32_timer_cnt *const priv = counter_priv(counter); + + *freq = clk_get_rate(priv->clk); + + return 0; +} + +static struct counter_comp stm32_count_clock_ext[] = { + COUNTER_COMP_FREQUENCY(stm32_count_clk_get_freq), }; static struct counter_signal stm32_signals[] = { + 
/* + * Need to declare all the signals as a static array, and keep the signals order here, + * even if they're unused or unexisting on some timer instances. It's an abstraction, + * e.g. high level view of the counter features. + * + * Userspace programs may rely on signal0 to be "Channel 1", signal1 to be "Channel 2", + * and so on. When a signal is unexisting, the COUNTER_SYNAPSE_ACTION_NONE can be used, + * to indicate that a signal doesn't affect the counter. + */ { - .id = 0, - .name = "Channel 1 Quadrature A" + .id = STM32_CH1_SIG, + .name = "Channel 1" }, { - .id = 1, - .name = "Channel 1 Quadrature B" - } + .id = STM32_CH2_SIG, + .name = "Channel 2" + }, + { + .id = STM32_CLOCK_SIG, + .name = "Clock", + .ext = stm32_count_clock_ext, + .num_ext = ARRAY_SIZE(stm32_count_clock_ext), + }, + { + .id = STM32_CH3_SIG, + .name = "Channel 3" + }, + { + .id = STM32_CH4_SIG, + .name = "Channel 4" + }, }; static struct counter_synapse stm32_count_synapses[] = { { .actions_list = stm32_synapse_actions, .num_actions = ARRAY_SIZE(stm32_synapse_actions), - .signal = &stm32_signals[0] + .signal = &stm32_signals[STM32_CH1_SIG] }, { .actions_list = stm32_synapse_actions, .num_actions = ARRAY_SIZE(stm32_synapse_actions), - .signal = &stm32_signals[1] - } + .signal = &stm32_signals[STM32_CH2_SIG] + }, + { + .actions_list = stm32_clock_synapse_actions, + .num_actions = ARRAY_SIZE(stm32_clock_synapse_actions), + .signal = &stm32_signals[STM32_CLOCK_SIG] + }, + { + .actions_list = stm32_synapse_actions, + .num_actions = ARRAY_SIZE(stm32_synapse_actions), + .signal = &stm32_signals[STM32_CH3_SIG] + }, + { + .actions_list = stm32_synapse_actions, + .num_actions = ARRAY_SIZE(stm32_synapse_actions), + .signal = &stm32_signals[STM32_CH4_SIG] + }, }; static struct counter_count stm32_counts = { .id = 0, - .name = "Channel 1 Count", + .name = "STM32 Timer Counter", .functions_list = stm32_count_functions, .num_functions = ARRAY_SIZE(stm32_count_functions), .synapses = stm32_count_synapses, @@ -311,13 +608,111 @@ static struct counter_count stm32_counts = { .num_ext = ARRAY_SIZE(stm32_count_ext) }; +static irqreturn_t stm32_timer_cnt_isr(int irq, void *ptr) +{ + struct counter_device *counter = ptr; + struct stm32_timer_cnt *const priv = counter_priv(counter); + u32 clr = GENMASK(31, 0); /* SR flags can be cleared by writing 0 (wr 1 has no effect) */ + u32 sr, dier; + int i; + + regmap_read(priv->regmap, TIM_SR, &sr); + regmap_read(priv->regmap, TIM_DIER, &dier); + /* + * Some status bits in SR don't match with the enable bits in DIER. Only take care of + * the possibly enabled bits in DIER (that matches in between SR and DIER). 
+ */ + dier &= (TIM_DIER_UIE | TIM_DIER_CC1IE | TIM_DIER_CC2IE | TIM_DIER_CC3IE | TIM_DIER_CC4IE); + sr &= dier; + + if (sr & TIM_SR_UIF) { + spin_lock(&priv->lock); + priv->nb_ovf++; + spin_unlock(&priv->lock); + counter_push_event(counter, COUNTER_EVENT_OVERFLOW_UNDERFLOW, 0); + dev_dbg(counter->parent, "COUNTER_EVENT_OVERFLOW_UNDERFLOW\n"); + /* SR flags can be cleared by writing 0, only clear relevant flag */ + clr &= ~TIM_SR_UIF; + } + + /* Check capture events */ + for (i = 0 ; i < priv->nchannels; i++) { + if (sr & TIM_SR_CC_IF(i)) { + counter_push_event(counter, COUNTER_EVENT_CAPTURE, i); + clr &= ~TIM_SR_CC_IF(i); + dev_dbg(counter->parent, "COUNTER_EVENT_CAPTURE, %d\n", i); + } + } + + regmap_write(priv->regmap, TIM_SR, clr); + + return IRQ_HANDLED; +}; + +static void stm32_timer_cnt_detect_channels(struct device *dev, + struct stm32_timer_cnt *priv) +{ + u32 ccer, ccer_backup; + + regmap_read(priv->regmap, TIM_CCER, &ccer_backup); + regmap_set_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE); + regmap_read(priv->regmap, TIM_CCER, &ccer); + regmap_write(priv->regmap, TIM_CCER, ccer_backup); + priv->nchannels = hweight32(ccer & TIM_CCER_CCXE); + + dev_dbg(dev, "has %d cc channels\n", priv->nchannels); +} + +/* encoder supported on TIM1 TIM2 TIM3 TIM4 TIM5 TIM8 */ +#define STM32_TIM_ENCODER_SUPPORTED (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(7)) + +static const char * const stm32_timer_trigger_compat[] = { + "st,stm32-timer-trigger", + "st,stm32h7-timer-trigger", +}; + +static int stm32_timer_cnt_probe_encoder(struct device *dev, + struct stm32_timer_cnt *priv) +{ + struct device *parent = dev->parent; + struct device_node *tnode = NULL, *pnode = parent->of_node; + int i, ret; + u32 idx; + + /* + * Need to retrieve the trigger node index from DT, to be able + * to determine if the counter supports encoder mode. It also + * enforce backward compatibility, and allow to support other + * counter modes in this driver (when the timer doesn't support + * encoder). + */ + for (i = 0; i < ARRAY_SIZE(stm32_timer_trigger_compat) && !tnode; i++) + tnode = of_get_compatible_child(pnode, stm32_timer_trigger_compat[i]); + if (!tnode) { + dev_err(dev, "Can't find trigger node\n"); + return -ENODATA; + } + + ret = of_property_read_u32(tnode, "reg", &idx); + if (ret) { + dev_err(dev, "Can't get index (%d)\n", ret); + return ret; + } + + priv->has_encoder = !!(STM32_TIM_ENCODER_SUPPORTED & BIT(idx)); + + dev_dbg(dev, "encoder support: %s\n", priv->has_encoder ? 
"yes" : "no"); + + return 0; +} + static int stm32_timer_cnt_probe(struct platform_device *pdev) { struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent); struct device *dev = &pdev->dev; struct stm32_timer_cnt *priv; struct counter_device *counter; - int ret; + int i, ret; if (IS_ERR_OR_NULL(ddata)) return -EINVAL; @@ -331,6 +726,13 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev) priv->regmap = ddata->regmap; priv->clk = ddata->clk; priv->max_arr = ddata->max_arr; + priv->nr_irqs = ddata->nr_irqs; + + ret = stm32_timer_cnt_probe_encoder(dev, priv); + if (ret) + return ret; + + stm32_timer_cnt_detect_channels(dev, priv); counter->name = dev_name(dev); counter->parent = dev; @@ -340,8 +742,39 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev) counter->signals = stm32_signals; counter->num_signals = ARRAY_SIZE(stm32_signals); + spin_lock_init(&priv->lock); + platform_set_drvdata(pdev, priv); + /* STM32 Timers can have either 1 global, or 4 dedicated interrupts (optional) */ + if (priv->nr_irqs == 1) { + /* All events reported through the global interrupt */ + ret = devm_request_irq(&pdev->dev, ddata->irq[0], stm32_timer_cnt_isr, + 0, dev_name(dev), counter); + if (ret) { + dev_err(dev, "Failed to request irq %d (err %d)\n", + ddata->irq[0], ret); + return ret; + } + } else { + for (i = 0; i < priv->nr_irqs; i++) { + /* + * Only take care of update IRQ for overflow events, and cc for + * capture events. + */ + if (i != STM32_TIMERS_IRQ_UP && i != STM32_TIMERS_IRQ_CC) + continue; + + ret = devm_request_irq(&pdev->dev, ddata->irq[i], stm32_timer_cnt_isr, + 0, dev_name(dev), counter); + if (ret) { + dev_err(dev, "Failed to request irq %d (err %d)\n", + ddata->irq[i], ret); + return ret; + } + } + } + /* Reset input selector to its default input */ regmap_write(priv->regmap, TIM_TISEL, 0x0); diff --git a/drivers/counter/ti-ecap-capture.c b/drivers/counter/ti-ecap-capture.c index fb1cb17746..675447315c 100644 --- a/drivers/counter/ti-ecap-capture.c +++ b/drivers/counter/ti-ecap-capture.c @@ -369,7 +369,7 @@ static const enum counter_synapse_action ecap_cnt_input_actions[] = { }; static struct counter_comp ecap_cnt_clock_ext[] = { - COUNTER_COMP_SIGNAL_U64("frequency", ecap_cnt_clk_get_freq, NULL), + COUNTER_COMP_FREQUENCY(ecap_cnt_clk_get_freq), }; static const enum counter_signal_polarity ecap_cnt_pol_avail[] = { @@ -537,15 +537,13 @@ static int ecap_cnt_probe(struct platform_device *pdev) return 0; } -static int ecap_cnt_remove(struct platform_device *pdev) +static void ecap_cnt_remove(struct platform_device *pdev) { struct counter_device *counter_dev = platform_get_drvdata(pdev); struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev); if (ecap_dev->enabled) ecap_cnt_capture_disable(counter_dev); - - return 0; } static int ecap_cnt_suspend(struct device *dev) @@ -600,7 +598,7 @@ MODULE_DEVICE_TABLE(of, ecap_cnt_of_match); static struct platform_driver ecap_cnt_driver = { .probe = ecap_cnt_probe, - .remove = ecap_cnt_remove, + .remove_new = ecap_cnt_remove, .driver = { .name = "ecap-capture", .of_match_table = ecap_cnt_of_match, diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c index 4d3de4a358..825ae22c3e 100644 --- a/drivers/counter/ti-eqep.c +++ b/drivers/counter/ti-eqep.c @@ -431,7 +431,7 @@ static int ti_eqep_probe(struct platform_device *pdev) return 0; } -static int ti_eqep_remove(struct platform_device *pdev) +static void ti_eqep_remove(struct platform_device *pdev) { struct counter_device *counter = platform_get_drvdata(pdev); 
struct device *dev = &pdev->dev; @@ -439,8 +439,6 @@ static int ti_eqep_remove(struct platform_device *pdev) counter_unregister(counter); pm_runtime_put_sync(dev); pm_runtime_disable(dev); - - return 0; } static const struct of_device_id ti_eqep_of_match[] = { @@ -451,7 +449,7 @@ MODULE_DEVICE_TABLE(of, ti_eqep_of_match); static struct platform_driver ti_eqep_driver = { .probe = ti_eqep_probe, - .remove = ti_eqep_remove, + .remove_new = ti_eqep_remove, .driver = { .name = "ti-eqep-cnt", .of_match_table = ti_eqep_of_match, diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c index fc275d41d5..66b73c308c 100644 --- a/drivers/cpufreq/amd-pstate-ut.c +++ b/drivers/cpufreq/amd-pstate-ut.c @@ -202,6 +202,7 @@ static void amd_pstate_ut_check_freq(u32 index) int cpu = 0; struct cpufreq_policy *policy = NULL; struct amd_cpudata *cpudata = NULL; + u32 nominal_freq_khz; for_each_possible_cpu(cpu) { policy = cpufreq_cpu_get(cpu); @@ -209,13 +210,14 @@ static void amd_pstate_ut_check_freq(u32 index) break; cpudata = policy->driver_data; - if (!((cpudata->max_freq >= cpudata->nominal_freq) && - (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) && + nominal_freq_khz = cpudata->nominal_freq*1000; + if (!((cpudata->max_freq >= nominal_freq_khz) && + (nominal_freq_khz > cpudata->lowest_nonlinear_freq) && (cpudata->lowest_nonlinear_freq > cpudata->min_freq) && (cpudata->min_freq > 0))) { amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n", - __func__, cpu, cpudata->max_freq, cpudata->nominal_freq, + __func__, cpu, cpudata->max_freq, nominal_freq_khz, cpudata->lowest_nonlinear_freq, cpudata->min_freq); goto skip_test; } @@ -229,13 +231,13 @@ static void amd_pstate_ut_check_freq(u32 index) if (cpudata->boost_supported) { if ((policy->max == cpudata->max_freq) || - (policy->max == cpudata->nominal_freq)) + (policy->max == nominal_freq_khz)) amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS; else { amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n", __func__, cpu, policy->max, cpudata->max_freq, - cpudata->nominal_freq); + nominal_freq_khz); goto skip_test; } } else { diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 6af175e6c0..a092b13ffb 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -247,6 +247,26 @@ static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata) return index; } +static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, + u32 des_perf, u32 max_perf, bool fast_switch) +{ + if (fast_switch) + wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached)); + else + wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, + READ_ONCE(cpudata->cppc_req_cached)); +} + +DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); + +static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, + u32 min_perf, u32 des_perf, + u32 max_perf, bool fast_switch) +{ + static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf, + max_perf, fast_switch); +} + static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp) { int ret; @@ -263,6 +283,9 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp) if (!ret) cpudata->epp_cached = epp; } else { + amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U, + cpudata->max_limit_perf, false); + 
perf_ctrls.energy_perf = epp; ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1); if (ret) { @@ -452,16 +475,6 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata) return static_call(amd_pstate_init_perf)(cpudata); } -static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, - u32 des_perf, u32 max_perf, bool fast_switch) -{ - if (fast_switch) - wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached)); - else - wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, - READ_ONCE(cpudata->cppc_req_cached)); -} - static void cppc_update_perf(struct amd_cpudata *cpudata, u32 min_perf, u32 des_perf, u32 max_perf, bool fast_switch) @@ -475,16 +488,6 @@ static void cppc_update_perf(struct amd_cpudata *cpudata, cppc_set_perf(cpudata->cpu, &perf_ctrls); } -DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); - -static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, - u32 min_perf, u32 des_perf, - u32 max_perf, bool fast_switch) -{ - static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf, - max_perf, fast_switch); -} - static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) { u64 aperf, mperf, tsc; @@ -688,26 +691,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu, cpufreq_cpu_put(policy); } -static int amd_get_min_freq(struct amd_cpudata *cpudata) -{ - return READ_ONCE(cpudata->min_freq); -} - -static int amd_get_max_freq(struct amd_cpudata *cpudata) -{ - return READ_ONCE(cpudata->max_freq); -} - -static int amd_get_nominal_freq(struct amd_cpudata *cpudata) -{ - return READ_ONCE(cpudata->nominal_freq); -} - -static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata) -{ - return READ_ONCE(cpudata->lowest_nonlinear_freq); -} - static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state) { struct amd_cpudata *cpudata = policy->driver_data; @@ -860,7 +843,37 @@ free_cpufreq_put: mutex_unlock(&amd_pstate_driver_lock); } -/** +/* + * Get pstate transition delay time from ACPI tables that firmware set + * instead of using hardcode value directly. + */ +static u32 amd_pstate_get_transition_delay_us(unsigned int cpu) +{ + u32 transition_delay_ns; + + transition_delay_ns = cppc_get_transition_latency(cpu); + if (transition_delay_ns == CPUFREQ_ETERNAL) + return AMD_PSTATE_TRANSITION_DELAY; + + return transition_delay_ns / NSEC_PER_USEC; +} + +/* + * Get pstate transition latency value from ACPI tables that firmware + * set instead of using hardcode value directly. + */ +static u32 amd_pstate_get_transition_latency(unsigned int cpu) +{ + u32 transition_latency; + + transition_latency = cppc_get_transition_latency(cpu); + if (transition_latency == CPUFREQ_ETERNAL) + return AMD_PSTATE_TRANSITION_LATENCY; + + return transition_latency; +} + +/* * amd_pstate_init_freq: Initialize the max_freq, min_freq, * nominal_freq and lowest_nonlinear_freq for * the @cpudata object. 
@@ -881,7 +894,6 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata) u32 boost_ratio, lowest_nonlinear_ratio; struct cppc_perf_caps cppc_perf; - ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); if (ret) return ret; @@ -917,7 +929,7 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata) static int amd_pstate_cpu_init(struct cpufreq_policy *policy) { - int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret; + int min_freq, max_freq, nominal_freq, ret; struct device *dev; struct amd_cpudata *cpudata; @@ -946,20 +958,21 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) if (ret) goto free_cpudata1; - min_freq = amd_get_min_freq(cpudata); - max_freq = amd_get_max_freq(cpudata); - nominal_freq = amd_get_nominal_freq(cpudata); - lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata); + min_freq = READ_ONCE(cpudata->min_freq); + max_freq = READ_ONCE(cpudata->max_freq); + nominal_freq = READ_ONCE(cpudata->nominal_freq); - if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) { - dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n", - min_freq, max_freq); + if (min_freq <= 0 || max_freq <= 0 || + nominal_freq <= 0 || min_freq > max_freq) { + dev_err(dev, + "min_freq(%d) or max_freq(%d) or nominal_freq (%d) value is incorrect, check _CPC in ACPI tables\n", + min_freq, max_freq, nominal_freq); ret = -EINVAL; goto free_cpudata1; } - policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY; - policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY; + policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu); + policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu); policy->min = min_freq; policy->max = max_freq; @@ -1052,7 +1065,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy, int max_freq; struct amd_cpudata *cpudata = policy->driver_data; - max_freq = amd_get_max_freq(cpudata); + max_freq = READ_ONCE(cpudata->max_freq); if (max_freq < 0) return max_freq; @@ -1065,7 +1078,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli int freq; struct amd_cpudata *cpudata = policy->driver_data; - freq = amd_get_lowest_nonlinear_freq(cpudata); + freq = READ_ONCE(cpudata->lowest_nonlinear_freq); if (freq < 0) return freq; @@ -1376,7 +1389,7 @@ static bool amd_pstate_acpi_pm_profile_undefined(void) static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) { - int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret; + int min_freq, max_freq, nominal_freq, ret; struct amd_cpudata *cpudata; struct device *dev; u64 value; @@ -1407,13 +1420,14 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) if (ret) goto free_cpudata1; - min_freq = amd_get_min_freq(cpudata); - max_freq = amd_get_max_freq(cpudata); - nominal_freq = amd_get_nominal_freq(cpudata); - lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata); - if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) { - dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n", - min_freq, max_freq); + min_freq = READ_ONCE(cpudata->min_freq); + max_freq = READ_ONCE(cpudata->max_freq); + nominal_freq = READ_ONCE(cpudata->nominal_freq); + if (min_freq <= 0 || max_freq <= 0 || + nominal_freq <= 0 || min_freq > max_freq) { + dev_err(dev, + "min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n", + min_freq, max_freq, nominal_freq); ret = -EINVAL; goto free_cpudata1; } diff --git 
a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h index bc341f3590..e6a28e7f4d 100644 --- a/drivers/cpufreq/amd-pstate.h +++ b/drivers/cpufreq/amd-pstate.h @@ -42,13 +42,17 @@ struct amd_aperf_mperf { * @lowest_perf: the absolute lowest performance level of the processor * @prefcore_ranking: the preferred core ranking, the higher value indicates a higher * priority. - * @max_freq: the frequency that mapped to highest_perf - * @min_freq: the frequency that mapped to lowest_perf - * @nominal_freq: the frequency that mapped to nominal_perf - * @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf + * @min_limit_perf: Cached value of the performance corresponding to policy->min + * @max_limit_perf: Cached value of the performance corresponding to policy->max + * @min_limit_freq: Cached value of policy->min (in khz) + * @max_limit_freq: Cached value of policy->max (in khz) + * @max_freq: the frequency (in khz) that mapped to highest_perf + * @min_freq: the frequency (in khz) that mapped to lowest_perf + * @nominal_freq: the frequency (in khz) that mapped to nominal_perf + * @lowest_nonlinear_freq: the frequency (in khz) that mapped to lowest_nonlinear_perf * @cur: Difference of Aperf/Mperf/tsc count between last and current sample * @prev: Last Aperf/Mperf/tsc count value read from register - * @freq: current cpu frequency value + * @freq: current cpu frequency value (in khz) * @boost_supported: check whether the Processor or SBIOS supports boost mode * @hw_prefcore: check whether HW supports preferred core featue. * Only when hw_prefcore and early prefcore param are true, diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index b993a49808..c74dd1e01e 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -104,6 +104,9 @@ static const struct of_device_id allowlist[] __initconst = { */ static const struct of_device_id blocklist[] __initconst = { { .compatible = "allwinner,sun50i-h6", }, + { .compatible = "allwinner,sun50i-h616", }, + { .compatible = "allwinner,sun50i-h618", }, + { .compatible = "allwinner,sun50i-h700", }, { .compatible = "apple,arm-platform", }, @@ -195,19 +198,18 @@ static const struct of_device_id blocklist[] __initconst = { static bool __init cpu0_node_has_opp_v2_prop(void) { - struct device_node *np = of_cpu_device_node_get(0); + struct device_node *np __free(device_node) = of_cpu_device_node_get(0); bool ret = false; if (of_property_present(np, "operating-points-v2")) ret = true; - of_node_put(np); return ret; } static int __init cpufreq_dt_platdev_init(void) { - struct device_node *np = of_find_node_by_path("/"); + struct device_node *np __free(device_node) = of_find_node_by_path("/"); const struct of_device_id *match; const void *data = NULL; @@ -223,11 +225,9 @@ static int __init cpufreq_dt_platdev_init(void) if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np)) goto create_pdev; - of_node_put(np); return -ENODEV; create_pdev: - of_node_put(np); return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt", -1, data, sizeof(struct cpufreq_dt_platform_data))); diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 2d83bbc65d..907e22632f 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -68,12 +68,9 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index) */ static const char *find_supply_name(struct device *dev) { - struct device_node *np; + struct device_node *np 
__free(device_node) = of_node_get(dev->of_node); struct property *pp; int cpu = dev->id; - const char *name = NULL; - - np = of_node_get(dev->of_node); /* This must be valid for sure */ if (WARN_ON(!np)) @@ -82,22 +79,16 @@ static const char *find_supply_name(struct device *dev) /* Try "cpu0" for older DTs */ if (!cpu) { pp = of_find_property(np, "cpu0-supply", NULL); - if (pp) { - name = "cpu0"; - goto node_put; - } + if (pp) + return "cpu0"; } pp = of_find_property(np, "cpu-supply", NULL); - if (pp) { - name = "cpu"; - goto node_put; - } + if (pp) + return "cpu"; dev_dbg(dev, "no regulator for cpu%d\n", cpu); -node_put: - of_node_put(np); - return name; + return NULL; } static int cpufreq_init(struct cpufreq_policy *policy) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index d7630d9cdb..9e5060b278 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2586,6 +2586,40 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) } EXPORT_SYMBOL(cpufreq_get_policy); +DEFINE_PER_CPU(unsigned long, cpufreq_pressure); + +/** + * cpufreq_update_pressure() - Update cpufreq pressure for CPUs + * @policy: cpufreq policy of the CPUs. + * + * Update the value of cpufreq pressure for all @cpus in the policy. + */ +static void cpufreq_update_pressure(struct cpufreq_policy *policy) +{ + unsigned long max_capacity, capped_freq, pressure; + u32 max_freq; + int cpu; + + cpu = cpumask_first(policy->related_cpus); + max_freq = arch_scale_freq_ref(cpu); + capped_freq = policy->max; + + /* + * Handle properly the boost frequencies, which should simply clean + * the cpufreq pressure value. + */ + if (max_freq <= capped_freq) { + pressure = 0; + } else { + max_capacity = arch_scale_cpu_capacity(cpu); + pressure = max_capacity - + mult_frac(max_capacity, capped_freq, max_freq); + } + + for_each_cpu(cpu, policy->related_cpus) + WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure); +} + /** * cpufreq_set_policy - Modify cpufreq policy parameters. * @policy: Policy object to modify. 
@@ -2641,6 +2675,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H); trace_cpu_frequency_limits(policy); + cpufreq_update_pressure(policy); + policy->cached_target_freq = UINT_MAX; pr_debug("new min and max freqs are %u - %u kHz\n", diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index c17dc51a5a..10e80d912b 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -70,7 +70,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy, struct cpufreq_frequency_table *table) { struct cpufreq_frequency_table *pos; - unsigned int freq, next_larger = ~0; + unsigned int freq, prev_smaller = 0; bool found = false; pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", @@ -86,12 +86,12 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy, break; } - if ((next_larger > freq) && (freq > policy->max)) - next_larger = freq; + if ((prev_smaller < freq) && (freq <= policy->max)) + prev_smaller = freq; } if (!found) { - policy->max = next_larger; + policy->max = prev_smaller; cpufreq_verify_within_cpu_limits(policy); } @@ -194,7 +194,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, } if (optimal.driver_data > i) { if (suboptimal.driver_data > i) { - WARN(1, "Invalid frequency table: %d\n", policy->cpu); + WARN(1, "Invalid frequency table: %u\n", policy->cpu); return 0; } @@ -254,7 +254,7 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf, if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ)) continue; - count += sprintf(&buf[count], "%d ", pos->frequency); + count += sprintf(&buf[count], "%u ", pos->frequency); } count += sprintf(&buf[count], "\n"); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3405bf69b1..c31914a987 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -173,7 +173,6 @@ struct vid_data { * based on the MSR_IA32_MISC_ENABLE value and whether or * not the maximum reported turbo P-state is different from * the maximum reported non-turbo one. - * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq. * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo * P-state capacity. 
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo @@ -182,7 +181,6 @@ struct vid_data { struct global_params { bool no_turbo; bool turbo_disabled; - bool turbo_disabled_mf; int max_perf_pct; int min_perf_pct; }; @@ -213,7 +211,7 @@ struct global_params { * @epp_policy: Last saved policy used to set EPP/EPB * @epp_default: Power on default HWP energy performance * preference/bias - * @epp_cached Cached HWP energy-performance preference value + * @epp_cached: Cached HWP energy-performance preference value * @hwp_req_cached: Cached value of the last HWP Request MSR * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR * @last_io_update: Last time when IO wake flag was set @@ -292,11 +290,11 @@ struct pstate_funcs { static struct pstate_funcs pstate_funcs __read_mostly; -static int hwp_active __read_mostly; -static int hwp_mode_bdw __read_mostly; -static bool per_cpu_limits __read_mostly; +static bool hwp_active __ro_after_init; +static int hwp_mode_bdw __ro_after_init; +static bool per_cpu_limits __ro_after_init; +static bool hwp_forced __ro_after_init; static bool hwp_boost __read_mostly; -static bool hwp_forced __read_mostly; static struct cpufreq_driver *intel_pstate_driver __read_mostly; @@ -593,12 +591,13 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq); } -static inline void update_turbo_state(void) +static bool turbo_is_disabled(void) { u64 misc_en; rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); - global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE; + + return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); } static int min_perf_pct_min(void) @@ -1153,12 +1152,16 @@ static void intel_pstate_update_policies(void) static void __intel_pstate_update_max_freq(struct cpudata *cpudata, struct cpufreq_policy *policy) { - policy->cpuinfo.max_freq = global.turbo_disabled_mf ? + if (hwp_active) + intel_pstate_get_hwp_cap(cpudata); + + policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ? cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; + refresh_frequency_limits(policy); } -static void intel_pstate_update_max_freq(unsigned int cpu) +static void intel_pstate_update_limits(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); @@ -1170,25 +1173,12 @@ static void intel_pstate_update_max_freq(unsigned int cpu) cpufreq_cpu_release(policy); } -static void intel_pstate_update_limits(unsigned int cpu) +static void intel_pstate_update_limits_for_all(void) { - mutex_lock(&intel_pstate_driver_lock); - - update_turbo_state(); - /* - * If turbo has been turned on or off globally, policy limits for - * all CPUs need to be updated to reflect that. 
- */ - if (global.turbo_disabled_mf != global.turbo_disabled) { - global.turbo_disabled_mf = global.turbo_disabled; - arch_set_max_freq_ratio(global.turbo_disabled); - for_each_possible_cpu(cpu) - intel_pstate_update_max_freq(cpu); - } else { - cpufreq_update_policy(cpu); - } + int cpu; - mutex_unlock(&intel_pstate_driver_lock); + for_each_possible_cpu(cpu) + intel_pstate_update_limits(cpu); } /************************** sysfs begin ************************/ @@ -1286,11 +1276,7 @@ static ssize_t show_no_turbo(struct kobject *kobj, return -EAGAIN; } - update_turbo_state(); - if (global.turbo_disabled) - ret = sprintf(buf, "%u\n", global.turbo_disabled); - else - ret = sprintf(buf, "%u\n", global.no_turbo); + ret = sprintf(buf, "%u\n", global.no_turbo); mutex_unlock(&intel_pstate_driver_lock); @@ -1301,32 +1287,39 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; - int ret; + bool no_turbo; - ret = sscanf(buf, "%u", &input); - if (ret != 1) + if (sscanf(buf, "%u", &input) != 1) return -EINVAL; mutex_lock(&intel_pstate_driver_lock); if (!intel_pstate_driver) { - mutex_unlock(&intel_pstate_driver_lock); - return -EAGAIN; + count = -EAGAIN; + goto unlock_driver; } - mutex_lock(&intel_pstate_limits_lock); + no_turbo = !!clamp_t(int, input, 0, 1); - update_turbo_state(); - if (global.turbo_disabled) { - pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); - mutex_unlock(&intel_pstate_limits_lock); - mutex_unlock(&intel_pstate_driver_lock); - return -EPERM; + WRITE_ONCE(global.turbo_disabled, turbo_is_disabled()); + if (global.turbo_disabled && !no_turbo) { + pr_notice("Turbo disabled by BIOS or unavailable on processor\n"); + count = -EPERM; + if (global.no_turbo) + goto unlock_driver; + else + no_turbo = 1; + } + + if (no_turbo == global.no_turbo) { + goto unlock_driver; } - global.no_turbo = clamp_t(int, input, 0, 1); + WRITE_ONCE(global.no_turbo, no_turbo); + + mutex_lock(&intel_pstate_limits_lock); - if (global.no_turbo) { + if (no_turbo) { struct cpudata *cpu = all_cpu_data[0]; int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; @@ -1337,9 +1330,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, mutex_unlock(&intel_pstate_limits_lock); - intel_pstate_update_policies(); - arch_set_max_freq_ratio(global.no_turbo); + intel_pstate_update_limits_for_all(); + arch_set_max_freq_ratio(no_turbo); +unlock_driver: mutex_unlock(&intel_pstate_driver_lock); return count; @@ -1620,7 +1614,6 @@ static void intel_pstate_notify_work(struct work_struct *work) struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu); if (policy) { - intel_pstate_get_hwp_cap(cpudata); __intel_pstate_update_max_freq(cpudata, policy); cpufreq_cpu_release(policy); @@ -1635,11 +1628,10 @@ static cpumask_t hwp_intr_enable_mask; void notify_hwp_interrupt(void) { unsigned int this_cpu = smp_processor_id(); - struct cpudata *cpudata; unsigned long flags; u64 value; - if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) + if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) return; rdmsrl_safe(MSR_HWP_STATUS, &value); @@ -1651,24 +1643,8 @@ void notify_hwp_interrupt(void) if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask)) goto ack_intr; - /* - * Currently we never free all_cpu_data. And we can't reach here - * without this allocated. But for safety for future changes, added - * check. 
- */ - if (unlikely(!READ_ONCE(all_cpu_data))) - goto ack_intr; - - /* - * The free is done during cleanup, when cpufreq registry is failed. - * We wouldn't be here if it fails on init or switch status. But for - * future changes, added check. - */ - cpudata = READ_ONCE(all_cpu_data[this_cpu]); - if (unlikely(!cpudata)) - goto ack_intr; - - schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10)); + schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work, + msecs_to_jiffies(10)); spin_unlock_irqrestore(&hwp_notify_lock, flags); @@ -1681,7 +1657,7 @@ ack_intr: static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) { - unsigned long flags; + bool cancel_work; if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) return; @@ -1689,22 +1665,22 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); - spin_lock_irqsave(&hwp_notify_lock, flags); - if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask)) - cancel_delayed_work(&cpudata->hwp_notify_work); - spin_unlock_irqrestore(&hwp_notify_lock, flags); + spin_lock_irq(&hwp_notify_lock); + cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask); + spin_unlock_irq(&hwp_notify_lock); + + if (cancel_work) + cancel_delayed_work_sync(&cpudata->hwp_notify_work); } static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) { /* Enable HWP notification interrupt for guaranteed performance change */ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) { - unsigned long flags; - - spin_lock_irqsave(&hwp_notify_lock, flags); + spin_lock_irq(&hwp_notify_lock); INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work); cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask); - spin_unlock_irqrestore(&hwp_notify_lock, flags); + spin_unlock_irq(&hwp_notify_lock); /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01); @@ -1790,7 +1766,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate) u32 vid; val = (u64)pstate << 8; - if (global.no_turbo && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) val |= (u64)1 << 32; vid_fp = cpudata->vid.min + mul_fp( @@ -1955,7 +1931,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate) u64 val; val = (u64)pstate << 8; - if (global.no_turbo && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) val |= (u64)1 << 32; return val; @@ -2028,14 +2004,6 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu) intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); } -static void intel_pstate_max_within_limits(struct cpudata *cpu) -{ - int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); - - update_turbo_state(); - intel_pstate_set_pstate(cpu, pstate); -} - static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu); @@ -2261,7 +2229,7 @@ static inline int32_t get_target_pstate(struct cpudata *cpu) sample->busy_scaled = busy_frac * 100; - target = global.no_turbo || global.turbo_disabled ? + target = READ_ONCE(global.no_turbo) ? 
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; target += target >> 2; target = mul_fp(target, busy_frac); @@ -2305,8 +2273,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu) struct sample *sample; int target_pstate; - update_turbo_state(); - target_pstate = get_target_pstate(cpu); target_pstate = intel_pstate_prepare_request(cpu, target_pstate); trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); @@ -2436,6 +2402,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); +#ifdef CONFIG_ACPI static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { X86_MATCH(BROADWELL_D, core_funcs), X86_MATCH(BROADWELL_X, core_funcs), @@ -2444,6 +2411,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), {} }; +#endif static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { X86_MATCH(KABYLAKE, core_funcs), @@ -2525,7 +2493,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu) static int intel_pstate_get_max_freq(struct cpudata *cpu) { - return global.turbo_disabled || global.no_turbo ? + return READ_ONCE(global.no_turbo) ? cpu->pstate.max_freq : cpu->pstate.turbo_freq; } @@ -2610,12 +2578,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) intel_pstate_update_perf_limits(cpu, policy->min, policy->max); if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { + int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); + /* * NOHZ_FULL CPUs need this as the governor callback may not * be invoked on them. */ intel_pstate_clear_update_util_hook(policy->cpu); - intel_pstate_max_within_limits(cpu); + intel_pstate_set_pstate(cpu, pstate); } else { intel_pstate_set_update_util_hook(policy->cpu); } @@ -2658,10 +2628,9 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, { int max_freq; - update_turbo_state(); if (hwp_active) { intel_pstate_get_hwp_cap(cpu); - max_freq = global.no_turbo || global.turbo_disabled ? + max_freq = READ_ONCE(global.no_turbo) ? cpu->pstate.max_freq : cpu->pstate.turbo_freq; } else { max_freq = intel_pstate_get_max_freq(cpu); @@ -2755,9 +2724,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = cpu->pstate.min_freq; - update_turbo_state(); - global.turbo_disabled_mf = global.turbo_disabled; - policy->cpuinfo.max_freq = global.turbo_disabled ? + policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ? cpu->pstate.max_freq : cpu->pstate.turbo_freq; policy->min = policy->cpuinfo.min_freq; @@ -2922,8 +2889,6 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; int target_pstate; - update_turbo_state(); - freqs.old = policy->cur; freqs.new = target_freq; @@ -2945,8 +2910,6 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, struct cpudata *cpu = all_cpu_data[policy->cpu]; int target_pstate; - update_turbo_state(); - target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq); target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); @@ -2964,9 +2927,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum, int old_pstate = cpu->pstate.current_pstate; int cap_pstate, min_pstate, max_pstate, target_pstate; - update_turbo_state(); - cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) : - HWP_HIGHEST_PERF(hwp_cap); + cap_pstate = READ_ONCE(global.no_turbo) ? 
+ HWP_GUARANTEED_PERF(hwp_cap) : + HWP_HIGHEST_PERF(hwp_cap); /* Optimization: Avoid unnecessary divisions. */ @@ -3134,10 +3097,8 @@ static void intel_pstate_driver_cleanup(void) if (intel_pstate_driver == &intel_pstate) intel_pstate_clear_update_util_hook(cpu); - spin_lock(&hwp_notify_lock); kfree(all_cpu_data[cpu]); WRITE_ONCE(all_cpu_data[cpu], NULL); - spin_unlock(&hwp_notify_lock); } } cpus_read_unlock(); @@ -3154,6 +3115,10 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) memset(&global, 0, sizeof(global)); global.max_perf_pct = 100; + global.turbo_disabled = turbo_is_disabled(); + global.no_turbo = global.turbo_disabled; + + arch_set_max_freq_ratio(global.turbo_disabled); intel_pstate_driver = driver; ret = cpufreq_register_driver(intel_pstate_driver); @@ -3465,7 +3430,7 @@ static int __init intel_pstate_init(void) * deal with it. */ if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { - WRITE_ONCE(hwp_active, 1); + hwp_active = true; hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; intel_cpufreq.attr = hwp_cpufreq_attrs; diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index a0a61919bc..518606adf1 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -707,6 +707,15 @@ static const struct mtk_cpufreq_platform_data mt7623_platform_data = { .ccifreq_supported = false, }; +static const struct mtk_cpufreq_platform_data mt7988_platform_data = { + .min_volt_shift = 100000, + .max_volt_shift = 200000, + .proc_max_volt = 900000, + .sram_min_volt = 0, + .sram_max_volt = 1150000, + .ccifreq_supported = true, +}; + static const struct mtk_cpufreq_platform_data mt8183_platform_data = { .min_volt_shift = 100000, .max_volt_shift = 200000, @@ -740,6 +749,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = { { .compatible = "mediatek,mt2712", .data = &mt2701_platform_data }, { .compatible = "mediatek,mt7622", .data = &mt7622_platform_data }, { .compatible = "mediatek,mt7623", .data = &mt7623_platform_data }, + { .compatible = "mediatek,mt7988a", .data = &mt7988_platform_data }, { .compatible = "mediatek,mt8167", .data = &mt8516_platform_data }, { .compatible = "mediatek,mt817x", .data = &mt2701_platform_data }, { .compatible = "mediatek,mt8173", .data = &mt2701_platform_data }, diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index df3567c1e9..6c9f0888a2 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c @@ -120,9 +120,9 @@ static int cpu_750fx_cpu_speed(int low_speed) /* tweak L2 for high voltage */ if (has_cpu_l2lve) { - hid2 = mfspr(SPRN_HID2); + hid2 = mfspr(SPRN_HID2_750FX); hid2 &= ~0x2000; - mtspr(SPRN_HID2, hid2); + mtspr(SPRN_HID2_750FX, hid2); } } #ifdef CONFIG_PPC_BOOK3S_32 @@ -131,9 +131,9 @@ static int cpu_750fx_cpu_speed(int low_speed) if (low_speed == 1) { /* tweak L2 for low voltage */ if (has_cpu_l2lve) { - hid2 = mfspr(SPRN_HID2); + hid2 = mfspr(SPRN_HID2_750FX); hid2 |= 0x2000; - mtspr(SPRN_HID2, hid2); + mtspr(SPRN_HID2_750FX, hid2); } /* ramping down, set voltage last */ diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index 70b0f21968..ec8df5496a 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -347,8 +347,8 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) throttled_freq = freq_hz / HZ_PER_KHZ; - /* Update thermal pressure (the boost frequencies are accepted) */ 
- arch_update_thermal_pressure(policy->related_cpus, throttled_freq); + /* Update HW pressure (the boost frequencies are accepted) */ + arch_update_hw_pressure(policy->related_cpus, throttled_freq); /* * In the unlikely case policy is unregistered do not enable diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index ea05d9d674..5004e1dbc7 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -480,23 +480,30 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()), GFP_KERNEL); - if (!drv) + if (!drv) { + of_node_put(np); return -ENOMEM; + } match = pdev->dev.platform_data; drv->data = match->data; - if (!drv->data) + if (!drv->data) { + of_node_put(np); return -ENODEV; + } if (drv->data->get_version) { speedbin_nvmem = of_nvmem_cell_get(np, NULL); - if (IS_ERR(speedbin_nvmem)) + if (IS_ERR(speedbin_nvmem)) { + of_node_put(np); return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), "Could not get nvmem cell\n"); + } ret = drv->data->get_version(cpu_dev, speedbin_nvmem, &pvs_name, drv); if (ret) { + of_node_put(np); nvmem_cell_put(speedbin_nvmem); return ret; } diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index 32a9c88f8f..ef83e4bf26 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -10,6 +10,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -18,26 +19,155 @@ #include #include -#define MAX_NAME_LEN 7 - #define NVMEM_MASK 0x7 #define NVMEM_SHIFT 5 static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev; +struct sunxi_cpufreq_data { + u32 (*efuse_xlate)(u32 speedbin); +}; + +static u32 sun50i_h6_efuse_xlate(u32 speedbin) +{ + u32 efuse_value; + + efuse_value = (speedbin >> NVMEM_SHIFT) & NVMEM_MASK; + + /* + * We treat unexpected efuse values as if the SoC was from + * the slowest bin. Expected efuse values are 1-3, slowest + * to fastest. + */ + if (efuse_value >= 1 && efuse_value <= 3) + return efuse_value - 1; + else + return 0; +} + +static int get_soc_id_revision(void) +{ +#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY + return arm_smccc_get_soc_id_revision(); +#else + return SMCCC_RET_NOT_SUPPORTED; +#endif +} + +/* + * Judging by the OPP tables in the vendor BSP, the quality order of the + * returned speedbin index is 4 -> 0/2 -> 3 -> 1, from worst to best. + * 0 and 2 seem identical from the OPP tables' point of view. 
+ */ +static u32 sun50i_h616_efuse_xlate(u32 speedbin) +{ + int ver_bits = get_soc_id_revision(); + u32 value = 0; + + switch (speedbin & 0xffff) { + case 0x2000: + value = 0; + break; + case 0x2400: + case 0x7400: + case 0x2c00: + case 0x7c00: + if (ver_bits != SMCCC_RET_NOT_SUPPORTED && ver_bits <= 1) { + /* ic version A/B */ + value = 1; + } else { + /* ic version C and later version */ + value = 2; + } + break; + case 0x5000: + case 0x5400: + case 0x6000: + value = 3; + break; + case 0x5c00: + value = 4; + break; + case 0x5d00: + value = 0; + break; + default: + pr_warn("sun50i-cpufreq-nvmem: unknown speed bin 0x%x, using default bin 0\n", + speedbin & 0xffff); + value = 0; + break; + } + + return value; +} + +static struct sunxi_cpufreq_data sun50i_h6_cpufreq_data = { + .efuse_xlate = sun50i_h6_efuse_xlate, +}; + +static struct sunxi_cpufreq_data sun50i_h616_cpufreq_data = { + .efuse_xlate = sun50i_h616_efuse_xlate, +}; + +static const struct of_device_id cpu_opp_match_list[] = { + { .compatible = "allwinner,sun50i-h6-operating-points", + .data = &sun50i_h6_cpufreq_data, + }, + { .compatible = "allwinner,sun50i-h616-operating-points", + .data = &sun50i_h616_cpufreq_data, + }, + {} +}; + +/** + * dt_has_supported_hw() - Check if any OPPs use opp-supported-hw + * + * If we ask the cpufreq framework to use the opp-supported-hw feature, it + * will ignore every OPP node without that DT property. If none of the OPPs + * have it, the driver will fail probing, due to the lack of OPPs. + * + * Returns true if we have at least one OPP with the opp-supported-hw property. + */ +static bool dt_has_supported_hw(void) +{ + bool has_opp_supported_hw = false; + struct device_node *np; + struct device *cpu_dev; + + cpu_dev = get_cpu_device(0); + if (!cpu_dev) + return false; + + np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + if (!np) + return false; + + for_each_child_of_node_scoped(np, opp) { + if (of_find_property(opp, "opp-supported-hw", NULL)) { + has_opp_supported_hw = true; + break; + } + } + + of_node_put(np); + + return has_opp_supported_hw; +} + /** * sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value - * @versions: Set to the value parsed from efuse * - * Returns 0 if success. + * Returns non-negative speed bin index on success, a negative error + * value otherwise. */ -static int sun50i_cpufreq_get_efuse(u32 *versions) +static int sun50i_cpufreq_get_efuse(void) { + const struct sunxi_cpufreq_data *opp_data; struct nvmem_cell *speedbin_nvmem; + const struct of_device_id *match; struct device_node *np; struct device *cpu_dev; - u32 *speedbin, efuse_value; - size_t len; + u32 *speedbin; int ret; cpu_dev = get_cpu_device(0); @@ -48,12 +178,12 @@ static int sun50i_cpufreq_get_efuse(u32 *versions) if (!np) return -ENOENT; - ret = of_device_is_compatible(np, - "allwinner,sun50i-h6-operating-points"); - if (!ret) { + match = of_match_node(cpu_opp_match_list, np); + if (!match) { of_node_put(np); return -ENOENT; } + opp_data = match->data; speedbin_nvmem = of_nvmem_cell_get(np, NULL); of_node_put(np); @@ -61,33 +191,25 @@ static int sun50i_cpufreq_get_efuse(u32 *versions) return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), "Could not get nvmem cell\n"); - speedbin = nvmem_cell_read(speedbin_nvmem, &len); + speedbin = nvmem_cell_read(speedbin_nvmem, NULL); nvmem_cell_put(speedbin_nvmem); if (IS_ERR(speedbin)) return PTR_ERR(speedbin); - efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK; - - /* - * We treat unexpected efuse values as if the SoC was from - * the slowest bin. 
Expected efuse values are 1-3, slowest - * to fastest. - */ - if (efuse_value >= 1 && efuse_value <= 3) - *versions = efuse_value - 1; - else - *versions = 0; + ret = opp_data->efuse_xlate(*speedbin); kfree(speedbin); - return 0; + + return ret; }; static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) { int *opp_tokens; - char name[MAX_NAME_LEN]; - unsigned int cpu; - u32 speed = 0; + char name[] = "speedXXXXXXXXXXX"; /* Integers can take 11 chars max */ + unsigned int cpu, supported_hw; + struct dev_pm_opp_config config = {}; + int speed; int ret; opp_tokens = kcalloc(num_possible_cpus(), sizeof(*opp_tokens), @@ -95,13 +217,24 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) if (!opp_tokens) return -ENOMEM; - ret = sun50i_cpufreq_get_efuse(&speed); - if (ret) { + speed = sun50i_cpufreq_get_efuse(); + if (speed < 0) { kfree(opp_tokens); - return ret; + return speed; } - snprintf(name, MAX_NAME_LEN, "speed%d", speed); + /* + * We need at least one OPP with the "opp-supported-hw" property, + * or else the upper layers will ignore every OPP and will bail out. + */ + if (dt_has_supported_hw()) { + supported_hw = 1U << speed; + config.supported_hw = &supported_hw; + config.supported_hw_count = 1; + } + + snprintf(name, sizeof(name), "speed%d", speed); + config.prop_name = name; for_each_possible_cpu(cpu) { struct device *cpu_dev = get_cpu_device(cpu); @@ -111,12 +244,11 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) goto free_opp; } - opp_tokens[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name); - if (opp_tokens[cpu] < 0) { - ret = opp_tokens[cpu]; - pr_err("Failed to set prop name\n"); + ret = dev_pm_opp_set_config(cpu_dev, &config); + if (ret < 0) goto free_opp; - } + + opp_tokens[cpu] = ret; } cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1, @@ -131,7 +263,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) free_opp: for_each_possible_cpu(cpu) - dev_pm_opp_put_prop_name(opp_tokens[cpu]); + dev_pm_opp_clear_config(opp_tokens[cpu]); kfree(opp_tokens); return ret; @@ -145,7 +277,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev) platform_device_unregister(cpufreq_dt_pdev); for_each_possible_cpu(cpu) - dev_pm_opp_put_prop_name(opp_tokens[cpu]); + dev_pm_opp_clear_config(opp_tokens[cpu]); kfree(opp_tokens); } @@ -160,6 +292,9 @@ static struct platform_driver sun50i_cpufreq_driver = { static const struct of_device_id sun50i_cpufreq_match_list[] = { { .compatible = "allwinner,sun50i-h6" }, + { .compatible = "allwinner,sun50i-h616" }, + { .compatible = "allwinner,sun50i-h618" }, + { .compatible = "allwinner,sun50i-h700" }, {} }; MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list); diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c index aae951d4e7..514146d98b 100644 --- a/drivers/cpufreq/tegra124-cpufreq.c +++ b/drivers/cpufreq/tegra124-cpufreq.c @@ -52,12 +52,15 @@ out: static int tegra124_cpufreq_probe(struct platform_device *pdev) { + struct device_node *np __free(device_node) = of_cpu_device_node_get(0); struct tegra124_cpufreq_priv *priv; - struct device_node *np; struct device *cpu_dev; struct platform_device_info cpufreq_dt_devinfo = {}; int ret; + if (!np) + return -ENODEV; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -66,15 +69,9 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev) if (!cpu_dev) return -ENODEV; - np = of_cpu_device_node_get(0); - if (!np) - return -ENODEV; - 
priv->cpu_clk = of_clk_get_by_name(np, "cpu_g"); - if (IS_ERR(priv->cpu_clk)) { - ret = PTR_ERR(priv->cpu_clk); - goto out_put_np; - } + if (IS_ERR(priv->cpu_clk)) + return PTR_ERR(priv->cpu_clk); priv->dfll_clk = of_clk_get_by_name(np, "dfll"); if (IS_ERR(priv->dfll_clk)) { @@ -110,8 +107,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); - of_node_put(np); - return 0; out_put_pllp_clk: @@ -122,8 +117,6 @@ out_put_dfll_clk: clk_put(priv->dfll_clk); out_put_cpu_clk: clk_put(priv->cpu_clk); -out_put_np: - of_node_put(np); return ret; } diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 46c41e2ca7..5af85c4cba 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -347,12 +347,10 @@ static const struct of_device_id ti_cpufreq_of_match[] = { static const struct of_device_id *ti_cpufreq_match_node(void) { - struct device_node *np; + struct device_node *np __free(device_node) = of_find_node_by_path("/"); const struct of_device_id *match; - np = of_find_node_by_path("/"); match = of_match_node(ti_cpufreq_of_match, np); - of_node_put(np); return match; } @@ -419,7 +417,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev) ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config); if (ret < 0) { - dev_err(opp_data->cpu_dev, "Failed to set OPP config\n"); + dev_err_probe(opp_data->cpu_dev, ret, "Failed to set OPP config\n"); goto fail_put_node; } diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index 9acde71558..bb8761c8a4 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -439,13 +439,8 @@ static int cpuidle_coupled_clear_pokes(int cpu) static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled) { - cpumask_t cpus; - int ret; - - cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); - ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus); - - return ret; + return cpumask_first_and_and(cpu_online_mask, &coupled->coupled_cpus, + &cpuidle_coupled_poke_pending) < nr_cpu_ids; } /** @@ -626,9 +621,7 @@ out: static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled) { - cpumask_t cpus; - cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); - coupled->online_count = cpumask_weight(&cpus); + coupled->online_count = cpumask_weight_and(cpu_online_mask, &coupled->coupled_cpus); } /** diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c index 13bf743f88..602c4dfdd7 100644 --- a/drivers/cpuidle/cpuidle-kirkwood.c +++ b/drivers/cpuidle/cpuidle-kirkwood.c @@ -59,15 +59,14 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev) return cpuidle_register(&kirkwood_idle_driver, NULL); } -static int kirkwood_cpuidle_remove(struct platform_device *pdev) +static void kirkwood_cpuidle_remove(struct platform_device *pdev) { cpuidle_unregister(&kirkwood_idle_driver); - return 0; } static struct platform_driver kirkwood_cpuidle_driver = { .probe = kirkwood_cpuidle_probe, - .remove = kirkwood_cpuidle_remove, + .remove_new = kirkwood_cpuidle_remove, .driver = { .name = "kirkwood_cpuidle", }, diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c index b88af1262f..fae9587943 100644 --- a/drivers/cpuidle/cpuidle-psci-domain.c +++ b/drivers/cpuidle/cpuidle-psci-domain.c @@ -20,6 +20,7 @@ #include #include "cpuidle-psci.h" +#include "dt_idle_genpd.h" struct psci_pd_provider { struct list_head link; @@ -200,4 +201,4 @@ static int __init 
psci_idle_init_domains(void) { return platform_driver_register(&psci_cpuidle_domain_driver); } -subsys_initcall(psci_idle_init_domains); +core_initcall(psci_idle_init_domains); diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index bf68920d03..782030a277 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -28,6 +28,7 @@ #include "cpuidle-psci.h" #include "dt_idle_states.h" +#include "dt_idle_genpd.h" struct psci_cpuidle_data { u32 *psci_states; @@ -224,7 +225,7 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv, if (IS_ENABLED(CONFIG_PREEMPT_RT)) return 0; - data->dev = psci_dt_attach_cpu(cpu); + data->dev = dt_idle_attach_cpu(cpu, "psci"); if (IS_ERR_OR_NULL(data->dev)) return PTR_ERR_OR_ZERO(data->dev); @@ -311,7 +312,7 @@ static void psci_cpu_deinit_idle(int cpu) { struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu); - psci_dt_detach_cpu(data->dev); + dt_idle_detach_cpu(data->dev); psci_cpuidle_use_cpuhp = false; } diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h index 4e132640ed..ef004ec7a7 100644 --- a/drivers/cpuidle/cpuidle-psci.h +++ b/drivers/cpuidle/cpuidle-psci.h @@ -3,29 +3,9 @@ #ifndef __CPUIDLE_PSCI_H #define __CPUIDLE_PSCI_H -struct device; struct device_node; void psci_set_domain_state(u32 state); int psci_dt_parse_state_node(struct device_node *np, u32 *state); -#ifdef CONFIG_ARM_PSCI_CPUIDLE_DOMAIN - -#include "dt_idle_genpd.h" - -static inline struct device *psci_dt_attach_cpu(int cpu) -{ - return dt_idle_attach_cpu(cpu, "psci"); -} - -static inline void psci_dt_detach_cpu(struct device *dev) -{ - dt_idle_detach_cpu(dev); -} - -#else -static inline struct device *psci_dt_attach_cpu(int cpu) { return NULL; } -static inline void psci_dt_detach_cpu(struct device *dev) { } -#endif - #endif /* __CPUIDLE_PSCI_H */ diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index 8e9058c4ea..6617eb494a 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c @@ -44,6 +44,7 @@ static DEFINE_PER_CPU(struct ladder_device, ladder_devices); /** * ladder_do_selection - prepares private data for a state change + * @dev: the CPU * @ldev: the ladder device * @old_idx: the current state index * @new_idx: the new target state index diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 3d02702456..94f23c6fc9 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -67,6 +67,7 @@ config CRYPTO_DEV_GEODE config ZCRYPT tristate "Support for s390 cryptographic adapters" depends on S390 + depends on AP select HW_RANDOM help Select this option if you want to enable support for @@ -74,23 +75,6 @@ config ZCRYPT to 8 in Coprocessor (CEXxC), EP11 Coprocessor (CEXxP) or Accelerator (CEXxA) mode. -config ZCRYPT_DEBUG - bool "Enable debug features for s390 cryptographic adapters" - default n - depends on DEBUG_KERNEL - depends on ZCRYPT - help - Say 'Y' here to enable some additional debug features on the - s390 cryptographic adapters driver. - - There will be some more sysfs attributes displayed for ap cards - and queues and some flags on crypto requests are interpreted as - debugging messages to force error injection. - - Do not enable on production level kernel build. - - If unsure, say N. 
- config PKEY tristate "Kernel API for protected key handling" depends on S390 @@ -660,6 +644,14 @@ config CRYPTO_DEV_ROCKCHIP_DEBUG This will create /sys/kernel/debug/rk3288_crypto/stats for displaying the number of requests per algorithm and other internal stats. +config CRYPTO_DEV_TEGRA + tristate "Enable Tegra Security Engine" + depends on TEGRA_HOST1X + select CRYPTO_ENGINE + + help + Select this to enable Tegra Security Engine which accelerates various + AES encryption/decryption and HASH algorithms. config CRYPTO_DEV_ZYNQMP_AES tristate "Support for Xilinx ZynqMP AES hw accelerator" diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 95331bc645..ad4ccef67d 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/ obj-y += stm32/ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o +obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra/ obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/ #obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c index 83a9093eff..a895e4289e 100644 --- a/drivers/crypto/atmel-i2c.c +++ b/drivers/crypto/atmel-i2c.c @@ -51,7 +51,7 @@ static void atmel_i2c_checksum(struct atmel_i2c_cmd *cmd) *__crc16 = cpu_to_le16(bitrev16(crc16(0, data, len))); } -void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd) +void atmel_i2c_init_read_config_cmd(struct atmel_i2c_cmd *cmd) { cmd->word_addr = COMMAND; cmd->opcode = OPCODE_READ; @@ -68,7 +68,31 @@ void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd) cmd->msecs = MAX_EXEC_TIME_READ; cmd->rxsize = READ_RSP_SIZE; } -EXPORT_SYMBOL(atmel_i2c_init_read_cmd); +EXPORT_SYMBOL(atmel_i2c_init_read_config_cmd); + +int atmel_i2c_init_read_otp_cmd(struct atmel_i2c_cmd *cmd, u16 addr) +{ + if (addr < 0 || addr > OTP_ZONE_SIZE) + return -1; + + cmd->word_addr = COMMAND; + cmd->opcode = OPCODE_READ; + /* + * Read the word from OTP zone that may contain e.g. serial + * numbers or similar if persistently pre-initialized and locked + */ + cmd->param1 = OTP_ZONE; + cmd->param2 = cpu_to_le16(addr); + cmd->count = READ_COUNT; + + atmel_i2c_checksum(cmd); + + cmd->msecs = MAX_EXEC_TIME_READ; + cmd->rxsize = READ_RSP_SIZE; + + return 0; +} +EXPORT_SYMBOL(atmel_i2c_init_read_otp_cmd); void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd) { @@ -301,7 +325,7 @@ static int device_sanity_check(struct i2c_client *client) if (!cmd) return -ENOMEM; - atmel_i2c_init_read_cmd(cmd); + atmel_i2c_init_read_config_cmd(cmd); ret = atmel_i2c_send_receive(client, cmd); if (ret) diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h index c0bd429ee2..72f04c1568 100644 --- a/drivers/crypto/atmel-i2c.h +++ b/drivers/crypto/atmel-i2c.h @@ -64,6 +64,10 @@ struct atmel_i2c_cmd { /* Definitions for eeprom organization */ #define CONFIGURATION_ZONE 0 +#define OTP_ZONE 1 + +/* Definitions for eeprom zone sizes */ +#define OTP_ZONE_SIZE 64 /* Definitions for Indexes common to all commands */ #define RSP_DATA_IDX 1 /* buffer index of data in response */ @@ -124,6 +128,7 @@ struct atmel_ecc_driver_data { * @wake_token : wake token array of zeros * @wake_token_sz : size in bytes of the wake_token * @tfm_count : number of active crypto transformations on i2c client + * @hwrng : hold the hardware generated rng * * Reads and writes from/to the i2c client are sequential. The first byte * transmitted to the device is treated as the byte size. 
Any attempt to send @@ -177,7 +182,8 @@ void atmel_i2c_flush_queue(void); int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd); -void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd); +void atmel_i2c_init_read_config_cmd(struct atmel_i2c_cmd *cmd); +int atmel_i2c_init_read_otp_cmd(struct atmel_i2c_cmd *cmd, u16 addr); void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd); void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid); int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd, diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index c77f482d2a..2034f60315 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -91,6 +91,62 @@ static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max, return max; } +static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp) +{ + struct atmel_i2c_cmd cmd; + int ret = -1; + + if (atmel_i2c_init_read_otp_cmd(&cmd, addr) < 0) { + dev_err(&client->dev, "failed, invalid otp address %04X\n", + addr); + return ret; + } + + ret = atmel_i2c_send_receive(client, &cmd); + + if (cmd.data[0] == 0xff) { + dev_err(&client->dev, "failed, device not ready\n"); + return -EINVAL; + } + + memcpy(otp, cmd.data+1, 4); + + return ret; +} + +static ssize_t otp_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u16 addr; + u8 otp[OTP_ZONE_SIZE]; + char *str = buf; + struct i2c_client *client = to_i2c_client(dev); + int i; + + for (addr = 0; addr < OTP_ZONE_SIZE/4; addr++) { + if (atmel_sha204a_otp_read(client, addr, otp + addr * 4) < 0) { + dev_err(dev, "failed to read otp zone\n"); + break; + } + } + + for (i = 0; i < addr*2; i++) + str += sprintf(str, "%02X", otp[i]); + str += sprintf(str, "\n"); + return str - buf; +} +static DEVICE_ATTR_RO(otp); + +static struct attribute *atmel_sha204a_attrs[] = { + &dev_attr_otp.attr, + NULL +}; + +static const struct attribute_group atmel_sha204a_groups = { + .name = "atsha204a", + .attrs = atmel_sha204a_attrs, +}; + static int atmel_sha204a_probe(struct i2c_client *client) { struct atmel_i2c_client_priv *i2c_priv; @@ -111,6 +167,16 @@ static int atmel_sha204a_probe(struct i2c_client *client) if (ret) dev_warn(&client->dev, "failed to register RNG (%d)\n", ret); + /* otp read out */ + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -ENODEV; + + ret = sysfs_create_group(&client->dev.kobj, &atmel_sha204a_groups); + if (ret) { + dev_err(&client->dev, "failed to register sysfs entry\n"); + return ret; + } + return ret; } @@ -123,6 +189,8 @@ static void atmel_sha204a_remove(struct i2c_client *client) return; } + sysfs_remove_group(&client->dev.kobj, &atmel_sha204a_groups); + kfree((void *)i2c_priv->hwrng.priv); } diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index bdf367f3f6..bd418dea58 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -512,6 +512,7 @@ static const struct of_device_id caam_match[] = { MODULE_DEVICE_TABLE(of, caam_match); struct caam_imx_data { + bool page0_access; const struct clk_bulk_data *clks; int num_clks; }; @@ -524,6 +525,7 @@ static const struct clk_bulk_data caam_imx6_clks[] = { }; static const struct caam_imx_data caam_imx6_data = { + .page0_access = true, .clks = caam_imx6_clks, .num_clks = ARRAY_SIZE(caam_imx6_clks), }; @@ -534,6 +536,7 @@ static const struct clk_bulk_data caam_imx7_clks[] = { }; static const struct caam_imx_data caam_imx7_data = { + .page0_access = true, .clks = 
caam_imx7_clks, .num_clks = ARRAY_SIZE(caam_imx7_clks), }; @@ -545,6 +548,7 @@ static const struct clk_bulk_data caam_imx6ul_clks[] = { }; static const struct caam_imx_data caam_imx6ul_data = { + .page0_access = true, .clks = caam_imx6ul_clks, .num_clks = ARRAY_SIZE(caam_imx6ul_clks), }; @@ -554,15 +558,19 @@ static const struct clk_bulk_data caam_vf610_clks[] = { }; static const struct caam_imx_data caam_vf610_data = { + .page0_access = true, .clks = caam_vf610_clks, .num_clks = ARRAY_SIZE(caam_vf610_clks), }; +static const struct caam_imx_data caam_imx8ulp_data; + static const struct soc_device_attribute caam_imx_soc_table[] = { { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data }, { .soc_id = "i.MX6*", .data = &caam_imx6_data }, { .soc_id = "i.MX7*", .data = &caam_imx7_data }, { .soc_id = "i.MX8M*", .data = &caam_imx7_data }, + { .soc_id = "i.MX8ULP", .data = &caam_imx8ulp_data }, { .soc_id = "VF*", .data = &caam_vf610_data }, { .family = "Freescale i.MX" }, { /* sentinel */ } @@ -860,6 +868,7 @@ static int caam_probe(struct platform_device *pdev) int pg_size; int BLOCK_OFFSET = 0; bool reg_access = true; + const struct caam_imx_data *imx_soc_data; ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL); if (!ctrlpriv) @@ -894,12 +903,20 @@ static int caam_probe(struct platform_device *pdev) return -EINVAL; } + imx_soc_data = imx_soc_match->data; + reg_access = reg_access && imx_soc_data->page0_access; + /* + * CAAM clocks cannot be controlled from kernel. + */ + if (!imx_soc_data->num_clks) + goto iomap_ctrl; + ret = init_clocks(dev, imx_soc_match->data); if (ret) return ret; } - +iomap_ctrl: /* Get configuration properties from device tree */ /* First, get register page */ ctrl = devm_of_iomap(dev, nprop, 0, NULL); diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 2102377f72..1912bee22d 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1642,10 +1642,16 @@ fw_err: static int __sev_snp_shutdown_locked(int *error, bool panic) { - struct sev_device *sev = psp_master->sev_data; + struct psp_device *psp = psp_master; + struct sev_device *sev; struct sev_data_snp_shutdown_ex data; int ret; + if (!psp || !psp->sev_data) + return 0; + + sev = psp->sev_data; + if (!sev->snp_initialized) return 0; diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c index 6351a45287..1b9b7bccde 100644 --- a/drivers/crypto/hisilicon/debugfs.c +++ b/drivers/crypto/hisilicon/debugfs.c @@ -13,6 +13,7 @@ #define QM_DFX_COMMON_LEN 0xC3 #define QM_DFX_REGS_LEN 4UL #define QM_DBG_TMP_BUF_LEN 22 +#define QM_XQC_ADDR_MASK GENMASK(31, 0) #define CURRENT_FUN_MASK GENMASK(5, 0) #define CURRENT_Q_MASK GENMASK(31, 16) #define QM_SQE_ADDR_MASK GENMASK(7, 0) @@ -167,7 +168,6 @@ static void dump_show(struct hisi_qm *qm, void *info, static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name) { struct device *dev = &qm->pdev->dev; - struct qm_sqc *sqc_curr; struct qm_sqc sqc; u32 qp_id; int ret; @@ -183,6 +183,8 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name) ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1); if (!ret) { + sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK); + sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK); dump_show(qm, &sqc, sizeof(struct qm_sqc), name); return 0; @@ -190,9 +192,10 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name) down_read(&qm->qps_lock); if (qm->sqc) { - sqc_curr = qm->sqc + qp_id; - - dump_show(qm, sqc_curr, sizeof(*sqc_curr), "SOFT SQC"); + memcpy(&sqc, 
qm->sqc + qp_id * sizeof(struct qm_sqc), sizeof(struct qm_sqc)); + sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK); + sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK); + dump_show(qm, &sqc, sizeof(struct qm_sqc), "SOFT SQC"); } up_read(&qm->qps_lock); @@ -202,7 +205,6 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name) static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name) { struct device *dev = &qm->pdev->dev; - struct qm_cqc *cqc_curr; struct qm_cqc cqc; u32 qp_id; int ret; @@ -218,6 +220,8 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name) ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1); if (!ret) { + cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK); + cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK); dump_show(qm, &cqc, sizeof(struct qm_cqc), name); return 0; @@ -225,9 +229,10 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name) down_read(&qm->qps_lock); if (qm->cqc) { - cqc_curr = qm->cqc + qp_id; - - dump_show(qm, cqc_curr, sizeof(*cqc_curr), "SOFT CQC"); + memcpy(&cqc, qm->cqc + qp_id * sizeof(struct qm_cqc), sizeof(struct qm_cqc)); + cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK); + cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK); + dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC"); } up_read(&qm->qps_lock); @@ -263,6 +268,10 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name) if (ret) return ret; + aeqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK); + aeqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK); + eqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK); + eqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK); dump_show(qm, xeqc, size, name); return ret; @@ -310,27 +319,26 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s, static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name) { - u16 sq_depth = qm->qp_array->cq_depth; - void *sqe, *sqe_curr; + u16 sq_depth = qm->qp_array->sq_depth; struct hisi_qp *qp; u32 qp_id, sqe_id; + void *sqe; int ret; ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth); if (ret) return ret; - sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL); + sqe = kzalloc(qm->sqe_size, GFP_KERNEL); if (!sqe) return -ENOMEM; qp = &qm->qp_array[qp_id]; - memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth); - sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size); - memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, + memcpy(sqe, qp->sqe + sqe_id * qm->sqe_size, qm->sqe_size); + memset(sqe + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, qm->debug.sqe_mask_len); - dump_show(qm, sqe_curr, qm->sqe_size, name); + dump_show(qm, sqe, qm->sqe_size, name); kfree(sqe); @@ -1090,12 +1098,12 @@ static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, { struct debugfs_file *file = qm->debug.files + index; - debugfs_create_file(qm_debug_file_name[index], 0600, dir, file, - &qm_debug_fops); - file->index = index; mutex_init(&file->lock); file->debug = &qm->debug; + + debugfs_create_file(qm_debug_file_name[index], 0600, dir, file, + &qm_debug_fops); } static int qm_debugfs_atomic64_set(void *data, u64 val) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index d93aa6630a..10aa4da933 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -106,7 +106,7 @@ #define HPRE_SHAPER_TYPE_RATE 640 #define HPRE_VIA_MSI_DSM 1 #define HPRE_SQE_MASK_OFFSET 8 -#define HPRE_SQE_MASK_LEN 24 +#define HPRE_SQE_MASK_LEN 44 #define HPRE_CTX_Q_NUM_DEF 1 #define HPRE_DFX_BASE 0x301000 @@ -1074,41 +1074,40 @@ static int hpre_debugfs_init(struct 
hisi_qm *qm) struct device *dev = &qm->pdev->dev; int ret; - qm->debug.debug_root = debugfs_create_dir(dev_name(dev), - hpre_debugfs_root); - - qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; - qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs)); if (ret) { dev_warn(dev, "Failed to init HPRE diff regs!\n"); - goto debugfs_remove; + return ret; } + qm->debug.debug_root = debugfs_create_dir(dev_name(dev), + hpre_debugfs_root); + qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; + qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; + hisi_qm_debug_init(qm); if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) { ret = hpre_ctrl_debug_init(qm); if (ret) - goto failed_to_create; + goto debugfs_remove; } hpre_dfx_debug_init(qm); return 0; -failed_to_create: - hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); debugfs_remove: debugfs_remove_recursive(qm->debug.debug_root); + hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); return ret; } static void hpre_debugfs_exit(struct hisi_qm *qm) { - hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); - debugfs_remove_recursive(qm->debug.debug_root); + + hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); } static int hpre_pre_store_cap_reg(struct hisi_qm *qm) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 13e413533f..3dac8d8e85 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -645,6 +645,9 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op tmp_xqc = qm->xqc_buf.aeqc; xqc_dma = qm->xqc_buf.aeqc_dma; break; + default: + dev_err(&qm->pdev->dev, "unknown mailbox cmd %u\n", cmd); + return -EINVAL; } /* Setting xqc will fail if master OOO is blocked. 
*/ diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index fabea0d650..75aad04ffe 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -99,8 +99,8 @@ #define SEC_DBGFS_VAL_MAX_LEN 20 #define SEC_SINGLE_PORT_MAX_TRANS 0x2060 -#define SEC_SQE_MASK_OFFSET 64 -#define SEC_SQE_MASK_LEN 48 +#define SEC_SQE_MASK_OFFSET 16 +#define SEC_SQE_MASK_LEN 108 #define SEC_SHAPER_TYPE_RATE 400 #define SEC_DFX_BASE 0x301000 @@ -901,37 +901,36 @@ static int sec_debugfs_init(struct hisi_qm *qm) struct device *dev = &qm->pdev->dev; int ret; - qm->debug.debug_root = debugfs_create_dir(dev_name(dev), - sec_debugfs_root); - qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; - qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; - ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs)); if (ret) { dev_warn(dev, "Failed to init SEC diff regs!\n"); - goto debugfs_remove; + return ret; } + qm->debug.debug_root = debugfs_create_dir(dev_name(dev), + sec_debugfs_root); + qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; + qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; + hisi_qm_debug_init(qm); ret = sec_debug_init(qm); if (ret) - goto failed_to_create; + goto debugfs_remove; return 0; -failed_to_create: - hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); debugfs_remove: - debugfs_remove_recursive(sec_debugfs_root); + debugfs_remove_recursive(qm->debug.debug_root); + hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); return ret; } static void sec_debugfs_exit(struct hisi_qm *qm) { - hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); - debugfs_remove_recursive(qm->debug.debug_root); + + hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); } static int sec_show_last_regs_init(struct hisi_qm *qm) @@ -1324,7 +1323,8 @@ static struct pci_driver sec_pci_driver = { .probe = sec_probe, .remove = sec_remove, .err_handler = &sec_err_handler, - .sriov_configure = hisi_qm_sriov_configure, + .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ? 
+ hisi_qm_sriov_configure : NULL, .shutdown = hisi_qm_dev_shutdown, .driver.pm = &sec_pm_ops, }; diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 0beca257c2..568acd0aee 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -161,9 +161,6 @@ static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool, struct mem_block *block; u32 block_index, offset; - if (!pool || !hw_sgl_dma || index >= pool->count) - return ERR_PTR(-EINVAL); - block = pool->mem_block; block_index = index / pool->sgl_num_per_block; offset = index % pool->sgl_num_per_block; @@ -230,7 +227,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sg; int sg_n; - if (!dev || !sgl || !pool || !hw_sgl_dma) + if (!dev || !sgl || !pool || !hw_sgl_dma || index >= pool->count) return ERR_PTR(-EINVAL); sg_n = sg_nents(sgl); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index c065fd8671..c94a7b20d0 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -887,36 +887,34 @@ static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm) static int hisi_zip_debugfs_init(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; - struct dentry *dev_d; int ret; - dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root); - - qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET; - qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN; - qm->debug.debug_root = dev_d; ret = hisi_qm_regs_debugfs_init(qm, hzip_diff_regs, ARRAY_SIZE(hzip_diff_regs)); if (ret) { dev_warn(dev, "Failed to init ZIP diff regs!\n"); - goto debugfs_remove; + return ret; } + qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET; + qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN; + qm->debug.debug_root = debugfs_create_dir(dev_name(dev), + hzip_debugfs_root); + hisi_qm_debug_init(qm); if (qm->fun_type == QM_HW_PF) { ret = hisi_zip_ctrl_debug_init(qm); if (ret) - goto failed_to_create; + goto debugfs_remove; } hisi_zip_dfx_debug_init(qm); return 0; -failed_to_create: - hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); debugfs_remove: - debugfs_remove_recursive(hzip_debugfs_root); + debugfs_remove_recursive(qm->debug.debug_root); + hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); return ret; } @@ -940,10 +938,10 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm) static void hisi_zip_debugfs_exit(struct hisi_qm *qm) { - hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); - debugfs_remove_recursive(qm->debug.debug_root); + hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); + if (qm->fun_type == QM_HW_PF) { hisi_zip_debug_regs_clear(qm); qm->debug.curr_qm_qp_num = 0; diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h index 2524091a5f..56985e3952 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto.h +++ b/drivers/crypto/intel/iaa/iaa_crypto.h @@ -49,10 +49,10 @@ struct iaa_wq { struct iaa_device *iaa_device; - u64 comp_calls; - u64 comp_bytes; - u64 decomp_calls; - u64 decomp_bytes; + atomic64_t comp_calls; + atomic64_t comp_bytes; + atomic64_t decomp_calls; + atomic64_t decomp_bytes; }; struct iaa_device_compression_mode { @@ -73,10 +73,10 @@ struct iaa_device { int n_wq; struct list_head wqs; - u64 comp_calls; - u64 comp_bytes; - u64 decomp_calls; - u64 decomp_bytes; + atomic64_t comp_calls; + atomic64_t comp_bytes; + atomic64_t decomp_calls; + atomic64_t decomp_bytes; }; struct wq_table_entry { diff --git 
a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index b2191ade90..e810d286ee 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -347,18 +347,16 @@ int add_iaa_compression_mode(const char *name, goto free; if (ll_table) { - mode->ll_table = kzalloc(ll_table_size, GFP_KERNEL); + mode->ll_table = kmemdup(ll_table, ll_table_size, GFP_KERNEL); if (!mode->ll_table) goto free; - memcpy(mode->ll_table, ll_table, ll_table_size); mode->ll_table_size = ll_table_size; } if (d_table) { - mode->d_table = kzalloc(d_table_size, GFP_KERNEL); + mode->d_table = kmemdup(d_table, d_table_size, GFP_KERNEL); if (!mode->d_table) goto free; - memcpy(mode->d_table, d_table, d_table_size); mode->d_table_size = d_table_size; } @@ -922,7 +920,7 @@ static void rebalance_wq_table(void) for_each_node_with_cpus(node) { node_cpus = cpumask_of_node(node); - for (cpu = 0; cpu < nr_cpus_per_node; cpu++) { + for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) { int node_cpu = cpumask_nth(cpu, node_cpus); if (WARN_ON(node_cpu >= nr_cpu_ids)) { @@ -1079,8 +1077,8 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc, update_total_comp_bytes_out(ctx->req->dlen); update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen); } else { - update_total_decomp_bytes_in(ctx->req->dlen); - update_wq_decomp_bytes(iaa_wq->wq, ctx->req->dlen); + update_total_decomp_bytes_in(ctx->req->slen); + update_wq_decomp_bytes(iaa_wq->wq, ctx->req->slen); } if (ctx->compress && compression_ctx->verify_compress) { @@ -1498,7 +1496,6 @@ static int iaa_comp_acompress(struct acomp_req *req) u32 compression_crc; struct idxd_wq *wq; struct device *dev; - u64 start_time_ns; int order = -1; compression_ctx = crypto_tfm_ctx(tfm); @@ -1572,10 +1569,8 @@ static int iaa_comp_acompress(struct acomp_req *req) " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, req->dst, req->dlen, sg_dma_len(req->dst)); - start_time_ns = iaa_get_ts(); ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, &req->dlen, &compression_crc, disable_async); - update_max_comp_delay_ns(start_time_ns); if (ret == -EINPROGRESS) return ret; @@ -1622,7 +1617,6 @@ static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) struct iaa_wq *iaa_wq; struct device *dev; struct idxd_wq *wq; - u64 start_time_ns; int order = -1; cpu = get_cpu(); @@ -1679,10 +1673,8 @@ alloc_dest: dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, req->dst, req->dlen, sg_dma_len(req->dst)); - start_time_ns = iaa_get_ts(); ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, dst_addr, &req->dlen, true); - update_max_decomp_delay_ns(start_time_ns); if (ret == -EOVERFLOW) { dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); req->dlen *= 2; @@ -1713,7 +1705,6 @@ static int iaa_comp_adecompress(struct acomp_req *req) int nr_sgs, cpu, ret = 0; struct iaa_wq *iaa_wq; struct device *dev; - u64 start_time_ns; struct idxd_wq *wq; if (!iaa_crypto_enabled) { @@ -1773,10 +1764,8 @@ static int iaa_comp_adecompress(struct acomp_req *req) " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, req->dst, req->dlen, sg_dma_len(req->dst)); - start_time_ns = iaa_get_ts(); ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, dst_addr, &req->dlen, false); - update_max_decomp_delay_ns(start_time_ns); if (ret == -EINPROGRESS) return ret; @@ -2014,7 +2003,7 @@ static int __init iaa_crypto_init_module(void) int ret = 0; int node; - nr_cpus = 
num_online_cpus(); + nr_cpus = num_possible_cpus(); for_each_node_with_cpus(node) nr_nodes++; if (!nr_nodes) { diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c index c9f83af4b3..f5cc3d29ca 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c @@ -17,141 +17,117 @@ #include "iaa_crypto.h" #include "iaa_crypto_stats.h" -static u64 total_comp_calls; -static u64 total_decomp_calls; -static u64 total_sw_decomp_calls; -static u64 max_comp_delay_ns; -static u64 max_decomp_delay_ns; -static u64 total_comp_bytes_out; -static u64 total_decomp_bytes_in; -static u64 total_completion_einval_errors; -static u64 total_completion_timeout_errors; -static u64 total_completion_comp_buf_overflow_errors; +static atomic64_t total_comp_calls; +static atomic64_t total_decomp_calls; +static atomic64_t total_sw_decomp_calls; +static atomic64_t total_comp_bytes_out; +static atomic64_t total_decomp_bytes_in; +static atomic64_t total_completion_einval_errors; +static atomic64_t total_completion_timeout_errors; +static atomic64_t total_completion_comp_buf_overflow_errors; static struct dentry *iaa_crypto_debugfs_root; void update_total_comp_calls(void) { - total_comp_calls++; + atomic64_inc(&total_comp_calls); } void update_total_comp_bytes_out(int n) { - total_comp_bytes_out += n; + atomic64_add(n, &total_comp_bytes_out); } void update_total_decomp_calls(void) { - total_decomp_calls++; + atomic64_inc(&total_decomp_calls); } void update_total_sw_decomp_calls(void) { - total_sw_decomp_calls++; + atomic64_inc(&total_sw_decomp_calls); } void update_total_decomp_bytes_in(int n) { - total_decomp_bytes_in += n; + atomic64_add(n, &total_decomp_bytes_in); } void update_completion_einval_errs(void) { - total_completion_einval_errors++; + atomic64_inc(&total_completion_einval_errors); } void update_completion_timeout_errs(void) { - total_completion_timeout_errors++; + atomic64_inc(&total_completion_timeout_errors); } void update_completion_comp_buf_overflow_errs(void) { - total_completion_comp_buf_overflow_errors++; -} - -void update_max_comp_delay_ns(u64 start_time_ns) -{ - u64 time_diff; - - time_diff = ktime_get_ns() - start_time_ns; - - if (time_diff > max_comp_delay_ns) - max_comp_delay_ns = time_diff; -} - -void update_max_decomp_delay_ns(u64 start_time_ns) -{ - u64 time_diff; - - time_diff = ktime_get_ns() - start_time_ns; - - if (time_diff > max_decomp_delay_ns) - max_decomp_delay_ns = time_diff; + atomic64_inc(&total_completion_comp_buf_overflow_errors); } void update_wq_comp_calls(struct idxd_wq *idxd_wq) { struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); - wq->comp_calls++; - wq->iaa_device->comp_calls++; + atomic64_inc(&wq->comp_calls); + atomic64_inc(&wq->iaa_device->comp_calls); } void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) { struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); - wq->comp_bytes += n; - wq->iaa_device->comp_bytes += n; + atomic64_add(n, &wq->comp_bytes); + atomic64_add(n, &wq->iaa_device->comp_bytes); } void update_wq_decomp_calls(struct idxd_wq *idxd_wq) { struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); - wq->decomp_calls++; - wq->iaa_device->decomp_calls++; + atomic64_inc(&wq->decomp_calls); + atomic64_inc(&wq->iaa_device->decomp_calls); } void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) { struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); - wq->decomp_bytes += n; - wq->iaa_device->decomp_bytes += n; + atomic64_add(n, &wq->decomp_bytes); + atomic64_add(n, 
&wq->iaa_device->decomp_bytes); } static void reset_iaa_crypto_stats(void) { - total_comp_calls = 0; - total_decomp_calls = 0; - total_sw_decomp_calls = 0; - max_comp_delay_ns = 0; - max_decomp_delay_ns = 0; - total_comp_bytes_out = 0; - total_decomp_bytes_in = 0; - total_completion_einval_errors = 0; - total_completion_timeout_errors = 0; - total_completion_comp_buf_overflow_errors = 0; + atomic64_set(&total_comp_calls, 0); + atomic64_set(&total_decomp_calls, 0); + atomic64_set(&total_sw_decomp_calls, 0); + atomic64_set(&total_comp_bytes_out, 0); + atomic64_set(&total_decomp_bytes_in, 0); + atomic64_set(&total_completion_einval_errors, 0); + atomic64_set(&total_completion_timeout_errors, 0); + atomic64_set(&total_completion_comp_buf_overflow_errors, 0); } static void reset_wq_stats(struct iaa_wq *wq) { - wq->comp_calls = 0; - wq->comp_bytes = 0; - wq->decomp_calls = 0; - wq->decomp_bytes = 0; + atomic64_set(&wq->comp_calls, 0); + atomic64_set(&wq->comp_bytes, 0); + atomic64_set(&wq->decomp_calls, 0); + atomic64_set(&wq->decomp_bytes, 0); } static void reset_device_stats(struct iaa_device *iaa_device) { struct iaa_wq *iaa_wq; - iaa_device->comp_calls = 0; - iaa_device->comp_bytes = 0; - iaa_device->decomp_calls = 0; - iaa_device->decomp_bytes = 0; + atomic64_set(&iaa_device->comp_calls, 0); + atomic64_set(&iaa_device->comp_bytes, 0); + atomic64_set(&iaa_device->decomp_calls, 0); + atomic64_set(&iaa_device->decomp_bytes, 0); list_for_each_entry(iaa_wq, &iaa_device->wqs, list) reset_wq_stats(iaa_wq); @@ -160,10 +136,14 @@ static void reset_device_stats(struct iaa_device *iaa_device) static void wq_show(struct seq_file *m, struct iaa_wq *iaa_wq) { seq_printf(m, " name: %s\n", iaa_wq->wq->name); - seq_printf(m, " comp_calls: %llu\n", iaa_wq->comp_calls); - seq_printf(m, " comp_bytes: %llu\n", iaa_wq->comp_bytes); - seq_printf(m, " decomp_calls: %llu\n", iaa_wq->decomp_calls); - seq_printf(m, " decomp_bytes: %llu\n\n", iaa_wq->decomp_bytes); + seq_printf(m, " comp_calls: %llu\n", + atomic64_read(&iaa_wq->comp_calls)); + seq_printf(m, " comp_bytes: %llu\n", + atomic64_read(&iaa_wq->comp_bytes)); + seq_printf(m, " decomp_calls: %llu\n", + atomic64_read(&iaa_wq->decomp_calls)); + seq_printf(m, " decomp_bytes: %llu\n\n", + atomic64_read(&iaa_wq->decomp_bytes)); } static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device) @@ -173,30 +153,41 @@ static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device) seq_puts(m, "iaa device:\n"); seq_printf(m, " id: %d\n", iaa_device->idxd->id); seq_printf(m, " n_wqs: %d\n", iaa_device->n_wq); - seq_printf(m, " comp_calls: %llu\n", iaa_device->comp_calls); - seq_printf(m, " comp_bytes: %llu\n", iaa_device->comp_bytes); - seq_printf(m, " decomp_calls: %llu\n", iaa_device->decomp_calls); - seq_printf(m, " decomp_bytes: %llu\n", iaa_device->decomp_bytes); + seq_printf(m, " comp_calls: %llu\n", + atomic64_read(&iaa_device->comp_calls)); + seq_printf(m, " comp_bytes: %llu\n", + atomic64_read(&iaa_device->comp_bytes)); + seq_printf(m, " decomp_calls: %llu\n", + atomic64_read(&iaa_device->decomp_calls)); + seq_printf(m, " decomp_bytes: %llu\n", + atomic64_read(&iaa_device->decomp_bytes)); seq_puts(m, " wqs:\n"); list_for_each_entry(iaa_wq, &iaa_device->wqs, list) wq_show(m, iaa_wq); } -static void global_stats_show(struct seq_file *m) +static int global_stats_show(struct seq_file *m, void *v) { seq_puts(m, "global stats:\n"); - seq_printf(m, " total_comp_calls: %llu\n", total_comp_calls); - seq_printf(m, " total_decomp_calls: 
%llu\n", total_decomp_calls); - seq_printf(m, " total_sw_decomp_calls: %llu\n", total_sw_decomp_calls); - seq_printf(m, " total_comp_bytes_out: %llu\n", total_comp_bytes_out); - seq_printf(m, " total_decomp_bytes_in: %llu\n", total_decomp_bytes_in); + seq_printf(m, " total_comp_calls: %llu\n", + atomic64_read(&total_comp_calls)); + seq_printf(m, " total_decomp_calls: %llu\n", + atomic64_read(&total_decomp_calls)); + seq_printf(m, " total_sw_decomp_calls: %llu\n", + atomic64_read(&total_sw_decomp_calls)); + seq_printf(m, " total_comp_bytes_out: %llu\n", + atomic64_read(&total_comp_bytes_out)); + seq_printf(m, " total_decomp_bytes_in: %llu\n", + atomic64_read(&total_decomp_bytes_in)); seq_printf(m, " total_completion_einval_errors: %llu\n", - total_completion_einval_errors); + atomic64_read(&total_completion_einval_errors)); seq_printf(m, " total_completion_timeout_errors: %llu\n", - total_completion_timeout_errors); + atomic64_read(&total_completion_timeout_errors)); seq_printf(m, " total_completion_comp_buf_overflow_errors: %llu\n\n", - total_completion_comp_buf_overflow_errors); + atomic64_read(&total_completion_comp_buf_overflow_errors)); + + return 0; } static int wq_stats_show(struct seq_file *m, void *v) @@ -205,8 +196,6 @@ static int wq_stats_show(struct seq_file *m, void *v) mutex_lock(&iaa_devices_lock); - global_stats_show(m); - list_for_each_entry(iaa_device, &iaa_devices, list) device_stats_show(m, iaa_device); @@ -243,6 +232,18 @@ static const struct file_operations wq_stats_fops = { .release = single_release, }; +static int global_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, global_stats_show, file); +} + +static const struct file_operations global_stats_fops = { + .open = global_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + DEFINE_DEBUGFS_ATTRIBUTE(wq_stats_reset_fops, NULL, iaa_crypto_stats_reset, "%llu\n"); int __init iaa_crypto_debugfs_init(void) @@ -252,20 +253,8 @@ int __init iaa_crypto_debugfs_init(void) iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL); - debugfs_create_u64("max_comp_delay_ns", 0644, - iaa_crypto_debugfs_root, &max_comp_delay_ns); - debugfs_create_u64("max_decomp_delay_ns", 0644, - iaa_crypto_debugfs_root, &max_decomp_delay_ns); - debugfs_create_u64("total_comp_calls", 0644, - iaa_crypto_debugfs_root, &total_comp_calls); - debugfs_create_u64("total_decomp_calls", 0644, - iaa_crypto_debugfs_root, &total_decomp_calls); - debugfs_create_u64("total_sw_decomp_calls", 0644, - iaa_crypto_debugfs_root, &total_sw_decomp_calls); - debugfs_create_u64("total_comp_bytes_out", 0644, - iaa_crypto_debugfs_root, &total_comp_bytes_out); - debugfs_create_u64("total_decomp_bytes_in", 0644, - iaa_crypto_debugfs_root, &total_decomp_bytes_in); + debugfs_create_file("global_stats", 0644, iaa_crypto_debugfs_root, NULL, + &global_stats_fops); debugfs_create_file("wq_stats", 0644, iaa_crypto_debugfs_root, NULL, &wq_stats_fops); debugfs_create_file("stats_reset", 0644, iaa_crypto_debugfs_root, NULL, diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.h b/drivers/crypto/intel/iaa/iaa_crypto_stats.h index c916ca83f0..3787a5f507 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_stats.h +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.h @@ -13,8 +13,6 @@ void update_total_comp_bytes_out(int n); void update_total_decomp_calls(void); void update_total_sw_decomp_calls(void); void update_total_decomp_bytes_in(int n); -void update_max_comp_delay_ns(u64 start_time_ns); -void 
update_max_decomp_delay_ns(u64 start_time_ns); void update_completion_einval_errs(void); void update_completion_timeout_errs(void); void update_completion_comp_buf_overflow_errs(void); @@ -24,8 +22,6 @@ void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n); void update_wq_decomp_calls(struct idxd_wq *idxd_wq); void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n); -static inline u64 iaa_get_ts(void) { return ktime_get_ns(); } - #else static inline int iaa_crypto_debugfs_init(void) { return 0; } static inline void iaa_crypto_debugfs_cleanup(void) {} @@ -35,8 +31,6 @@ static inline void update_total_comp_bytes_out(int n) {} static inline void update_total_decomp_calls(void) {} static inline void update_total_sw_decomp_calls(void) {} static inline void update_total_decomp_bytes_in(int n) {} -static inline void update_max_comp_delay_ns(u64 start_time_ns) {} -static inline void update_max_decomp_delay_ns(u64 start_time_ns) {} static inline void update_completion_einval_errs(void) {} static inline void update_completion_timeout_errs(void) {} static inline void update_completion_comp_buf_overflow_errs(void) {} @@ -46,8 +40,6 @@ static inline void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) {} static inline void update_wq_decomp_calls(struct idxd_wq *idxd_wq) {} static inline void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) {} -static inline u64 iaa_get_ts(void) { return 0; } - #endif // CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS #endif diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile index a90fbe00b3..45728659fb 100644 --- a/drivers/crypto/intel/qat/qat_420xx/Makefile +++ b/drivers/crypto/intel/qat/qat_420xx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(srctree)/$(src)/../qat_common +ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 1d0ef47a9f..78f0ea4925 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -10,12 +10,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include "adf_420xx_hw_data.h" #include "icp_qat_hw.h" @@ -487,6 +489,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) adf_gen4_init_dc_ops(&hw_data->dc_ops); adf_gen4_init_ras_ops(&hw_data->ras_ops); adf_gen4_init_tl_data(&hw_data->tl_data); + adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops); adf_init_rl_data(&hw_data->rl_data); } diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile index ff9c8b5897..9ba202079a 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/Makefile +++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) -ccflags-y := -I $(srctree)/$(src)/../qat_common +ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o qat_4xxx-objs := adf_drv.o adf_4xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index fb34fd7f03..9fd7ec53b9 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -10,12 +10,14 @@ #include #include #include +#include #include 
#include #include #include "adf_gen4_ras.h" #include #include +#include #include "adf_4xxx_hw_data.h" #include "icp_qat_hw.h" @@ -454,6 +456,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; + hw_data->bank_state_save = adf_gen4_bank_state_save; + hw_data->bank_state_restore = adf_gen4_bank_state_restore; hw_data->enable_pm = adf_gen4_enable_pm; hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; hw_data->dev_config = adf_gen4_dev_config; @@ -469,6 +473,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) adf_gen4_init_dc_ops(&hw_data->dc_ops); adf_gen4_init_ras_ops(&hw_data->ras_ops); adf_gen4_init_tl_data(&hw_data->tl_data); + adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops); adf_init_rl_data(&hw_data->rl_data); } diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile index 92ef416ccc..7a06ad519b 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/Makefile +++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(srctree)/$(src)/../qat_common +ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index a882e0ea22..201f9412c5 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include "adf_c3xxx_hw_data.h" diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile index b6d76825a9..7ef633058c 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(srctree)/$(src)/../qat_common +ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c index 84d9486e04..a512ca4efd 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile index d581f7c87d..cc9255b3b1 100644 --- a/drivers/crypto/intel/qat/qat_c62x/Makefile +++ b/drivers/crypto/intel/qat/qat_c62x/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(srctree)/$(src)/../qat_common +ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 48cf3eb7c7..6b5b0cf9c7 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include "adf_c62x_hw_data.h" diff --git 
a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile index 446c3d6386..256786662d 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/Makefile +++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(srctree)/$(src)/../qat_common +ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c index 751d7aa57f..4aaaaf9217 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 5915cde8a7..eac73cbfdd 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -14,16 +14,20 @@ intel_qat-objs := adf_cfg.o \ adf_hw_arbiter.o \ adf_sysfs.o \ adf_sysfs_ras_counters.o \ + adf_gen2_hw_csr_data.o \ adf_gen2_hw_data.o \ adf_gen2_config.o \ adf_gen4_config.o \ + adf_gen4_hw_csr_data.o \ adf_gen4_hw_data.o \ + adf_gen4_vf_mig.o \ adf_gen4_pm.o \ adf_gen2_dc.o \ adf_gen4_dc.o \ adf_gen4_ras.o \ adf_gen4_timer.o \ adf_clock.o \ + adf_mstate_mgr.o \ qat_crypto.o \ qat_compression.o \ qat_comp_algs.o \ @@ -35,7 +39,8 @@ intel_qat-objs := adf_cfg.o \ adf_sysfs_rl.o \ qat_uclo.o \ qat_hal.o \ - qat_bl.o + qat_bl.o \ + qat_mig_dev.o intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ adf_fw_counters.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 08658c3a01..7830ecb1a1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -9,6 +9,7 @@ #include #include #include +#include #include "adf_cfg_common.h" #include "adf_rl.h" #include "adf_telemetry.h" @@ -140,6 +141,40 @@ struct admin_info { u32 mailbox_offset; }; +struct ring_config { + u64 base; + u32 config; + u32 head; + u32 tail; + u32 reserved0; +}; + +struct bank_state { + u32 ringstat0; + u32 ringstat1; + u32 ringuostat; + u32 ringestat; + u32 ringnestat; + u32 ringnfstat; + u32 ringfstat; + u32 ringcstat0; + u32 ringcstat1; + u32 ringcstat2; + u32 ringcstat3; + u32 iaintflagen; + u32 iaintflagreg; + u32 iaintflagsrcsel0; + u32 iaintflagsrcsel1; + u32 iaintcolen; + u32 iaintcolctl; + u32 iaintflagandcolen; + u32 ringexpstat; + u32 ringexpintenable; + u32 ringsrvarben; + u32 reserved0; + struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK]; +}; + struct adf_hw_csr_ops { u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size); u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank, @@ -150,22 +185,49 @@ struct adf_hw_csr_ops { u32 ring); void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank, u32 ring, u32 value); + u32 (*read_csr_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_uo_stat)(void __iomem *csr_base_addr, u32 bank); u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_ne_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_nf_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_f_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_c_stat)(void __iomem *csr_base_addr, 
u32 bank); + u32 (*read_csr_exp_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank); + void (*write_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank, + u32 value); + u32 (*read_csr_ring_config)(void __iomem *csr_base_addr, u32 bank, + u32 ring); void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank, u32 ring, u32 value); + dma_addr_t (*read_csr_ring_base)(void __iomem *csr_base_addr, u32 bank, + u32 ring); void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank, u32 ring, dma_addr_t addr); + u32 (*read_csr_int_en)(void __iomem *csr_base_addr, u32 bank); + void (*write_csr_int_en)(void __iomem *csr_base_addr, u32 bank, + u32 value); + u32 (*read_csr_int_flag)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank); + void (*write_csr_int_srcsel_w_val)(void __iomem *csr_base_addr, + u32 bank, u32 value); + u32 (*read_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_int_flag_and_col)(void __iomem *csr_base_addr, + u32 bank); void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*get_int_col_ctl_enable_mask)(void); }; struct adf_cfg_device_data; @@ -197,6 +259,20 @@ struct adf_dc_ops { void (*build_deflate_ctx)(void *ctx); }; +struct qat_migdev_ops { + int (*init)(struct qat_mig_dev *mdev); + void (*cleanup)(struct qat_mig_dev *mdev); + void (*reset)(struct qat_mig_dev *mdev); + int (*open)(struct qat_mig_dev *mdev); + void (*close)(struct qat_mig_dev *mdev); + int (*suspend)(struct qat_mig_dev *mdev); + int (*resume)(struct qat_mig_dev *mdev); + int (*save_state)(struct qat_mig_dev *mdev); + int (*save_setup)(struct qat_mig_dev *mdev); + int (*load_state)(struct qat_mig_dev *mdev); + int (*load_setup)(struct qat_mig_dev *mdev, int size); +}; + struct adf_dev_err_mask { u32 cppagentcmdpar_mask; u32 parerr_ath_cph_mask; @@ -244,6 +320,10 @@ struct adf_hw_device_data { void (*enable_ints)(struct adf_accel_dev *accel_dev); void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev); int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr); + int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state); + int (*bank_state_restore)(struct adf_accel_dev *accel_dev, + u32 bank_number, struct bank_state *state); void (*reset_device)(struct adf_accel_dev *accel_dev); void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); @@ -260,6 +340,7 @@ struct adf_hw_device_data { struct adf_dev_err_mask dev_err_mask; struct adf_rl_hw_data rl_data; struct adf_tl_hw_data tl_data; + struct qat_migdev_ops vfmig_ops; const char *fw_name; const char *fw_mmp_name; u32 fuses; @@ -316,6 +397,7 @@ struct adf_hw_device_data { #define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops) #define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops) #define 
GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops) +#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops) #define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev @@ -330,11 +412,17 @@ struct adf_fw_loader_data { struct adf_accel_vf_info { struct adf_accel_dev *accel_dev; struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */ + struct mutex pfvf_mig_lock; /* protects PFVF state for migration */ struct ratelimit_state vf2pf_ratelimit; u32 vf_nr; bool init; bool restarting; u8 vf_compat_ver; + /* + * Private area used for device migration. + * Memory allocation and free is managed by migration driver. + */ + void *mig_priv; }; struct adf_dc_data { diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c index 8836f015c3..2cf102ad4c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.c @@ -290,17 +290,19 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, * 3. if the key exists with the same value, then return without doing * anything (the newly created key_val is freed). */ + down_write(&cfg->lock); if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) { if (strncmp(temp_val, key_val->val, sizeof(temp_val))) { adf_cfg_keyval_remove(key, section); } else { kfree(key_val); - return 0; + goto out; } } - down_write(&cfg->lock); adf_cfg_keyval_add(key_val, section); + +out: up_write(&cfg->lock); return 0; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 57328249c8..3bec9e20ba 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -248,6 +248,16 @@ static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev) return pmisc->virt_addr; } +static inline void __iomem *adf_get_etr_base(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *etr; + + etr = &GET_BARS(accel_dev)[hw_data->get_etr_bar_id(hw_data)]; + + return etr->virt_addr; +} + static inline void __iomem *adf_get_aram_base(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c new file mode 100644 index 0000000000..650c9edd8a --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include "adf_gen2_hw_csr_data.h" + +static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) +{ + return BUILD_RING_BASE_ADDR(addr, size); +} + +static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); +} + +static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); +} + +static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); +} + +static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); +} + +static u32 read_csr_e_stat(void __iomem 
*csr_base_addr, u32 bank) +{ + return READ_CSR_E_STAT(csr_base_addr, bank); +} + +static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, + u32 ring, u32 value) +{ + WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); +} + +static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, + dma_addr_t addr) +{ + WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); +} + +static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); +} + +static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) +{ + WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); +} + +static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); +} + +static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); +} + +static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); +} + +static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); +} + +void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) +{ + csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; + csr_ops->read_csr_ring_head = read_csr_ring_head; + csr_ops->write_csr_ring_head = write_csr_ring_head; + csr_ops->read_csr_ring_tail = read_csr_ring_tail; + csr_ops->write_csr_ring_tail = write_csr_ring_tail; + csr_ops->read_csr_e_stat = read_csr_e_stat; + csr_ops->write_csr_ring_config = write_csr_ring_config; + csr_ops->write_csr_ring_base = write_csr_ring_base; + csr_ops->write_csr_int_flag = write_csr_int_flag; + csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; + csr_ops->write_csr_int_col_en = write_csr_int_col_en; + csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; + csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; + csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; +} +EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h new file mode 100644 index 0000000000..55058b0f9e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef ADF_GEN2_HW_CSR_DATA_H_ +#define ADF_GEN2_HW_CSR_DATA_H_ + +#include +#include "adf_accel_devices.h" + +#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL +#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL +#define ADF_RING_CSR_RING_CONFIG 0x000 +#define ADF_RING_CSR_RING_LBASE 0x040 +#define ADF_RING_CSR_RING_UBASE 0x080 +#define ADF_RING_CSR_RING_HEAD 0x0C0 +#define ADF_RING_CSR_RING_TAIL 0x100 +#define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_INT_FLAG 0x170 +#define ADF_RING_CSR_INT_SRCSEL 0x174 +#define ADF_RING_CSR_INT_SRCSEL_2 0x178 +#define ADF_RING_CSR_INT_COL_EN 0x17C +#define ADF_RING_CSR_INT_COL_CTL 0x180 +#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 +#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 +#define ADF_RING_BUNDLE_SIZE 0x1000 +#define ADF_ARB_REG_SLOT 0x1000 +#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C + +#define BUILD_RING_BASE_ADDR(addr, size) \ + (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) +#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) 
\ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2)) +#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2)) +#define READ_CSR_E_STAT(csr_base_addr, bank) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_E_STAT) +#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) +#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ +do { \ + u32 l_base = 0, u_base = 0; \ + l_base = (u32)((value) & 0xFFFFFFFF); \ + u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \ +} while (0) + +#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) +#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) +#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_FLAG, value) +#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ +do { \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ +} while (0) +#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_COL_EN, value) +#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_COL_CTL, \ + ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) +#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_FLAG_AND_COL, value) + +#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ + (ADF_ARB_REG_SLOT * (index)), value) + +void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c index d1884547b5..1f64bf49b2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c @@ -111,103 +111,6 @@ void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_gen2_enable_ints); -static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) -{ - return BUILD_RING_BASE_ADDR(addr, size); -} - -static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); -} - -static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - 
return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); -} - -static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) -{ - return READ_CSR_E_STAT(csr_base_addr, bank); -} - -static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, - u32 ring, u32 value) -{ - WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); -} - -static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, - dma_addr_t addr) -{ - WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); -} - -static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value) -{ - WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); -} - -static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) -{ - WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); -} - -static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); -} - -static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); -} - -static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); -} - -static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); -} - -void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) -{ - csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; - csr_ops->read_csr_ring_head = read_csr_ring_head; - csr_ops->write_csr_ring_head = write_csr_ring_head; - csr_ops->read_csr_ring_tail = read_csr_ring_tail; - csr_ops->write_csr_ring_tail = write_csr_ring_tail; - csr_ops->read_csr_e_stat = read_csr_e_stat; - csr_ops->write_csr_ring_config = write_csr_ring_config; - csr_ops->write_csr_ring_base = write_csr_ring_base; - csr_ops->write_csr_int_flag = write_csr_int_flag; - csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; - csr_ops->write_csr_int_col_en = write_csr_int_col_en; - csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; - csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; - csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; -} -EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops); - u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h index 6bd341061d..708e918612 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h @@ -6,78 +6,9 @@ #include "adf_accel_devices.h" #include "adf_cfg_common.h" -/* Transport access */ -#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL -#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL -#define ADF_RING_CSR_RING_CONFIG 0x000 -#define ADF_RING_CSR_RING_LBASE 0x040 -#define ADF_RING_CSR_RING_UBASE 0x080 -#define ADF_RING_CSR_RING_HEAD 0x0C0 -#define ADF_RING_CSR_RING_TAIL 0x100 -#define ADF_RING_CSR_E_STAT 0x14C -#define ADF_RING_CSR_INT_FLAG 0x170 -#define ADF_RING_CSR_INT_SRCSEL 0x174 -#define ADF_RING_CSR_INT_SRCSEL_2 0x178 -#define ADF_RING_CSR_INT_COL_EN 0x17C -#define ADF_RING_CSR_INT_COL_CTL 0x180 -#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 -#define 
ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 -#define ADF_RING_BUNDLE_SIZE 0x1000 #define ADF_GEN2_RX_RINGS_OFFSET 8 #define ADF_GEN2_TX_RINGS_MASK 0xFF -#define BUILD_RING_BASE_ADDR(addr, size) \ - (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) -#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2)) -#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2)) -#define READ_CSR_E_STAT(csr_base_addr, bank) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_E_STAT) -#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) -#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ -do { \ - u32 l_base = 0, u_base = 0; \ - l_base = (u32)((value) & 0xFFFFFFFF); \ - u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \ -} while (0) - -#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) -#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) -#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_FLAG, value) -#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ -do { \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ -} while (0) -#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_COL_EN, value) -#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_COL_CTL, \ - ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) -#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_FLAG_AND_COL, value) - /* AE to function map */ #define AE2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190) #define AE2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310) @@ -106,12 +37,6 @@ do { \ #define ADF_ARB_OFFSET 0x30000 #define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 #define ADF_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) -#define ADF_ARB_REG_SLOT 0x1000 -#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C - -#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \ - ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ - (ADF_ARB_REG_SLOT * (index)), value) /* Power gating */ #define ADF_POWERGATE_DC BIT(23) @@ -158,7 +83,6 @@ u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self); void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev); void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable, int num_a_regs, int num_b_regs); -void 
adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info); void adf_gen2_get_arb_info(struct arb_info *arb_info); void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c new file mode 100644 index 0000000000..6609c248aa --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include "adf_gen4_hw_csr_data.h" + +static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) +{ + return BUILD_RING_BASE_ADDR(addr, size); +} + +static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); +} + +static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); +} + +static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); +} + +static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); +} + +static u32 read_csr_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_STAT(csr_base_addr, bank); +} + +static u32 read_csr_uo_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_UO_STAT(csr_base_addr, bank); +} + +static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_E_STAT(csr_base_addr, bank); +} + +static u32 read_csr_ne_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_NE_STAT(csr_base_addr, bank); +} + +static u32 read_csr_nf_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_NF_STAT(csr_base_addr, bank); +} + +static u32 read_csr_f_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_F_STAT(csr_base_addr, bank); +} + +static u32 read_csr_c_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_C_STAT(csr_base_addr, bank); +} + +static u32 read_csr_exp_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_EXP_STAT(csr_base_addr, bank); +} + +static u32 read_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_EXP_INT_EN(csr_base_addr, bank); +} + +static void write_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value); +} + +static u32 read_csr_ring_config(void __iomem *csr_base_addr, u32 bank, + u32 ring) +{ + return READ_CSR_RING_CONFIG(csr_base_addr, bank, ring); +} + +static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); +} + +static dma_addr_t read_csr_ring_base(void __iomem *csr_base_addr, u32 bank, + u32 ring) +{ + return READ_CSR_RING_BASE(csr_base_addr, bank, ring); +} + +static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, + dma_addr_t addr) +{ + WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); +} + +static u32 read_csr_int_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_EN(csr_base_addr, bank); +} + +static void write_csr_int_en(void __iomem *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_EN(csr_base_addr, bank, value); +} + +static u32 
read_csr_int_flag(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_FLAG(csr_base_addr, bank); +} + +static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); +} + +static u32 read_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_SRCSEL(csr_base_addr, bank); +} + +static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) +{ + WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); +} + +static void write_csr_int_srcsel_w_val(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value); +} + +static u32 read_csr_int_col_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_COL_EN(csr_base_addr, bank); +} + +static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); +} + +static u32 read_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_COL_CTL(csr_base_addr, bank); +} + +static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); +} + +static u32 read_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank); +} + +static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); +} + +static u32 read_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank); +} + +static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); +} + +static u32 get_int_col_ctl_enable_mask(void) +{ + return ADF_RING_CSR_INT_COL_CTL_ENABLE; +} + +void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) +{ + csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; + csr_ops->read_csr_ring_head = read_csr_ring_head; + csr_ops->write_csr_ring_head = write_csr_ring_head; + csr_ops->read_csr_ring_tail = read_csr_ring_tail; + csr_ops->write_csr_ring_tail = write_csr_ring_tail; + csr_ops->read_csr_stat = read_csr_stat; + csr_ops->read_csr_uo_stat = read_csr_uo_stat; + csr_ops->read_csr_e_stat = read_csr_e_stat; + csr_ops->read_csr_ne_stat = read_csr_ne_stat; + csr_ops->read_csr_nf_stat = read_csr_nf_stat; + csr_ops->read_csr_f_stat = read_csr_f_stat; + csr_ops->read_csr_c_stat = read_csr_c_stat; + csr_ops->read_csr_exp_stat = read_csr_exp_stat; + csr_ops->read_csr_exp_int_en = read_csr_exp_int_en; + csr_ops->write_csr_exp_int_en = write_csr_exp_int_en; + csr_ops->read_csr_ring_config = read_csr_ring_config; + csr_ops->write_csr_ring_config = write_csr_ring_config; + csr_ops->read_csr_ring_base = read_csr_ring_base; + csr_ops->write_csr_ring_base = write_csr_ring_base; + csr_ops->read_csr_int_en = read_csr_int_en; + csr_ops->write_csr_int_en = write_csr_int_en; + csr_ops->read_csr_int_flag = read_csr_int_flag; + csr_ops->write_csr_int_flag = write_csr_int_flag; + csr_ops->read_csr_int_srcsel = read_csr_int_srcsel; + csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; + csr_ops->write_csr_int_srcsel_w_val = write_csr_int_srcsel_w_val; + csr_ops->read_csr_int_col_en = read_csr_int_col_en; + csr_ops->write_csr_int_col_en = write_csr_int_col_en; + csr_ops->read_csr_int_col_ctl = read_csr_int_col_ctl; + csr_ops->write_csr_int_col_ctl = 
write_csr_int_col_ctl; + csr_ops->read_csr_int_flag_and_col = read_csr_int_flag_and_col; + csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; + csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en; + csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; + csr_ops->get_int_col_ctl_enable_mask = get_int_col_ctl_enable_mask; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h new file mode 100644 index 0000000000..6f33e7c87c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef ADF_GEN4_HW_CSR_DATA_H_ +#define ADF_GEN4_HW_CSR_DATA_H_ + +#include +#include "adf_accel_devices.h" + +#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL +#define ADF_RING_CSR_RING_CONFIG 0x1000 +#define ADF_RING_CSR_RING_LBASE 0x1040 +#define ADF_RING_CSR_RING_UBASE 0x1080 +#define ADF_RING_CSR_RING_HEAD 0x0C0 +#define ADF_RING_CSR_RING_TAIL 0x100 +#define ADF_RING_CSR_STAT 0x140 +#define ADF_RING_CSR_UO_STAT 0x148 +#define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_NE_STAT 0x150 +#define ADF_RING_CSR_NF_STAT 0x154 +#define ADF_RING_CSR_F_STAT 0x158 +#define ADF_RING_CSR_C_STAT 0x15C +#define ADF_RING_CSR_INT_FLAG_EN 0x16C +#define ADF_RING_CSR_INT_FLAG 0x170 +#define ADF_RING_CSR_INT_SRCSEL 0x174 +#define ADF_RING_CSR_INT_COL_EN 0x17C +#define ADF_RING_CSR_INT_COL_CTL 0x180 +#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 +#define ADF_RING_CSR_EXP_STAT 0x188 +#define ADF_RING_CSR_EXP_INT_EN 0x18C +#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 +#define ADF_RING_CSR_ADDR_OFFSET 0x100000 +#define ADF_RING_BUNDLE_SIZE 0x2000 +#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C + +#define BUILD_RING_BASE_ADDR(addr, size) \ + ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6) +#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2)) +#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2)) +#define READ_CSR_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_STAT) +#define READ_CSR_UO_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_UO_STAT) +#define READ_CSR_E_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT) +#define READ_CSR_NE_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NE_STAT) +#define READ_CSR_NF_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NF_STAT) +#define READ_CSR_F_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_F_STAT) +#define READ_CSR_C_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_C_STAT) +#define READ_CSR_EXP_STAT(csr_base_addr, bank) \ + 
ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_STAT) +#define READ_CSR_EXP_INT_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_INT_EN) +#define WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_EXP_INT_EN, value) +#define READ_CSR_RING_CONFIG(csr_base_addr, bank, ring) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_CONFIG + ((ring) << 2)) +#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) +#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ +do { \ + void __iomem *_csr_base_addr = csr_base_addr; \ + u32 _bank = bank; \ + u32 _ring = ring; \ + dma_addr_t _value = value; \ + u32 l_base = 0, u_base = 0; \ + l_base = lower_32_bits(_value); \ + u_base = upper_32_bits(_value); \ + ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (_bank) + \ + ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \ + ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (_bank) + \ + ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \ +} while (0) + +static inline u64 read_base(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + u32 l_base, u_base; + + /* + * Use special IO wrapper for ring base as LBASE and UBASE are + * not physically contigious + */ + l_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + + ADF_RING_CSR_RING_LBASE + (ring << 2)); + u_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + + ADF_RING_CSR_RING_UBASE + (ring << 2)); + + return (u64)u_base << 32 | (u64)l_base; +} + +#define READ_CSR_RING_BASE(csr_base_addr, bank, ring) \ + read_base((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, (bank), (ring)) + +#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) +#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) +#define READ_CSR_INT_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG_EN) +#define WRITE_CSR_INT_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG_EN, (value)) +#define READ_CSR_INT_FLAG(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG) +#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG, (value)) +#define READ_CSR_INT_SRCSEL(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_SRCSEL) +#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * 
(bank) + \ + ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK) +#define WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_SRCSEL, (value)) +#define READ_CSR_INT_COL_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_EN) +#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_COL_EN, (value)) +#define READ_CSR_INT_COL_CTL(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_CTL) +#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_COL_CTL, \ + ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) +#define READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG_AND_COL) +#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG_AND_COL, (value)) + +#define READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_SRV_ARB_EN) +#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_SRV_ARB_EN, (value)) + +void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index d28e192194..41a0979e68 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2020 Intel Corporation */ #include +#include #include "adf_accel_devices.h" #include "adf_cfg_services.h" #include "adf_common_drv.h" @@ -8,103 +9,6 @@ #include "adf_gen4_hw_data.h" #include "adf_gen4_pm.h" -static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) -{ - return BUILD_RING_BASE_ADDR(addr, size); -} - -static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); -} - -static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); -} - -static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) -{ - return READ_CSR_E_STAT(csr_base_addr, bank); -} - -static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); -} - -static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, - 
dma_addr_t addr) -{ - WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); -} - -static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); -} - -static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) -{ - WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); -} - -static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value) -{ - WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); -} - -static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); -} - -static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); -} - -static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); -} - -void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) -{ - csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; - csr_ops->read_csr_ring_head = read_csr_ring_head; - csr_ops->write_csr_ring_head = write_csr_ring_head; - csr_ops->read_csr_ring_tail = read_csr_ring_tail; - csr_ops->write_csr_ring_tail = write_csr_ring_tail; - csr_ops->read_csr_e_stat = read_csr_e_stat; - csr_ops->write_csr_ring_config = write_csr_ring_config; - csr_ops->write_csr_ring_base = write_csr_ring_base; - csr_ops->write_csr_int_flag = write_csr_int_flag; - csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; - csr_ops->write_csr_int_col_en = write_csr_int_col_en; - csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; - csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; - csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; -} -EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops); - u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self) { return ADF_GEN4_ACCELERATORS_MASK; @@ -321,8 +225,7 @@ static int reset_ring_pair(void __iomem *csr, u32 bank_number) int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; - u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data); - void __iomem *csr; + void __iomem *csr = adf_get_etr_base(accel_dev); int ret; if (bank_number >= hw_data->num_banks) @@ -331,7 +234,6 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) dev_dbg(&GET_DEV(accel_dev), "ring pair reset for bank:%d\n", bank_number); - csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr; ret = reset_ring_pair(csr, bank_number); if (ret) dev_err(&GET_DEV(accel_dev), @@ -489,3 +391,281 @@ set_mask: return ring_to_svc_map; } EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map); + +/* + * adf_gen4_bank_quiesce_coal_timer() - quiesce bank coalesced interrupt timer + * @accel_dev: Pointer to the device structure + * @bank_idx: Offset to the bank within this device + * @timeout_ms: Timeout in milliseconds for the operation + * + * This function tries to quiesce the coalesced interrupt timer of a bank if + * it has been enabled and triggered. 
+ * + * Returns 0 on success, error code otherwise + * + */ +int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev, + u32 bank_idx, int timeout_ms) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); + void __iomem *csr_misc = adf_get_pmisc_base(accel_dev); + void __iomem *csr_etr = adf_get_etr_base(accel_dev); + u32 int_col_ctl, int_col_mask, int_col_en; + u32 e_stat, intsrc; + u64 wait_us; + int ret; + + if (timeout_ms < 0) + return -EINVAL; + + int_col_ctl = csr_ops->read_csr_int_col_ctl(csr_etr, bank_idx); + int_col_mask = csr_ops->get_int_col_ctl_enable_mask(); + if (!(int_col_ctl & int_col_mask)) + return 0; + + int_col_en = csr_ops->read_csr_int_col_en(csr_etr, bank_idx); + int_col_en &= BIT(ADF_WQM_CSR_RP_IDX_RX); + + e_stat = csr_ops->read_csr_e_stat(csr_etr, bank_idx); + if (!(~e_stat & int_col_en)) + return 0; + + wait_us = 2 * ((int_col_ctl & ~int_col_mask) << 8) * USEC_PER_SEC; + do_div(wait_us, hw_data->clock_frequency); + wait_us = min(wait_us, (u64)timeout_ms * USEC_PER_MSEC); + dev_dbg(&GET_DEV(accel_dev), + "wait for bank %d - coalesced timer expires in %llu us (max=%u ms estat=0x%x intcolen=0x%x)\n", + bank_idx, wait_us, timeout_ms, e_stat, int_col_en); + + ret = read_poll_timeout(ADF_CSR_RD, intsrc, intsrc, + ADF_COALESCED_POLL_DELAY_US, wait_us, true, + csr_misc, ADF_WQM_CSR_RPINTSOU(bank_idx)); + if (ret) + dev_warn(&GET_DEV(accel_dev), + "coalesced timer for bank %d expired (%llu us)\n", + bank_idx, wait_us); + + return ret; +} +EXPORT_SYMBOL_GPL(adf_gen4_bank_quiesce_coal_timer); + +static int drain_bank(void __iomem *csr, u32 bank_number, int timeout_us) +{ + u32 status; + + ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number), + ADF_WQM_CSR_RPRESETCTL_DRAIN); + + return read_poll_timeout(ADF_CSR_RD, status, + status & ADF_WQM_CSR_RPRESETSTS_STATUS, + ADF_RPRESET_POLL_DELAY_US, timeout_us, true, + csr, ADF_WQM_CSR_RPRESETSTS(bank_number)); +} + +void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev, + u32 bank_number) +{ + void __iomem *csr = adf_get_etr_base(accel_dev); + + ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number), + ADF_WQM_CSR_RPRESETSTS_STATUS); +} + +int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev, + u32 bank_number, int timeout_us) +{ + void __iomem *csr = adf_get_etr_base(accel_dev); + int ret; + + dev_dbg(&GET_DEV(accel_dev), "Drain bank %d\n", bank_number); + + ret = drain_bank(csr, bank_number, timeout_us); + if (ret) + dev_err(&GET_DEV(accel_dev), "Bank drain failed (timeout)\n"); + else + dev_dbg(&GET_DEV(accel_dev), "Bank drain successful\n"); + + return ret; +} + +static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base, + u32 bank, struct bank_state *state, u32 num_rings) +{ + u32 i; + + state->ringstat0 = ops->read_csr_stat(base, bank); + state->ringuostat = ops->read_csr_uo_stat(base, bank); + state->ringestat = ops->read_csr_e_stat(base, bank); + state->ringnestat = ops->read_csr_ne_stat(base, bank); + state->ringnfstat = ops->read_csr_nf_stat(base, bank); + state->ringfstat = ops->read_csr_f_stat(base, bank); + state->ringcstat0 = ops->read_csr_c_stat(base, bank); + state->iaintflagen = ops->read_csr_int_en(base, bank); + state->iaintflagreg = ops->read_csr_int_flag(base, bank); + state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank); + state->iaintcolen = ops->read_csr_int_col_en(base, bank); + state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank); + state->iaintflagandcolen = 
ops->read_csr_int_flag_and_col(base, bank); + state->ringexpstat = ops->read_csr_exp_stat(base, bank); + state->ringexpintenable = ops->read_csr_exp_int_en(base, bank); + state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank); + + for (i = 0; i < num_rings; i++) { + state->rings[i].head = ops->read_csr_ring_head(base, bank, i); + state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i); + state->rings[i].config = ops->read_csr_ring_config(base, bank, i); + state->rings[i].base = ops->read_csr_ring_base(base, bank, i); + } +} + +#define CHECK_STAT(op, expect_val, name, args...) \ +({ \ + u32 __expect_val = (expect_val); \ + u32 actual_val = op(args); \ + (__expect_val == actual_val) ? 0 : \ + (pr_err("QAT: Fail to restore %s register. Expected 0x%x, actual 0x%x\n", \ + name, __expect_val, actual_val), -EINVAL); \ +}) + +static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base, + u32 bank, struct bank_state *state, u32 num_rings, + int tx_rx_gap) +{ + u32 val, tmp_val, i; + int ret; + + for (i = 0; i < num_rings; i++) + ops->write_csr_ring_base(base, bank, i, state->rings[i].base); + + for (i = 0; i < num_rings; i++) + ops->write_csr_ring_config(base, bank, i, state->rings[i].config); + + for (i = 0; i < num_rings / 2; i++) { + int tx = i * (tx_rx_gap + 1); + int rx = tx + tx_rx_gap; + + ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head); + ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail); + + /* + * The TX ring head needs to be updated again to make sure that + * the HW will not consider the ring as full when it is empty + * and the correct state flags are set to match the recovered state. + */ + if (state->ringestat & BIT(tx)) { + val = ops->read_csr_int_srcsel(base, bank); + val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK; + ops->write_csr_int_srcsel_w_val(base, bank, val); + ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head); + } + + ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail); + val = ops->read_csr_int_srcsel(base, bank); + val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH; + ops->write_csr_int_srcsel_w_val(base, bank, val); + + ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head); + val = ops->read_csr_int_srcsel(base, bank); + val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH; + ops->write_csr_int_srcsel_w_val(base, bank, val); + + /* + * The RX ring tail needs to be updated again to make sure that + * the HW will not consider the ring as empty when it is full + * and the correct state flags are set to match the recovered state. + */ + if (state->ringfstat & BIT(rx)) + ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail); + } + + ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen); + ops->write_csr_int_en(base, bank, state->iaintflagen); + ops->write_csr_int_col_en(base, bank, state->iaintcolen); + ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0); + ops->write_csr_exp_int_en(base, bank, state->ringexpintenable); + ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl); + ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben); + + /* Check that all ring statuses match the saved state. 
*/ + ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat", + base, bank); + if (ret) + return ret; + + tmp_val = ops->read_csr_exp_stat(base, bank); + val = state->ringexpstat; + if (tmp_val && !val) { + pr_err("QAT: Bank was restored with exception: 0x%x\n", val); + return -EINVAL; + } + + return 0; +} + +int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); + void __iomem *csr_base = adf_get_etr_base(accel_dev); + + if (bank_number >= hw_data->num_banks || !state) + return -EINVAL; + + dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number); + + bank_state_save(csr_ops, csr_base, bank_number, state, + hw_data->num_rings_per_bank); + + return 0; +} +EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save); + +int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); + void __iomem *csr_base = adf_get_etr_base(accel_dev); + int ret; + + if (bank_number >= hw_data->num_banks || !state) + return -EINVAL; + + dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number); + + ret = bank_state_restore(csr_ops, csr_base, bank_number, state, + hw_data->num_rings_per_bank, hw_data->tx_rx_gap); + if (ret) + dev_err(&GET_DEV(accel_dev), + "Unable to restore state of bank %d\n", bank_number); + + return ret; +} +EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index c6e80df5a8..8b10926ced 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ /* Copyright(c) 2020 Intel Corporation */ -#ifndef ADF_GEN4_HW_CSR_DATA_H_ -#define ADF_GEN4_HW_CSR_DATA_H_ +#ifndef ADF_GEN4_HW_DATA_H_ +#define ADF_GEN4_HW_DATA_H_ #include @@ -54,95 +54,6 @@ #define ADF_GEN4_ADMINMSGLR_OFFSET 0x500578 #define ADF_GEN4_MAILBOX_BASE_OFFSET 0x600970 -/* Transport access */ -#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL -#define ADF_RING_CSR_RING_CONFIG 0x1000 -#define ADF_RING_CSR_RING_LBASE 0x1040 -#define ADF_RING_CSR_RING_UBASE 0x1080 -#define ADF_RING_CSR_RING_HEAD 0x0C0 -#define ADF_RING_CSR_RING_TAIL 0x100 -#define ADF_RING_CSR_E_STAT 0x14C -#define ADF_RING_CSR_INT_FLAG 0x170 -#define ADF_RING_CSR_INT_SRCSEL 0x174 -#define ADF_RING_CSR_INT_COL_CTL 0x180 -#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 -#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 -#define ADF_RING_CSR_INT_COL_EN 0x17C -#define ADF_RING_CSR_ADDR_OFFSET 0x100000 -#define ADF_RING_BUNDLE_SIZE 0x2000 - -#define BUILD_RING_BASE_ADDR(addr, size) \ - 
((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6) -#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ - ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2)) -#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ - ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2)) -#define READ_CSR_E_STAT(csr_base_addr, bank) \ - ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT) -#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) -#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ -do { \ - void __iomem *_csr_base_addr = csr_base_addr; \ - u32 _bank = bank; \ - u32 _ring = ring; \ - dma_addr_t _value = value; \ - u32 l_base = 0, u_base = 0; \ - l_base = lower_32_bits(_value); \ - u_base = upper_32_bits(_value); \ - ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (_bank) + \ - ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \ - ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (_bank) + \ - ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \ -} while (0) - -#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) -#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) -#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_FLAG, (value)) -#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK) -#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_COL_EN, (value)) -#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_COL_CTL, \ - ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) -#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_FLAG_AND_COL, (value)) - -/* Arbiter configuration */ -#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C - -#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_SRV_ARB_EN, (value)) - /* Default ring mapping */ #define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \ (ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \ @@ -166,10 +77,20 @@ do { \ #define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC) #define ADF_RPRESET_POLL_DELAY_US 20 #define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0) +#define ADF_WQM_CSR_RPRESETCTL_DRAIN BIT(2) #define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3)) #define 
ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0) #define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4) +/* Ring interrupt */ +#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2) +#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0) +#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4 +#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC) +#define ADF_COALESCED_POLL_DELAY_US 1000 +#define ADF_WQM_CSR_RPINTSOU(bank) (0x200000 + ((bank) << 12)) +#define ADF_WQM_CSR_RP_IDX_RX 1 + /* Error source registers */ #define ADF_GEN4_ERRSOU0 (0x41A200) #define ADF_GEN4_ERRSOU1 (0x41A204) @@ -197,6 +118,19 @@ do { \ /* Arbiter threads mask with error value */ #define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0) +/* PF2VM communication channel */ +#define ADF_GEN4_PF2VM_OFFSET(i) (0x40B010 + (i) * 0x20) +#define ADF_GEN4_VM2PF_OFFSET(i) (0x40B014 + (i) * 0x20) +#define ADF_GEN4_VINTMSKPF2VM_OFFSET(i) (0x40B00C + (i) * 0x20) +#define ADF_GEN4_VINTSOUPF2VM_OFFSET(i) (0x40B008 + (i) * 0x20) +#define ADF_GEN4_VINTMSK_OFFSET(i) (0x40B004 + (i) * 0x20) +#define ADF_GEN4_VINTSOU_OFFSET(i) (0x40B000 + (i) * 0x20) + +struct adf_gen4_vfmig { + struct adf_mstate_mgr *mstate_mgr; + bool bank_stopped[ADF_GEN4_NUM_BANKS_PER_VF]; +}; + void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); enum icp_qat_gen4_slice_mask { @@ -230,11 +164,20 @@ u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self); enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self); u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self); int adf_gen4_init_device(struct adf_accel_dev *accel_dev); -void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev); void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev); u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev); +int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev, + u32 bank_idx, int timeout_ms); +int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev, + u32 bank_number, int timeout_us); +void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev, + u32 bank_number); +int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state); +int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, + u32 bank_number, struct bank_state *state); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c index 8e8efe93f3..21474d402d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c @@ -6,12 +6,10 @@ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_gen4_pfvf.h" +#include "adf_gen4_hw_data.h" #include "adf_pfvf_pf_proto.h" #include "adf_pfvf_utils.h" -#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20)) -#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20)) - /* VF2PF interrupt source registers */ #define ADF_4XXX_VM2PF_SOU 0x41A180 #define ADF_4XXX_VM2PF_MSK 0x41A1C0 @@ -29,12 +27,12 @@ static const struct pfvf_csr_format csr_gen4_fmt = { static u32 adf_gen4_pf_get_pf2vf_offset(u32 i) { - return ADF_4XXX_PF2VM_OFFSET(i); + return ADF_GEN4_PF2VM_OFFSET(i); } static u32 adf_gen4_pf_get_vf2pf_offset(u32 i) { - return ADF_4XXX_VM2PF_OFFSET(i); + return ADF_GEN4_VM2PF_OFFSET(i); } static 
void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c new file mode 100644 index 0000000000..a62eb5e8db --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c @@ -0,0 +1,1010 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include +#include +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_gen4_hw_data.h" +#include "adf_gen4_pfvf.h" +#include "adf_pfvf_utils.h" +#include "adf_mstate_mgr.h" +#include "adf_gen4_vf_mig.h" + +#define ADF_GEN4_VF_MSTATE_SIZE 4096 +#define ADF_GEN4_PFVF_RSP_TIMEOUT_US 5000 + +static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev); +static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len); + +static int adf_gen4_vfmig_init_device(struct qat_mig_dev *mdev) +{ + u8 *state; + + state = kmalloc(ADF_GEN4_VF_MSTATE_SIZE, GFP_KERNEL); + if (!state) + return -ENOMEM; + + mdev->state = state; + mdev->state_size = ADF_GEN4_VF_MSTATE_SIZE; + mdev->setup_size = 0; + mdev->remote_setup_size = 0; + + return 0; +} + +static void adf_gen4_vfmig_cleanup_device(struct qat_mig_dev *mdev) +{ + kfree(mdev->state); + mdev->state = NULL; +} + +static void adf_gen4_vfmig_reset_device(struct qat_mig_dev *mdev) +{ + mdev->setup_size = 0; + mdev->remote_setup_size = 0; +} + +static int adf_gen4_vfmig_open_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + + vf_info = &accel_dev->pf.vf_info[mdev->vf_id]; + + vfmig = kzalloc(sizeof(*vfmig), GFP_KERNEL); + if (!vfmig) + return -ENOMEM; + + vfmig->mstate_mgr = adf_mstate_mgr_new(mdev->state, mdev->state_size); + if (!vfmig->mstate_mgr) { + kfree(vfmig); + return -ENOMEM; + } + vf_info->mig_priv = vfmig; + mdev->setup_size = 0; + mdev->remote_setup_size = 0; + + return 0; +} + +static void adf_gen4_vfmig_close_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + + vf_info = &accel_dev->pf.vf_info[mdev->vf_id]; + if (vf_info->mig_priv) { + vfmig = vf_info->mig_priv; + adf_mstate_mgr_destroy(vfmig->mstate_mgr); + kfree(vfmig); + vf_info->mig_priv = NULL; + } +} + +static int adf_gen4_vfmig_suspend_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vf_mig; + u32 vf_nr = mdev->vf_id; + int ret, i; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vf_mig = vf_info->mig_priv; + + /* Stop all inflight jobs */ + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf; + + ret = adf_gen4_bank_drain_start(accel_dev, pf_bank_nr, + ADF_RPRESET_POLL_TIMEOUT_US); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to drain bank %d for vf_nr %d\n", i, + vf_nr); + return ret; + } + vf_mig->bank_stopped[i] = true; + + adf_gen4_bank_quiesce_coal_timer(accel_dev, pf_bank_nr, + ADF_COALESCED_POLL_TIMEOUT_US); + } + + return 0; +} + +static int adf_gen4_vfmig_resume_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + 
struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vf_mig; + u32 vf_nr = mdev->vf_id; + int i; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vf_mig = vf_info->mig_priv; + + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf; + + if (vf_mig->bank_stopped[i]) { + adf_gen4_bank_drain_finish(accel_dev, pf_bank_nr); + vf_mig->bank_stopped[i] = false; + } + } + + return 0; +} + +struct adf_vf_bank_info { + struct adf_accel_dev *accel_dev; + u32 vf_nr; + u32 bank_nr; +}; + +struct mig_user_sla { + enum adf_base_services srv; + u64 rp_mask; + u32 cir; + u32 pir; +}; + +static int adf_mstate_sla_check(struct adf_mstate_mgr *sub_mgr, u8 *src_buf, + u32 src_size, void *opaque) +{ + struct adf_mstate_vreginfo _sinfo = { src_buf, src_size }; + struct adf_mstate_vreginfo *sinfo = &_sinfo, *dinfo = opaque; + u32 src_sla_cnt = sinfo->size / sizeof(struct mig_user_sla); + u32 dst_sla_cnt = dinfo->size / sizeof(struct mig_user_sla); + struct mig_user_sla *src_slas = sinfo->addr; + struct mig_user_sla *dst_slas = dinfo->addr; + int i, j; + + for (i = 0; i < src_sla_cnt; i++) { + for (j = 0; j < dst_sla_cnt; j++) { + if (src_slas[i].srv != dst_slas[j].srv || + src_slas[i].rp_mask != dst_slas[j].rp_mask) + continue; + + if (src_slas[i].cir > dst_slas[j].cir || + src_slas[i].pir > dst_slas[j].pir) { + pr_err("QAT: DST VF rate limiting mismatch.\n"); + return -EINVAL; + } + break; + } + + if (j == dst_sla_cnt) { + pr_err("QAT: SRC VF rate limiting mismatch - SRC srv %d and rp_mask 0x%llx.\n", + src_slas[i].srv, src_slas[i].rp_mask); + return -EINVAL; + } + } + + return 0; +} + +static inline int adf_mstate_check_cap_size(u32 src_sz, u32 dst_sz, u32 max_sz) +{ + if (src_sz > max_sz || dst_sz > max_sz) + return -EINVAL; + else + return 0; +} + +static int adf_mstate_compatver_check(struct adf_mstate_mgr *sub_mgr, + u8 *src_buf, u32 src_sz, void *opaque) +{ + struct adf_mstate_vreginfo *info = opaque; + u8 compat = 0; + u8 *pcompat; + + if (src_sz != info->size) { + pr_debug("QAT: State mismatch (compat version size), current %u, expected %u\n", + src_sz, info->size); + return -EINVAL; + } + + memcpy(info->addr, src_buf, info->size); + pcompat = info->addr; + if (*pcompat == 0) { + pr_warn("QAT: Unable to determine the version of VF\n"); + return 0; + } + + compat = adf_vf_compat_checker(*pcompat); + if (compat == ADF_PF2VF_VF_INCOMPATIBLE) { + pr_debug("QAT: SRC VF driver (ver=%u) is incompatible with DST PF driver (ver=%u)\n", + *pcompat, ADF_PFVF_COMPAT_THIS_VERSION); + return -EINVAL; + } + + if (compat == ADF_PF2VF_VF_COMPAT_UNKNOWN) + pr_debug("QAT: SRC VF driver (ver=%u) is newer than DST PF driver (ver=%u)\n", + *pcompat, ADF_PFVF_COMPAT_THIS_VERSION); + + return 0; +} + +/* + * adf_mstate_capmask_compare() - compare QAT device capability mask + * @sinfo: Pointer to source capability info + * @dinfo: Pointer to target capability info + * + * This function compares the capability mask between source VF and target VF + * + * Returns: 0 if target capability mask is identical to source capability mask, + * 1 if target mask can represent all the capabilities represented by source mask, + * -1 if target mask can't represent all the capabilities represented by source + * mask. 
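+ *
+ * As a purely illustrative example, a hypothetical source mask of 0x3
+ * compared against a target mask of 0x7 returns 1 (the target is a
+ * superset), while a target mask of 0x1 returns -1 since the capability
+ * in bit 1 of the source cannot be represented by the target.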
+ */ +static int adf_mstate_capmask_compare(struct adf_mstate_vreginfo *sinfo, + struct adf_mstate_vreginfo *dinfo) +{ + u64 src = 0, dst = 0; + + if (adf_mstate_check_cap_size(sinfo->size, dinfo->size, sizeof(u64))) { + pr_debug("QAT: Unexpected capability size %u %u %zu\n", + sinfo->size, dinfo->size, sizeof(u64)); + return -1; + } + + memcpy(&src, sinfo->addr, sinfo->size); + memcpy(&dst, dinfo->addr, dinfo->size); + + pr_debug("QAT: Check cap compatibility of cap %llu %llu\n", src, dst); + + if (src == dst) + return 0; + + if ((src | dst) == dst) + return 1; + + return -1; +} + +static int adf_mstate_capmask_superset(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa) +{ + struct adf_mstate_vreginfo sinfo = { buf, size }; + + if (adf_mstate_capmask_compare(&sinfo, opa) >= 0) + return 0; + + return -EINVAL; +} + +static int adf_mstate_capmask_equal(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa) +{ + struct adf_mstate_vreginfo sinfo = { buf, size }; + + if (adf_mstate_capmask_compare(&sinfo, opa) == 0) + return 0; + + return -EINVAL; +} + +static int adf_mstate_set_vreg(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa) +{ + struct adf_mstate_vreginfo *info = opa; + + if (size != info->size) { + pr_debug("QAT: Unexpected cap size %u %u\n", size, info->size); + return -EINVAL; + } + memcpy(info->addr, buf, info->size); + + return 0; +} + +static u32 adf_gen4_vfmig_get_slas(struct adf_accel_dev *accel_dev, u32 vf_nr, + struct mig_user_sla *pmig_slas) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla **sla_type_arr = NULL; + u64 rp_mask, rp_index; + u32 max_num_sla; + u32 sla_cnt = 0; + int i, j; + + if (!accel_dev->rate_limiting) + return 0; + + rp_index = vf_nr * hw_data->num_banks_per_vf; + max_num_sla = adf_rl_get_sla_arr_of_type(rl_data, RL_LEAF, &sla_type_arr); + + for (i = 0; i < max_num_sla; i++) { + if (!sla_type_arr[i]) + continue; + + rp_mask = 0; + for (j = 0; j < sla_type_arr[i]->ring_pairs_cnt; j++) + rp_mask |= BIT(sla_type_arr[i]->ring_pairs_ids[j]); + + if (rp_mask & GENMASK_ULL(rp_index + 3, rp_index)) { + pmig_slas->rp_mask = rp_mask; + pmig_slas->cir = sla_type_arr[i]->cir; + pmig_slas->pir = sla_type_arr[i]->pir; + pmig_slas->srv = sla_type_arr[i]->srv; + pmig_slas++; + sla_cnt++; + } + } + + return sla_cnt; +} + +static int adf_gen4_vfmig_load_etr_regs(struct adf_mstate_mgr *sub_mgr, + u8 *state, u32 size, void *opa) +{ + struct adf_vf_bank_info *vf_bank_info = opa; + struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 pf_bank_nr; + int ret; + + pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf; + ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr, + (struct bank_state *)state); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load regs for vf%d bank%d\n", + vf_bank_info->vf_nr, vf_bank_info->bank_nr); + return ret; + } + + return 0; +} + +static int adf_gen4_vfmig_load_etr_bank(struct adf_accel_dev *accel_dev, + u32 vf_nr, u32 bank_nr, + struct adf_mstate_mgr *mstate_mgr) +{ + struct adf_vf_bank_info vf_bank_info = {accel_dev, vf_nr, bank_nr}; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + char bank_ids[ADF_MSTATE_ID_LEN]; + + snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr); + subsec = adf_mstate_sect_lookup(mstate_mgr, bank_ids, NULL, NULL); + if (!subsec) 
{ + dev_err(&GET_DEV(accel_dev), + "Failed to lookup sec %s for vf%d bank%d\n", + ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS, + adf_gen4_vfmig_load_etr_regs, + &vf_bank_info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to add sec %s for vf%d bank%d\n", + ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr); + return -EINVAL; + } + + return 0; +} + +static int adf_gen4_vfmig_load_etr(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec; + int ret, i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, + NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_ETRB_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + ret = adf_gen4_vfmig_load_etr_bank(accel_dev, vf_nr, i, + &sub_sects_mgr); + if (ret) + return ret; + } + + return 0; +} + +static int adf_gen4_vfmig_load_misc(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + void __iomem *csr = adf_get_pmisc_base(accel_dev); + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + struct { + char *id; + u64 ofs; + } misc_states[] = { + {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)}, + }; + int i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, + NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_MISCB_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < ARRAY_SIZE(misc_states); i++) { + struct adf_mstate_vreginfo info; + u32 regv; + + info.addr = ®v; + info.size = sizeof(regv); + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, + misc_states[i].id, + adf_mstate_set_vreg, + &info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to load sec %s\n", misc_states[i].id); + return -EINVAL; + } + ADF_CSR_WR(csr, misc_states[i].ofs, regv); + } + + return 0; +} + +static int adf_gen4_vfmig_load_generic(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct mig_user_sla dst_slas[RL_RP_CNT_PER_LEAF_MAX] = { }; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + u32 dst_sla_cnt; + struct { + char *id; + int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa); + struct adf_mstate_vreginfo info; + } gen_states[] = { + {ADF_MSTATE_IOV_INIT_IDS, adf_mstate_set_vreg, + {&vf_info->init, sizeof(vf_info->init)}}, + {ADF_MSTATE_COMPAT_VER_IDS, adf_mstate_compatver_check, + 
{&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}}, + {ADF_MSTATE_SLA_IDS, adf_mstate_sla_check, {dst_slas, 0}}, + }; + int i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_GEN_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < ARRAY_SIZE(gen_states); i++) { + if (gen_states[i].info.addr == dst_slas) { + dst_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, dst_slas); + gen_states[i].info.size = dst_sla_cnt * sizeof(struct mig_user_sla); + } + + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, + gen_states[i].id, + gen_states[i].action, + &gen_states[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + gen_states[i].id); + return -EINVAL; + } + } + + return 0; +} + +static int adf_gen4_vfmig_load_config(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + struct { + char *id; + int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa); + struct adf_mstate_vreginfo info; + } setups[] = { + {ADF_MSTATE_GEN_CAP_IDS, adf_mstate_capmask_superset, + {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}}, + {ADF_MSTATE_GEN_SVCMAP_IDS, adf_mstate_capmask_equal, + {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}}, + {ADF_MSTATE_GEN_EXTDC_IDS, adf_mstate_capmask_superset, + {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}}, + }; + int i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_CONFIG_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < ARRAY_SIZE(setups); i++) { + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, setups[i].id, + setups[i].action, &setups[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + setups[i].id); + return -EINVAL; + } + } + + return 0; +} + +static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state, + u32 size, void *opa) +{ + struct adf_vf_bank_info *vf_bank_info = opa; + struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 pf_bank_nr; + int ret; + + pf_bank_nr = vf_bank_info->bank_nr; + pf_bank_nr += vf_bank_info->vf_nr * hw_data->num_banks_per_vf; + + ret = hw_data->bank_state_save(accel_dev, pf_bank_nr, + (struct bank_state *)state); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save regs for vf%d bank%d\n", + vf_bank_info->vf_nr, vf_bank_info->bank_nr); + return ret; + } + + return sizeof(struct bank_state); +} + +static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev, + u32 vf_nr, u32 bank_nr, + struct adf_mstate_mgr *mstate_mgr) +{ + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_vf_bank_info vf_bank_info; + struct adf_mstate_mgr sub_sects_mgr; + char bank_ids[ADF_MSTATE_ID_LEN]; + + snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr); + + subsec = 
adf_mstate_sect_add(mstate_mgr, bank_ids, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to add sec %s for vf%d bank%d\n", + ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + vf_bank_info.accel_dev = accel_dev; + vf_bank_info.vf_nr = vf_nr; + vf_bank_info.bank_nr = bank_nr; + l2_subsec = adf_mstate_sect_add(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS, + adf_gen4_vfmig_save_etr_regs, + &vf_bank_info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to add sec %s for vf%d bank%d\n", + ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr); + return -EINVAL; + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_etr(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec; + int ret, i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_ETRB_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + ret = adf_gen4_vfmig_save_etr_bank(accel_dev, vf_nr, i, + &sub_sects_mgr); + if (ret) + return ret; + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_misc(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + void __iomem *csr = adf_get_pmisc_base(accel_dev); + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + struct { + char *id; + u64 offset; + } misc_states[] = { + {ADF_MSTATE_VINTSRC_IDS, ADF_GEN4_VINTSOU_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTSRC_PF2VM_IDS, ADF_GEN4_VINTSOUPF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)}, + }; + ktime_t time_exp; + int i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_MISCB_IDS); + return -EINVAL; + } + + time_exp = ktime_add_us(ktime_get(), ADF_GEN4_PFVF_RSP_TIMEOUT_US); + while (!mutex_trylock(&vf_info->pfvf_mig_lock)) { + if (ktime_after(ktime_get(), time_exp)) { + dev_err(&GET_DEV(accel_dev), "Failed to get pfvf mig lock\n"); + return -ETIMEDOUT; + } + usleep_range(500, 1000); + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < ARRAY_SIZE(misc_states); i++) { + struct adf_mstate_vreginfo info; + u32 regv; + + info.addr = ®v; + info.size = sizeof(regv); + regv = ADF_CSR_RD(csr, misc_states[i].offset); + + l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, + misc_states[i].id, + &info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + misc_states[i].id); + mutex_unlock(&vf_info->pfvf_mig_lock); + return -EINVAL; + } + } + + 
mutex_unlock(&vf_info->pfvf_mig_lock); + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_generic(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct mig_user_sla src_slas[RL_RP_CNT_PER_LEAF_MAX] = { }; + u32 src_sla_cnt; + struct { + char *id; + struct adf_mstate_vreginfo info; + } gen_states[] = { + {ADF_MSTATE_IOV_INIT_IDS, + {&vf_info->init, sizeof(vf_info->init)}}, + {ADF_MSTATE_COMPAT_VER_IDS, + {&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}}, + {ADF_MSTATE_SLA_IDS, {src_slas, 0}}, + }; + int i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_GEN_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < ARRAY_SIZE(gen_states); i++) { + if (gen_states[i].info.addr == src_slas) { + src_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, src_slas); + gen_states[i].info.size = src_sla_cnt * sizeof(struct mig_user_sla); + } + + l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, + gen_states[i].id, + &gen_states[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + gen_states[i].id); + return -EINVAL; + } + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_config(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct { + char *id; + struct adf_mstate_vreginfo info; + } setups[] = { + {ADF_MSTATE_GEN_CAP_IDS, + {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}}, + {ADF_MSTATE_GEN_SVCMAP_IDS, + {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}}, + {ADF_MSTATE_GEN_EXTDC_IDS, + {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}}, + }; + int i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_CONFIG_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < ARRAY_SIZE(setups); i++) { + l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, setups[i].id, + &setups[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + setups[i].id); + return -EINVAL; + } + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + ret = adf_gen4_vfmig_save_setup(mdev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save setup for vf_nr %d\n", vf_nr); + return ret; + } + + 
adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state + mdev->setup_size, + mdev->state_size - mdev->setup_size); + if (!adf_mstate_preamble_add(vfmig->mstate_mgr)) + return -EINVAL; + + ret = adf_gen4_vfmig_save_generic(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save generic state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_save_misc(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save misc bar state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_save_etr(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save etr bar state for vf_nr %d\n", vf_nr); + return ret; + } + + adf_mstate_preamble_update(vfmig->mstate_mgr); + + return 0; +} + +static int adf_gen4_vfmig_load_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + ret = adf_gen4_vfmig_load_setup(mdev, mdev->state_size); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Failed to load setup for vf_nr %d\n", + vf_nr); + return ret; + } + + ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, + mdev->state + mdev->remote_setup_size, + mdev->state_size - mdev->remote_setup_size, + NULL, NULL); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Invalid state for vf_nr %d\n", + vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_load_generic(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load general state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_load_misc(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load misc bar state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_load_etr(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load etr bar state for vf_nr %d\n", vf_nr); + return ret; + } + + return 0; +} + +static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + if (mdev->setup_size) + return 0; + + adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size); + if (!adf_mstate_preamble_add(vfmig->mstate_mgr)) + return -EINVAL; + + ret = adf_gen4_vfmig_save_config(accel_dev, mdev->vf_id); + if (ret) + return ret; + + adf_mstate_preamble_update(vfmig->mstate_mgr); + mdev->setup_size = adf_mstate_state_size(vfmig->mstate_mgr); + + return 0; +} + +static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + u32 setup_size; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + if (mdev->remote_setup_size) + return 0; + + if (len < sizeof(struct adf_mstate_preh)) + return -EAGAIN; + + adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size); + setup_size = adf_mstate_state_size_from_remote(vfmig->mstate_mgr); + if (setup_size > mdev->state_size) + return -EINVAL; + + if (len < setup_size) + return -EAGAIN; + + ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, mdev->state, + setup_size, NULL, 
NULL); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Invalid setup for vf_nr %d\n", + vf_nr); + return ret; + } + + mdev->remote_setup_size = setup_size; + + ret = adf_gen4_vfmig_load_config(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load config for vf_nr %d\n", vf_nr); + return ret; + } + + return 0; +} + +void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops) +{ + vfmig_ops->init = adf_gen4_vfmig_init_device; + vfmig_ops->cleanup = adf_gen4_vfmig_cleanup_device; + vfmig_ops->reset = adf_gen4_vfmig_reset_device; + vfmig_ops->open = adf_gen4_vfmig_open_device; + vfmig_ops->close = adf_gen4_vfmig_close_device; + vfmig_ops->suspend = adf_gen4_vfmig_suspend_device; + vfmig_ops->resume = adf_gen4_vfmig_resume_device; + vfmig_ops->save_state = adf_gen4_vfmig_save_state; + vfmig_ops->load_state = adf_gen4_vfmig_load_state; + vfmig_ops->load_setup = adf_gen4_vfmig_load_setup; + vfmig_ops->save_setup = adf_gen4_vfmig_save_setup; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_vf_mig_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h new file mode 100644 index 0000000000..72216d078e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef ADF_GEN4_VF_MIG_H_ +#define ADF_GEN4_VF_MIG_H_ + +#include "adf_accel_devices.h" + +void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c new file mode 100644 index 0000000000..41cc763a74 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ + +#include +#include +#include "adf_mstate_mgr.h" + +#define ADF_MSTATE_MAGIC 0xADF5CAEA +#define ADF_MSTATE_VERSION 0x1 + +struct adf_mstate_sect_h { + u8 id[ADF_MSTATE_ID_LEN]; + u32 size; + u32 sub_sects; + u8 state[]; +}; + +u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr) +{ + return mgr->state - mgr->buf; +} + +static inline u32 adf_mstate_avail_room(struct adf_mstate_mgr *mgr) +{ + return mgr->buf + mgr->size - mgr->state; +} + +void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size) +{ + mgr->buf = buf; + mgr->state = buf; + mgr->size = size; + mgr->n_sects = 0; +}; + +struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size) +{ + struct adf_mstate_mgr *mgr; + + mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + if (!mgr) + return NULL; + + adf_mstate_mgr_init(mgr, buf, size); + + return mgr; +} + +void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr) +{ + kfree(mgr); +} + +void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr, + struct adf_mstate_mgr *p_mgr) +{ + adf_mstate_mgr_init(mgr, p_mgr->state, + p_mgr->size - adf_mstate_state_size(p_mgr)); +} + +void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr, + struct adf_mstate_sect_h *p_sect) +{ + adf_mstate_mgr_init(mgr, p_sect->state, p_sect->size); + mgr->n_sects = p_sect->sub_sects; +} + +static void adf_mstate_preamble_init(struct adf_mstate_preh *preamble) +{ + preamble->magic = ADF_MSTATE_MAGIC; + preamble->version = ADF_MSTATE_VERSION; + preamble->preh_len = sizeof(*preamble); + preamble->size = 0; + preamble->n_sects = 0; +} + +/* default preambles checker */ +static int adf_mstate_preamble_def_checker(struct adf_mstate_preh 
*preamble, + void *opaque) +{ + struct adf_mstate_mgr *mgr = opaque; + + if (preamble->magic != ADF_MSTATE_MAGIC || + preamble->version > ADF_MSTATE_VERSION || + preamble->preh_len > mgr->size) { + pr_debug("QAT: LM - Invalid state (magic=%#x, version=%#x, hlen=%u), state_size=%u\n", + preamble->magic, preamble->version, preamble->preh_len, + mgr->size); + return -EINVAL; + } + + return 0; +} + +struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_preh *pre = (struct adf_mstate_preh *)mgr->buf; + + if (adf_mstate_avail_room(mgr) < sizeof(*pre)) { + pr_err("QAT: LM - Not enough space for preamble\n"); + return NULL; + } + + adf_mstate_preamble_init(pre); + mgr->state += pre->preh_len; + + return pre; +} + +int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_preh *preamble = (struct adf_mstate_preh *)mgr->buf; + + preamble->size = adf_mstate_state_size(mgr) - preamble->preh_len; + preamble->n_sects = mgr->n_sects; + + return 0; +} + +static void adf_mstate_dump_sect(struct adf_mstate_sect_h *sect, + const char *prefix) +{ + pr_debug("QAT: LM - %s QAT state section %s\n", prefix, sect->id); + print_hex_dump_debug("h-", DUMP_PREFIX_OFFSET, 16, 2, sect, + sizeof(*sect), true); + print_hex_dump_debug("s-", DUMP_PREFIX_OFFSET, 16, 2, sect->state, + sect->size, true); +} + +static inline void __adf_mstate_sect_update(struct adf_mstate_mgr *mgr, + struct adf_mstate_sect_h *sect, + u32 size, + u32 n_subsects) +{ + sect->size += size; + sect->sub_sects += n_subsects; + mgr->n_sects++; + mgr->state += sect->size; + + adf_mstate_dump_sect(sect, "Add"); +} + +void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr, + struct adf_mstate_mgr *curr_mgr, + struct adf_mstate_sect_h *sect) +{ + __adf_mstate_sect_update(p_mgr, sect, adf_mstate_state_size(curr_mgr), + curr_mgr->n_sects); +} + +static struct adf_mstate_sect_h *adf_mstate_sect_add_header(struct adf_mstate_mgr *mgr, + const char *id) +{ + struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)(mgr->state); + + if (adf_mstate_avail_room(mgr) < sizeof(*sect)) { + pr_debug("QAT: LM - Not enough space for header of QAT state sect %s\n", id); + return NULL; + } + + strscpy(sect->id, id, sizeof(sect->id)); + sect->size = 0; + sect->sub_sects = 0; + mgr->state += sizeof(*sect); + + return sect; +} + +struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr, + const char *id, + struct adf_mstate_vreginfo *info) +{ + struct adf_mstate_sect_h *sect; + + sect = adf_mstate_sect_add_header(mgr, id); + if (!sect) + return NULL; + + if (adf_mstate_avail_room(mgr) < info->size) { + pr_debug("QAT: LM - Not enough space for QAT state sect %s, requires %u\n", + id, info->size); + return NULL; + } + + memcpy(sect->state, info->addr, info->size); + __adf_mstate_sect_update(mgr, sect, info->size, 0); + + return sect; +} + +struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_populate populate, + void *opaque) +{ + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *sect; + int avail_room, size; + + sect = adf_mstate_sect_add_header(mgr, id); + if (!sect) + return NULL; + + if (!populate) + return sect; + + avail_room = adf_mstate_avail_room(mgr); + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mgr); + + size = (*populate)(&sub_sects_mgr, sect->state, avail_room, opaque); + if (size < 0) + return NULL; + + size += adf_mstate_state_size(&sub_sects_mgr); + if (avail_room < size) { + pr_debug("QAT: LM - 
Not enough space for QAT state sect %s, requires %u\n", + id, size); + return NULL; + } + __adf_mstate_sect_update(mgr, sect, size, sub_sects_mgr.n_sects); + + return sect; +} + +static int adf_mstate_sect_validate(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_sect_h *start = (struct adf_mstate_sect_h *)mgr->state; + struct adf_mstate_sect_h *sect = start; + u64 end; + int i; + + end = (uintptr_t)mgr->buf + mgr->size; + for (i = 0; i < mgr->n_sects; i++) { + uintptr_t s_start = (uintptr_t)sect->state; + uintptr_t s_end = s_start + sect->size; + + if (s_end < s_start || s_end > end) { + pr_debug("QAT: LM - Corrupted state section (index=%u, size=%u) in state_mgr (size=%u, secs=%u)\n", + i, sect->size, mgr->size, mgr->n_sects); + return -EINVAL; + } + sect = (struct adf_mstate_sect_h *)s_end; + } + + pr_debug("QAT: LM - Scanned section (last child=%s, size=%lu) in state_mgr (size=%u, secs=%u)\n", + start->id, sizeof(struct adf_mstate_sect_h) * (ulong)(sect - start), + mgr->size, mgr->n_sects); + + return 0; +} + +u32 adf_mstate_state_size_from_remote(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_preh *preh = (struct adf_mstate_preh *)mgr->buf; + + return preh->preh_len + preh->size; +} + +int adf_mstate_mgr_init_from_remote(struct adf_mstate_mgr *mgr, u8 *buf, u32 size, + adf_mstate_preamble_checker pre_checker, + void *opaque) +{ + struct adf_mstate_preh *pre; + int ret; + + adf_mstate_mgr_init(mgr, buf, size); + pre = (struct adf_mstate_preh *)(mgr->buf); + + pr_debug("QAT: LM - Dump state preambles\n"); + print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 2, pre, pre->preh_len, 0); + + if (pre_checker) + ret = (*pre_checker)(pre, opaque); + else + ret = adf_mstate_preamble_def_checker(pre, mgr); + if (ret) + return ret; + + mgr->state = mgr->buf + pre->preh_len; + mgr->n_sects = pre->n_sects; + + return adf_mstate_sect_validate(mgr); +} + +struct adf_mstate_sect_h *adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_action action, + void *opaque) +{ + struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)mgr->state; + struct adf_mstate_mgr sub_sects_mgr; + int i, ret; + + for (i = 0; i < mgr->n_sects; i++) { + if (!strncmp(sect->id, id, sizeof(sect->id))) + goto found; + + sect = (struct adf_mstate_sect_h *)(sect->state + sect->size); + } + + return NULL; + +found: + adf_mstate_dump_sect(sect, "Found"); + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, sect); + if (sect->sub_sects && adf_mstate_sect_validate(&sub_sects_mgr)) + return NULL; + + if (!action) + return sect; + + ret = (*action)(&sub_sects_mgr, sect->state, sect->size, opaque); + if (ret) + return NULL; + + return sect; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h new file mode 100644 index 0000000000..81d263a596 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ + +#ifndef ADF_MSTATE_MGR_H +#define ADF_MSTATE_MGR_H + +#define ADF_MSTATE_ID_LEN 8 + +#define ADF_MSTATE_ETRB_IDS "ETRBAR" +#define ADF_MSTATE_MISCB_IDS "MISCBAR" +#define ADF_MSTATE_EXTB_IDS "EXTBAR" +#define ADF_MSTATE_GEN_IDS "GENER" +#define ADF_MSTATE_CONFIG_IDS "CONFIG" +#define ADF_MSTATE_SECTION_NUM 5 + +#define ADF_MSTATE_BANK_IDX_IDS "bnk" + +#define ADF_MSTATE_ETR_REGS_IDS "mregs" +#define ADF_MSTATE_VINTSRC_IDS "visrc" +#define ADF_MSTATE_VINTMSK_IDS "vimsk" +#define ADF_MSTATE_SLA_IDS "sla" +#define 
ADF_MSTATE_IOV_INIT_IDS "iovinit" +#define ADF_MSTATE_COMPAT_VER_IDS "compver" +#define ADF_MSTATE_GEN_CAP_IDS "gencap" +#define ADF_MSTATE_GEN_SVCMAP_IDS "svcmap" +#define ADF_MSTATE_GEN_EXTDC_IDS "extdc" +#define ADF_MSTATE_VINTSRC_PF2VM_IDS "vispv" +#define ADF_MSTATE_VINTMSK_PF2VM_IDS "vimpv" +#define ADF_MSTATE_VM2PF_IDS "vm2pf" +#define ADF_MSTATE_PF2VM_IDS "pf2vm" + +struct adf_mstate_mgr { + u8 *buf; + u8 *state; + u32 size; + u32 n_sects; +}; + +struct adf_mstate_preh { + u32 magic; + u32 version; + u16 preh_len; + u16 n_sects; + u32 size; +}; + +struct adf_mstate_vreginfo { + void *addr; + u32 size; +}; + +struct adf_mstate_sect_h; + +typedef int (*adf_mstate_preamble_checker)(struct adf_mstate_preh *preamble, void *opa); +typedef int (*adf_mstate_populate)(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa); +typedef int (*adf_mstate_action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, + void *opa); + +struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size); +void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr); +void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size); +void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr, + struct adf_mstate_mgr *p_mgr); +void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr, + struct adf_mstate_sect_h *p_sect); +int adf_mstate_mgr_init_from_remote(struct adf_mstate_mgr *mgr, + u8 *buf, u32 size, + adf_mstate_preamble_checker checker, + void *opaque); +struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr); +int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr); +u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr); +u32 adf_mstate_state_size_from_remote(struct adf_mstate_mgr *mgr); +void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr, + struct adf_mstate_mgr *curr_mgr, + struct adf_mstate_sect_h *sect); +struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr, + const char *id, + struct adf_mstate_vreginfo *info); +struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_populate populate, + void *opaque); +struct adf_mstate_sect_h *adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_action action, + void *opaque); +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c index 9ab93fbfef..b9b5e744a3 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c @@ -242,13 +242,7 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, "VersionRequest received from VF%d (vers %d) to PF (vers %d)\n", vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION); - if (vf_compat_ver == 0) - compat = ADF_PF2VF_VF_INCOMPATIBLE; - else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION) - compat = ADF_PF2VF_VF_COMPATIBLE; - else - compat = ADF_PF2VF_VF_COMPAT_UNKNOWN; - + compat = adf_vf_compat_checker(vf_compat_ver); vf_info->vf_compat_ver = vf_compat_ver; resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP; diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h index 2be048e228..1a044297d8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h @@ -28,4 +28,15 @@ u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg struct pfvf_message 
adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg, const struct pfvf_csr_format *fmt); +static inline u8 adf_vf_compat_checker(u8 vf_compat_ver) +{ + if (vf_compat_ver == 0) + return ADF_PF2VF_VF_INCOMPATIBLE; + + if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION) + return ADF_PF2VF_VF_COMPATIBLE; + + return ADF_PF2VF_VF_COMPAT_UNKNOWN; +} + #endif /* ADF_PFVF_UTILS_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index e10f0024f4..346ef8bee9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -183,14 +183,14 @@ static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_s } /** - * get_sla_arr_of_type() - Returns a pointer to SLA type specific array + * adf_rl_get_sla_arr_of_type() - Returns a pointer to SLA type specific array * @rl_data: pointer to ratelimiting data * @type: SLA type * @sla_arr: pointer to variable where requested pointer will be stored * * Return: Max number of elements allowed for the returned array */ -static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, +u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, struct rl_sla ***sla_arr) { switch (type) { @@ -778,7 +778,7 @@ static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla) rp_in_use[sla->ring_pairs_ids[i]] = false; update_budget(sla, old_cir, true); - get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); + adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); assign_node_to_parent(rl_data->accel_dev, sla, true); adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type); mark_rps_usage(sla, rl_data->rp_in_use, false); @@ -875,7 +875,7 @@ static int add_update_sla(struct adf_accel_dev *accel_dev, if (!is_update) { mark_rps_usage(sla, rl_data->rp_in_use, true); - get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); + adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); sla_type_arr[sla->node_id] = sla; rl_data->sla[sla->sla_id] = sla; } @@ -1065,7 +1065,7 @@ void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default) /* Unregister and remove all SLAs */ for (j = RL_LEAF; j >= end_type; j--) { - max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr); + max_id = adf_rl_get_sla_arr_of_type(rl_data, j, &sla_type_arr); for (i = 0; i < max_id; i++) { if (!sla_type_arr[i]) diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h index 269c6656fb..bfe750ea0e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.h +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h @@ -151,6 +151,8 @@ struct rl_sla { u16 ring_pairs_cnt; }; +u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, + struct rl_sla ***sla_arr); int adf_rl_add_sla(struct adf_accel_dev *accel_dev, struct adf_rl_sla_input_data *sla_in); int adf_rl_update_sla(struct adf_accel_dev *accel_dev, diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c index 87a70c00c4..8d645e7e04 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c @@ -26,10 +26,12 @@ static void adf_iov_send_resp(struct work_struct *work) u32 vf_nr = vf_info->vf_nr; bool ret; + mutex_lock(&vf_info->pfvf_mig_lock); ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr); if (ret) /* re-enable interrupt on PF from this VF */ 
adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr); + mutex_unlock(&vf_info->pfvf_mig_lock); kfree(pf2vf_resp); } @@ -62,6 +64,7 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) vf_info->vf_nr = i; mutex_init(&vf_info->pf2vf_lock); + mutex_init(&vf_info->pfvf_mig_lock); ratelimit_state_init(&vf_info->vf2pf_ratelimit, ADF_VF2PF_RATELIMIT_INTERVAL, ADF_VF2PF_RATELIMIT_BURST); @@ -138,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) if (hw_data->configure_iov_threads) hw_data->configure_iov_threads(accel_dev, false); - for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) + for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) { mutex_destroy(&vf->pf2vf_lock); + mutex_destroy(&vf->pfvf_mig_lock); + } if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) { kfree(accel_dev->pf.vf_info); diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.c b/drivers/crypto/intel/qat/qat_common/adf_transport.c index 630d0483c4..1efdf46490 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_transport.c +++ b/drivers/crypto/intel/qat/qat_common/adf_transport.c @@ -474,7 +474,6 @@ err: int adf_init_etr_data(struct adf_accel_dev *accel_dev) { struct adf_etr_data *etr_data; - struct adf_hw_device_data *hw_data = accel_dev->hw_device; void __iomem *csr_addr; u32 size; u32 num_banks = 0; @@ -495,8 +494,7 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) } accel_dev->transport = etr_data; - i = hw_data->get_etr_bar_id(hw_data); - csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr; + csr_addr = adf_get_etr_base(accel_dev); /* accel_dev->debugfs_dir should always be non-NULL here */ etr_data->debug = debugfs_create_dir("transport", diff --git a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c index 4128200a90..85c682e248 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c @@ -110,6 +110,8 @@ struct qat_dh_ctx { unsigned int p_size; bool g2; struct qat_crypto_instance *inst; + struct crypto_kpp *ftfm; + bool fallback; } __packed __aligned(64); struct qat_asym_request { @@ -381,6 +383,36 @@ unmap_src: return ret; } +static int qat_dh_generate_public_key(struct kpp_request *req) +{ + struct kpp_request *nreq = kpp_request_ctx(req); + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + kpp_request_set_tfm(nreq, ctx->ftfm); + return crypto_kpp_generate_public_key(nreq); + } + + return qat_dh_compute_value(req); +} + +static int qat_dh_compute_shared_secret(struct kpp_request *req) +{ + struct kpp_request *nreq = kpp_request_ctx(req); + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + kpp_request_set_tfm(nreq, ctx->ftfm); + return crypto_kpp_compute_shared_secret(nreq); + } + + return qat_dh_compute_value(req); +} + static int qat_dh_check_params_length(unsigned int p_len) { switch (p_len) { @@ -398,9 +430,6 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); - if (qat_dh_check_params_length(params->p_size << 3)) - return -EINVAL; - ctx->p_size = params->p_size; ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); if (!ctx->p) @@ -454,6 +483,13 @@ static int 
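The new qat_dh_generate_public_key()/qat_dh_compute_shared_secret() wrappers follow the usual software-fallback pattern for kpp transforms. A hedged, generic sketch of that pattern (the example_* names and context layout are illustrative; only the crypto API calls are real):

#include <crypto/kpp.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_kpp_ctx {
	struct crypto_kpp *fallback;	/* allocated with CRYPTO_ALG_NEED_FALLBACK */
	bool use_fallback;		/* set by setkey when HW cannot handle the params */
};

static int example_hw_generate_public_key(struct kpp_request *req)
{
	return -EOPNOTSUPP;		/* stand-in for the device path */
}

static int example_generate_public_key(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct example_kpp_ctx *ctx = kpp_tfm_ctx(tfm);
	struct kpp_request *subreq = kpp_request_ctx(req);	/* room reserved via kpp_set_reqsize() */

	if (!ctx->use_fallback)
		return example_hw_generate_public_key(req);

	/* Reuse src/dst/callback from the original request, retarget the tfm. */
	memcpy(subreq, req, sizeof(*req));
	kpp_request_set_tfm(subreq, ctx->fallback);
	return crypto_kpp_generate_public_key(subreq);
}

For this to be safe, the init_tfm hook must size the request context as the maximum of the driver's own needs and sizeof(struct kpp_request) plus the fallback's reqsize, which is what the patch does in qat_dh_init_tfm().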
qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf, if (crypto_dh_decode_key(buf, len, ¶ms) < 0) return -EINVAL; + if (qat_dh_check_params_length(params.p_size << 3)) { + ctx->fallback = true; + return crypto_kpp_set_secret(ctx->ftfm, buf, len); + } + + ctx->fallback = false; + /* Free old secret if any */ qat_dh_clear_ctx(dev, ctx); @@ -481,6 +517,9 @@ static unsigned int qat_dh_max_size(struct crypto_kpp *tfm) { struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + if (ctx->fallback) + return crypto_kpp_maxsize(ctx->ftfm); + return ctx->p_size; } @@ -489,11 +528,22 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm) struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); struct qat_crypto_instance *inst = qat_crypto_get_instance_node(numa_node_id()); + const char *alg = kpp_alg_name(tfm); + unsigned int reqsize; if (!inst) return -EINVAL; - kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64); + ctx->ftfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->ftfm)) + return PTR_ERR(ctx->ftfm); + + crypto_kpp_set_flags(ctx->ftfm, crypto_kpp_get_flags(tfm)); + + reqsize = max(sizeof(struct qat_asym_request) + 64, + sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->ftfm)); + + kpp_set_reqsize(tfm, reqsize); ctx->p_size = 0; ctx->g2 = false; @@ -506,6 +556,9 @@ static void qat_dh_exit_tfm(struct crypto_kpp *tfm) struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); struct device *dev = &GET_DEV(ctx->inst->accel_dev); + if (ctx->ftfm) + crypto_free_kpp(ctx->ftfm); + qat_dh_clear_ctx(dev, ctx); qat_crypto_put_instance(ctx->inst); } @@ -1265,8 +1318,8 @@ static struct akcipher_alg rsa = { static struct kpp_alg dh = { .set_secret = qat_dh_set_secret, - .generate_public_key = qat_dh_compute_value, - .compute_shared_secret = qat_dh_compute_value, + .generate_public_key = qat_dh_generate_public_key, + .compute_shared_secret = qat_dh_compute_shared_secret, .max_size = qat_dh_max_size, .init = qat_dh_init_tfm, .exit = qat_dh_exit_tfm, @@ -1276,6 +1329,7 @@ static struct kpp_alg dh = { .cra_priority = 1000, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct qat_dh_ctx), + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c index 76baed0a76..338acf29c4 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.c +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c @@ -81,7 +81,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, if (unlikely(!bufl)) return -ENOMEM; } else { - bufl = &buf->sgl_src.sgl_hdr; + bufl = container_of(&buf->sgl_src.sgl_hdr, + struct qat_alg_buf_list, hdr); memset(bufl, 0, sizeof(struct qat_alg_buf_list)); buf->sgl_src_valid = true; } @@ -139,7 +140,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, if (unlikely(!buflout)) goto err_in; } else { - buflout = &buf->sgl_dst.sgl_hdr; + buflout = container_of(&buf->sgl_dst.sgl_hdr, + struct qat_alg_buf_list, hdr); memset(buflout, 0, sizeof(struct qat_alg_buf_list)); buf->sgl_dst_valid = true; } diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h index d87e4f35ac..85bc32a9ec 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.h +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h @@ -15,14 +15,17 @@ struct qat_alg_buf { } __packed; struct qat_alg_buf_list { - u64 resrvd; - u32 num_bufs; - u32 num_mapped_bufs; + /* New members must be added within the __struct_group() macro below. 
*/ + __struct_group(qat_alg_buf_list_hdr, hdr, __packed, + u64 resrvd; + u32 num_bufs; + u32 num_mapped_bufs; + ); struct qat_alg_buf buffers[]; } __packed; struct qat_alg_fixed_buf_list { - struct qat_alg_buf_list sgl_hdr; + struct qat_alg_buf_list_hdr sgl_hdr; struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC]; } __packed __aligned(64); diff --git a/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c new file mode 100644 index 0000000000..892c2283a5 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_common_drv.h" + +struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id) +{ + struct adf_accel_dev *accel_dev; + struct qat_migdev_ops *ops; + struct qat_mig_dev *mdev; + + accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + if (!accel_dev) + return ERR_PTR(-ENODEV); + + ops = GET_VFMIG_OPS(accel_dev); + if (!ops || !ops->init || !ops->cleanup || !ops->reset || !ops->open || + !ops->close || !ops->suspend || !ops->resume || !ops->save_state || + !ops->load_state || !ops->save_setup || !ops->load_setup) + return ERR_PTR(-EINVAL); + + mdev = kmalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return ERR_PTR(-ENOMEM); + + mdev->vf_id = vf_id; + mdev->parent_accel_dev = accel_dev; + + return mdev; +} +EXPORT_SYMBOL_GPL(qat_vfmig_create); + +int qat_vfmig_init(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->init(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_init); + +void qat_vfmig_cleanup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->cleanup(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_cleanup); + +void qat_vfmig_reset(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->reset(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_reset); + +int qat_vfmig_open(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->open(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_open); + +void qat_vfmig_close(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + GET_VFMIG_OPS(accel_dev)->close(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_close); + +int qat_vfmig_suspend(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->suspend(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_suspend); + +int qat_vfmig_resume(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->resume(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_resume); + +int qat_vfmig_save_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->save_state(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_save_state); + +int qat_vfmig_save_setup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->save_setup(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_save_setup); + +int qat_vfmig_load_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->load_state(mdev); 
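This qat_alg_buf_list change and the nx842_crypto_header change further down use the same idiom: split the fixed header of a flexible-array struct into a tagged group so it can be embedded in a fixed-size container without a flexible-array member in the middle, then use container_of() to recover the full type. A minimal sketch with illustrative types (item, item_list, fixed_item_list are assumptions; struct_group_tagged() and container_of() are the real kernel helpers):

#include <linux/container_of.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct item {
	u64 addr;
	u32 len;
};

struct item_list {
	/* Also emits "struct item_list_hdr" holding just these members. */
	struct_group_tagged(item_list_hdr, hdr,
		u32 count;
	);
	struct item items[];		/* flexible array: only in the full type */
};

struct fixed_item_list {
	struct item_list_hdr hdr;	/* header only, no flexible array */
	struct item items[8];		/* pre-sized descriptor storage */
};

static struct item_list *full_view(struct fixed_item_list *f)
{
	/* Valid because fixed_item_list lays out items[] right after hdr,
	 * mirroring item_list. */
	return container_of(&f->hdr, struct item_list, hdr);
}

The layout is unchanged; only the types are, which is what the container_of() calls added to qat_bl.c (and nx-842.c) rely on to keep using the flexible-array view over the pre-sized storage.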
+} +EXPORT_SYMBOL_GPL(qat_vfmig_load_state); + +int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->load_setup(mdev, size); +} +EXPORT_SYMBOL_GPL(qat_vfmig_load_setup); + +void qat_vfmig_destroy(struct qat_mig_dev *mdev) +{ + kfree(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_destroy); diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile index 38d6f8e162..cfd3bd7577 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(srctree)/$(src)/../qat_common +ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index af14090cc4..6e24d57e6b 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include "adf_dh895xcc_hw_data.h" diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile index 0153c85ce7..64b54e92b2 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(srctree)/$(src)/../qat_common +ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index 70e56cc16e..f4ee4c2e00 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c index d2b8d26db9..215a1a8ba7 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c @@ -4,7 +4,8 @@ #include "otx2_cpt_devlink.h" static int otx2_cpt_dl_egrp_create(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl); struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf; @@ -13,7 +14,8 @@ static int otx2_cpt_dl_egrp_create(struct devlink *dl, u32 id, } static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl); struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf; @@ -45,7 +47,8 @@ static int otx2_cpt_dl_t106_mode_get(struct devlink *dl, u32 id, } static int otx2_cpt_dl_t106_mode_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl); struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf; diff --git 
a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index 2b3ebe0db3..c82775dbb5 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -101,6 +102,7 @@ struct dcp_async_ctx { struct crypto_skcipher *fallback; unsigned int key_len; uint8_t key[AES_KEYSIZE_128]; + bool key_referenced; }; struct dcp_aes_req_ctx { @@ -155,6 +157,7 @@ static struct dcp *global_sdcp; #define MXS_DCP_CONTROL0_HASH_TERM (1 << 13) #define MXS_DCP_CONTROL0_HASH_INIT (1 << 12) #define MXS_DCP_CONTROL0_PAYLOAD_KEY (1 << 11) +#define MXS_DCP_CONTROL0_OTP_KEY (1 << 10) #define MXS_DCP_CONTROL0_CIPHER_ENCRYPT (1 << 8) #define MXS_DCP_CONTROL0_CIPHER_INIT (1 << 9) #define MXS_DCP_CONTROL0_ENABLE_HASH (1 << 6) @@ -168,6 +171,8 @@ static struct dcp *global_sdcp; #define MXS_DCP_CONTROL1_CIPHER_MODE_ECB (0 << 4) #define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 (0 << 0) +#define MXS_DCP_CONTROL1_KEY_SELECT_SHIFT 8 + static int mxs_dcp_start_dma(struct dcp_async_ctx *actx) { int dma_err; @@ -220,17 +225,21 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx) static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, struct skcipher_request *req, int init) { - dma_addr_t key_phys, src_phys, dst_phys; + dma_addr_t key_phys = 0; + dma_addr_t src_phys, dst_phys; struct dcp *sdcp = global_sdcp; struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req); + bool key_referenced = actx->key_referenced; int ret; - key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, - 2 * AES_KEYSIZE_128, DMA_TO_DEVICE); - ret = dma_mapping_error(sdcp->dev, key_phys); - if (ret) - return ret; + if (!key_referenced) { + key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, + 2 * AES_KEYSIZE_128, DMA_TO_DEVICE); + ret = dma_mapping_error(sdcp->dev, key_phys); + if (ret) + return ret; + } src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf, DCP_BUF_SZ, DMA_TO_DEVICE); @@ -255,8 +264,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, MXS_DCP_CONTROL0_INTERRUPT | MXS_DCP_CONTROL0_ENABLE_CIPHER; - /* Payload contains the key. */ - desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; + if (key_referenced) + /* Set OTP key bit to select the key via KEY_SELECT. */ + desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY; + else + /* Payload contains the key. 
*/ + desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; if (rctx->enc) desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT; @@ -270,6 +283,9 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, else desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC; + if (key_referenced) + desc->control1 |= sdcp->coh->aes_key[0] << MXS_DCP_CONTROL1_KEY_SELECT_SHIFT; + desc->next_cmd_addr = 0; desc->source = src_phys; desc->destination = dst_phys; @@ -284,9 +300,9 @@ aes_done_run: err_dst: dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE); err_src: - dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128, - DMA_TO_DEVICE); - + if (!key_referenced) + dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128, + DMA_TO_DEVICE); return ret; } @@ -453,7 +469,7 @@ static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb) struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req); int ret; - if (unlikely(actx->key_len != AES_KEYSIZE_128)) + if (unlikely(actx->key_len != AES_KEYSIZE_128 && !actx->key_referenced)) return mxs_dcp_block_fallback(req, enc); rctx->enc = enc; @@ -500,6 +516,7 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, * there can still be an operation in progress. */ actx->key_len = len; + actx->key_referenced = false; if (len == AES_KEYSIZE_128) { memcpy(actx->key, key, len); return 0; @@ -516,6 +533,32 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, return crypto_skcipher_setkey(actx->fallback, key, len); } +static int mxs_dcp_aes_setrefkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int len) +{ + struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm); + + if (len != DCP_PAES_KEYSIZE) + return -EINVAL; + + switch (key[0]) { + case DCP_PAES_KEY_SLOT0: + case DCP_PAES_KEY_SLOT1: + case DCP_PAES_KEY_SLOT2: + case DCP_PAES_KEY_SLOT3: + case DCP_PAES_KEY_UNIQUE: + case DCP_PAES_KEY_OTP: + memcpy(actx->key, key, len); + actx->key_len = len; + actx->key_referenced = true; + break; + default: + return -EINVAL; + } + + return 0; +} + static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm) { const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm)); @@ -539,6 +582,13 @@ static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm) crypto_free_skcipher(actx->fallback); } +static int mxs_dcp_paes_init_tfm(struct crypto_skcipher *tfm) +{ + crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx)); + + return 0; +} + /* * Hashing (SHA1/SHA256) */ @@ -889,6 +939,39 @@ static struct skcipher_alg dcp_aes_algs[] = { .ivsize = AES_BLOCK_SIZE, .init = mxs_dcp_aes_fallback_init_tfm, .exit = mxs_dcp_aes_fallback_exit_tfm, + }, { + .base.cra_name = "ecb(paes)", + .base.cra_driver_name = "ecb-paes-dcp", + .base.cra_priority = 401, + .base.cra_alignmask = 15, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct dcp_async_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = DCP_PAES_KEYSIZE, + .max_keysize = DCP_PAES_KEYSIZE, + .setkey = mxs_dcp_aes_setrefkey, + .encrypt = mxs_dcp_aes_ecb_encrypt, + .decrypt = mxs_dcp_aes_ecb_decrypt, + .init = mxs_dcp_paes_init_tfm, + }, { + .base.cra_name = "cbc(paes)", + .base.cra_driver_name = "cbc-paes-dcp", + .base.cra_priority = 401, + .base.cra_alignmask = 15, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct dcp_async_ctx), + .base.cra_module = THIS_MODULE, + + 
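The reference-key ("paes") path above never DMA-maps key material; the descriptor just names a DCP key slot. A small model of the control-word selection (example_select_key() and its parameters are illustrative; the bit positions mirror the driver's MXS_DCP_CONTROL* defines shown above):

#include <linux/types.h>

#define EXAMPLE_CTRL0_OTP_KEY		(1 << 10)	/* MXS_DCP_CONTROL0_OTP_KEY */
#define EXAMPLE_CTRL0_PAYLOAD_KEY	(1 << 11)	/* MXS_DCP_CONTROL0_PAYLOAD_KEY */
#define EXAMPLE_CTRL1_KEY_SELECT_SHIFT	8		/* MXS_DCP_CONTROL1_KEY_SELECT_SHIFT */

static void example_select_key(u32 *control0, u32 *control1,
			       bool key_referenced, u8 slot)
{
	if (key_referenced) {
		/* Key lives inside the DCP: select it by slot number,
		 * i.e. key[0] as stored by mxs_dcp_aes_setrefkey(). */
		*control0 |= EXAMPLE_CTRL0_OTP_KEY;
		*control1 |= slot << EXAMPLE_CTRL1_KEY_SELECT_SHIFT;
	} else {
		/* Plain AES key travels in the descriptor payload. */
		*control0 |= EXAMPLE_CTRL0_PAYLOAD_KEY;
	}
}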
.min_keysize = DCP_PAES_KEYSIZE, + .max_keysize = DCP_PAES_KEYSIZE, + .setkey = mxs_dcp_aes_setrefkey, + .encrypt = mxs_dcp_aes_cbc_encrypt, + .decrypt = mxs_dcp_aes_cbc_decrypt, + .ivsize = AES_BLOCK_SIZE, + .init = mxs_dcp_paes_init_tfm, }, }; diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 2ab90ec10e..82214cde2b 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -251,7 +251,9 @@ int nx842_crypto_compress(struct crypto_tfm *tfm, u8 *dst, unsigned int *dlen) { struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); - struct nx842_crypto_header *hdr = &ctx->header; + struct nx842_crypto_header *hdr = + container_of(&ctx->header, + struct nx842_crypto_header, hdr); struct nx842_crypto_param p; struct nx842_constraints c = *ctx->driver->constraints; unsigned int groups, hdrsize, h; @@ -490,7 +492,7 @@ int nx842_crypto_decompress(struct crypto_tfm *tfm, } memcpy(&ctx->header, src, hdr_len); - hdr = &ctx->header; + hdr = container_of(&ctx->header, struct nx842_crypto_header, hdr); for (n = 0; n < hdr->groups; n++) { /* ignore applies to last group */ diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index 7590bfb24d..25fa70b211 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -157,9 +157,11 @@ struct nx842_crypto_header_group { } __packed; struct nx842_crypto_header { - __be16 magic; /* NX842_CRYPTO_MAGIC */ - __be16 ignore; /* decompressed end bytes to ignore */ - u8 groups; /* total groups in this header */ + struct_group_tagged(nx842_crypto_header_hdr, hdr, + __be16 magic; /* NX842_CRYPTO_MAGIC */ + __be16 ignore; /* decompressed end bytes to ignore */ + u8 groups; /* total groups in this header */ + ); struct nx842_crypto_header_group group[]; } __packed; @@ -171,7 +173,7 @@ struct nx842_crypto_ctx { u8 *wmem; u8 *sbounce, *dbounce; - struct nx842_crypto_header header; + struct nx842_crypto_header_hdr header; struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX]; struct nx842_driver *driver; diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 3423b5cde1..96d4af5d48 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -559,7 +559,7 @@ static int sahara_aes_process(struct skcipher_request *req) struct sahara_ctx *ctx; struct sahara_aes_reqctx *rctx; int ret; - unsigned long timeout; + unsigned long time_left; /* Request is ready to be dispatched by the device */ dev_dbg(dev->device, @@ -597,15 +597,15 @@ static int sahara_aes_process(struct skcipher_request *req) if (ret) return -EINVAL; - timeout = wait_for_completion_timeout(&dev->dma_completion, - msecs_to_jiffies(SAHARA_TIMEOUT_MS)); + time_left = wait_for_completion_timeout(&dev->dma_completion, + msecs_to_jiffies(SAHARA_TIMEOUT_MS)); dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, DMA_FROM_DEVICE); dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE); - if (!timeout) { + if (!time_left) { dev_err(dev->device, "AES timeout\n"); return -ETIMEDOUT; } @@ -931,7 +931,7 @@ static int sahara_sha_process(struct ahash_request *req) struct sahara_dev *dev = dev_ptr; struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); int ret; - unsigned long timeout; + unsigned long time_left; ret = sahara_sha_prepare_request(req); if (!ret) @@ -963,14 +963,14 @@ static int sahara_sha_process(struct ahash_request *req) sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR); - timeout = wait_for_completion_timeout(&dev->dma_completion, - msecs_to_jiffies(SAHARA_TIMEOUT_MS)); + time_left = 
wait_for_completion_timeout(&dev->dma_completion, + msecs_to_jiffies(SAHARA_TIMEOUT_MS)); if (rctx->sg_in_idx) dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE); - if (!timeout) { + if (!time_left) { dev_err(dev->device, "SHA timeout\n"); return -ETIMEDOUT; } diff --git a/drivers/crypto/starfive/Kconfig b/drivers/crypto/starfive/Kconfig index cb59357b58..0fe389e9f9 100644 --- a/drivers/crypto/starfive/Kconfig +++ b/drivers/crypto/starfive/Kconfig @@ -14,6 +14,10 @@ config CRYPTO_DEV_JH7110 select CRYPTO_RSA select CRYPTO_AES select CRYPTO_CCM + select CRYPTO_GCM + select CRYPTO_ECB + select CRYPTO_CBC + select CRYPTO_CTR help Support for StarFive JH7110 crypto hardware acceleration engine. This module provides acceleration for public key algo, diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c index 1ac15cc4ef..86a1a1fa9f 100644 --- a/drivers/crypto/starfive/jh7110-aes.c +++ b/drivers/crypto/starfive/jh7110-aes.c @@ -78,7 +78,7 @@ static inline int is_gcm(struct starfive_cryp_dev *cryp) return (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_GCM; } -static inline int is_encrypt(struct starfive_cryp_dev *cryp) +static inline bool is_encrypt(struct starfive_cryp_dev *cryp) { return cryp->flags & FLG_ENCRYPT; } @@ -103,16 +103,6 @@ static void starfive_aes_aead_hw_start(struct starfive_cryp_ctx *ctx, u32 hw_mod } } -static inline void starfive_aes_set_ivlen(struct starfive_cryp_ctx *ctx) -{ - struct starfive_cryp_dev *cryp = ctx->cryp; - - if (is_gcm(cryp)) - writel(GCM_AES_IV_SIZE, cryp->base + STARFIVE_AES_IVLEN); - else - writel(AES_BLOCK_SIZE, cryp->base + STARFIVE_AES_IVLEN); -} - static inline void starfive_aes_set_alen(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; @@ -261,7 +251,6 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx) rctx->csr.aes.mode = hw_mode; rctx->csr.aes.cmode = !is_encrypt(cryp); - rctx->csr.aes.ie = 1; rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_1; if (cryp->side_chan) { @@ -279,7 +268,7 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx) case STARFIVE_AES_MODE_GCM: starfive_aes_set_alen(ctx); starfive_aes_set_mlen(ctx); - starfive_aes_set_ivlen(ctx); + writel(GCM_AES_IV_SIZE, cryp->base + STARFIVE_AES_IVLEN); starfive_aes_aead_hw_start(ctx, hw_mode); starfive_aes_write_iv(ctx, (void *)cryp->req.areq->iv); break; @@ -300,52 +289,49 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx) return cryp->err; } -static int starfive_aes_read_authtag(struct starfive_cryp_dev *cryp) +static int starfive_aes_read_authtag(struct starfive_cryp_ctx *ctx) { - int i, start_addr; + struct starfive_cryp_dev *cryp = ctx->cryp; + struct starfive_cryp_request_ctx *rctx = ctx->rctx; + int i; if (starfive_aes_wait_busy(cryp)) return dev_err_probe(cryp->dev, -ETIMEDOUT, "Timeout waiting for tag generation."); - start_addr = STARFIVE_AES_NONCE0; - - if (is_gcm(cryp)) - for (i = 0; i < AES_BLOCK_32; i++, start_addr += 4) - cryp->tag_out[i] = readl(cryp->base + start_addr); - else + if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_GCM) { + cryp->tag_out[0] = readl(cryp->base + STARFIVE_AES_NONCE0); + cryp->tag_out[1] = readl(cryp->base + STARFIVE_AES_NONCE1); + cryp->tag_out[2] = readl(cryp->base + STARFIVE_AES_NONCE2); + cryp->tag_out[3] = readl(cryp->base + STARFIVE_AES_NONCE3); + } else { for (i = 0; i < AES_BLOCK_32; i++) cryp->tag_out[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R); + } if (is_encrypt(cryp)) { - scatterwalk_copychunks(cryp->tag_out, 
&cryp->out_walk, cryp->authsize, 1); + scatterwalk_map_and_copy(cryp->tag_out, rctx->out_sg, + cryp->total_in, cryp->authsize, 1); } else { - scatterwalk_copychunks(cryp->tag_in, &cryp->in_walk, cryp->authsize, 0); - if (crypto_memneq(cryp->tag_in, cryp->tag_out, cryp->authsize)) - return dev_err_probe(cryp->dev, -EBADMSG, "Failed tag verification\n"); + return -EBADMSG; } return 0; } -static void starfive_aes_finish_req(struct starfive_cryp_dev *cryp) +static void starfive_aes_finish_req(struct starfive_cryp_ctx *ctx) { - union starfive_aes_csr csr; + struct starfive_cryp_dev *cryp = ctx->cryp; int err = cryp->err; if (!err && cryp->authsize) - err = starfive_aes_read_authtag(cryp); + err = starfive_aes_read_authtag(ctx); if (!err && ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC || (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CTR)) starfive_aes_get_iv(cryp, (void *)cryp->req.sreq->iv); - /* reset irq flags*/ - csr.v = 0; - csr.aesrst = 1; - writel(csr.v, cryp->base + STARFIVE_AES_CSR); - if (cryp->authsize) crypto_finalize_aead_request(cryp->engine, cryp->req.areq, err); else @@ -353,39 +339,6 @@ static void starfive_aes_finish_req(struct starfive_cryp_dev *cryp) err); } -void starfive_aes_done_task(unsigned long param) -{ - struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param; - u32 block[AES_BLOCK_32]; - u32 stat; - int i; - - for (i = 0; i < AES_BLOCK_32; i++) - block[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R); - - scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, AES_BLOCK_SIZE, - cryp->total_out), 1); - - cryp->total_out -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_out); - - if (!cryp->total_out) { - starfive_aes_finish_req(cryp); - return; - } - - memset(block, 0, AES_BLOCK_SIZE); - scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE, - cryp->total_in), 0); - cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in); - - for (i = 0; i < AES_BLOCK_32; i++) - writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R); - - stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); - stat &= ~STARFIVE_IE_MASK_AES_DONE; - writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET); -} - static int starfive_aes_gcm_write_adata(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; @@ -451,60 +404,165 @@ static int starfive_aes_ccm_write_adata(struct starfive_cryp_ctx *ctx) return 0; } -static int starfive_aes_prepare_req(struct skcipher_request *req, - struct aead_request *areq) +static void starfive_aes_dma_done(void *param) { - struct starfive_cryp_ctx *ctx; - struct starfive_cryp_request_ctx *rctx; - struct starfive_cryp_dev *cryp; + struct starfive_cryp_dev *cryp = param; - if (!req && !areq) - return -EINVAL; + complete(&cryp->dma_done); +} - ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) : - crypto_aead_ctx(crypto_aead_reqtfm(areq)); +static void starfive_aes_dma_init(struct starfive_cryp_dev *cryp) +{ + cryp->cfg_in.direction = DMA_MEM_TO_DEV; + cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES; + cryp->cfg_in.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cryp->cfg_in.src_maxburst = cryp->dma_maxburst; + cryp->cfg_in.dst_maxburst = cryp->dma_maxburst; + cryp->cfg_in.dst_addr = cryp->phys_base + STARFIVE_ALG_FIFO_OFFSET; - cryp = ctx->cryp; - rctx = req ? 
skcipher_request_ctx(req) : aead_request_ctx(areq); + dmaengine_slave_config(cryp->tx, &cryp->cfg_in); - if (req) { - cryp->req.sreq = req; - cryp->total_in = req->cryptlen; - cryp->total_out = req->cryptlen; - cryp->assoclen = 0; - cryp->authsize = 0; - } else { - cryp->req.areq = areq; - cryp->assoclen = areq->assoclen; - cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq)); - if (is_encrypt(cryp)) { - cryp->total_in = areq->cryptlen; - cryp->total_out = areq->cryptlen; - } else { - cryp->total_in = areq->cryptlen - cryp->authsize; - cryp->total_out = cryp->total_in; - } - } + cryp->cfg_out.direction = DMA_DEV_TO_MEM; + cryp->cfg_out.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cryp->cfg_out.dst_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES; + cryp->cfg_out.src_maxburst = 4; + cryp->cfg_out.dst_maxburst = 4; + cryp->cfg_out.src_addr = cryp->phys_base + STARFIVE_ALG_FIFO_OFFSET; - rctx->in_sg = req ? req->src : areq->src; - scatterwalk_start(&cryp->in_walk, rctx->in_sg); + dmaengine_slave_config(cryp->rx, &cryp->cfg_out); - rctx->out_sg = req ? req->dst : areq->dst; - scatterwalk_start(&cryp->out_walk, rctx->out_sg); + init_completion(&cryp->dma_done); +} - if (cryp->assoclen) { - rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL); - if (!rctx->adata) - return dev_err_probe(cryp->dev, -ENOMEM, - "Failed to alloc memory for adata"); +static int starfive_aes_dma_xfer(struct starfive_cryp_dev *cryp, + struct scatterlist *src, + struct scatterlist *dst, + int len) +{ + struct dma_async_tx_descriptor *in_desc, *out_desc; + union starfive_alg_cr alg_cr; + int ret = 0, in_save, out_save; + + alg_cr.v = 0; + alg_cr.start = 1; + alg_cr.aes_dma_en = 1; + writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET); + + in_save = sg_dma_len(src); + out_save = sg_dma_len(dst); - scatterwalk_copychunks(rctx->adata, &cryp->in_walk, cryp->assoclen, 0); - scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->assoclen, 2); + writel(ALIGN(len, AES_BLOCK_SIZE), cryp->base + STARFIVE_DMA_IN_LEN_OFFSET); + writel(ALIGN(len, AES_BLOCK_SIZE), cryp->base + STARFIVE_DMA_OUT_LEN_OFFSET); + + sg_dma_len(src) = ALIGN(len, AES_BLOCK_SIZE); + sg_dma_len(dst) = ALIGN(len, AES_BLOCK_SIZE); + + out_desc = dmaengine_prep_slave_sg(cryp->rx, dst, 1, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!out_desc) { + ret = -EINVAL; + goto dma_err; } - ctx->rctx = rctx; + out_desc->callback = starfive_aes_dma_done; + out_desc->callback_param = cryp; + + reinit_completion(&cryp->dma_done); + dmaengine_submit(out_desc); + dma_async_issue_pending(cryp->rx); + + in_desc = dmaengine_prep_slave_sg(cryp->tx, src, 1, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!in_desc) { + ret = -EINVAL; + goto dma_err; + } + + dmaengine_submit(in_desc); + dma_async_issue_pending(cryp->tx); + + if (!wait_for_completion_timeout(&cryp->dma_done, + msecs_to_jiffies(1000))) + ret = -ETIMEDOUT; + +dma_err: + sg_dma_len(src) = in_save; + sg_dma_len(dst) = out_save; + + alg_cr.v = 0; + alg_cr.clear = 1; + writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET); + + return ret; +} + +static int starfive_aes_map_sg(struct starfive_cryp_dev *cryp, + struct scatterlist *src, + struct scatterlist *dst) +{ + struct scatterlist *stsg, *dtsg; + struct scatterlist _src[2], _dst[2]; + unsigned int remain = cryp->total_in; + unsigned int len, src_nents, dst_nents; + int ret; + + if (src == dst) { + for (stsg = src, dtsg = dst; remain > 0; + stsg = sg_next(stsg), dtsg = sg_next(dtsg)) { + src_nents = dma_map_sg(cryp->dev, stsg, 1, 
DMA_BIDIRECTIONAL); + if (src_nents == 0) + return dev_err_probe(cryp->dev, -ENOMEM, + "dma_map_sg error\n"); + + dst_nents = src_nents; + len = min(sg_dma_len(stsg), remain); + + ret = starfive_aes_dma_xfer(cryp, stsg, dtsg, len); + dma_unmap_sg(cryp->dev, stsg, 1, DMA_BIDIRECTIONAL); + if (ret) + return ret; + + remain -= len; + } + } else { + for (stsg = src, dtsg = dst;;) { + src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_TO_DEVICE); + if (src_nents == 0) + return dev_err_probe(cryp->dev, -ENOMEM, + "dma_map_sg src error\n"); + + dst_nents = dma_map_sg(cryp->dev, dtsg, 1, DMA_FROM_DEVICE); + if (dst_nents == 0) + return dev_err_probe(cryp->dev, -ENOMEM, + "dma_map_sg dst error\n"); + + len = min(sg_dma_len(stsg), sg_dma_len(dtsg)); + len = min(len, remain); + + ret = starfive_aes_dma_xfer(cryp, stsg, dtsg, len); + dma_unmap_sg(cryp->dev, stsg, 1, DMA_TO_DEVICE); + dma_unmap_sg(cryp->dev, dtsg, 1, DMA_FROM_DEVICE); + if (ret) + return ret; + + remain -= len; + if (remain == 0) + break; + + if (sg_dma_len(stsg) - len) { + stsg = scatterwalk_ffwd(_src, stsg, len); + dtsg = sg_next(dtsg); + } else if (sg_dma_len(dtsg) - len) { + dtsg = scatterwalk_ffwd(_dst, dtsg, len); + stsg = sg_next(stsg); + } else { + stsg = sg_next(stsg); + dtsg = sg_next(dtsg); + } + } + } - return starfive_aes_hw_init(ctx); + return 0; } static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq) @@ -513,35 +571,42 @@ static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq) container_of(areq, struct skcipher_request, base); struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct starfive_cryp_request_ctx *rctx = skcipher_request_ctx(req); struct starfive_cryp_dev *cryp = ctx->cryp; - u32 block[AES_BLOCK_32]; - u32 stat; - int err; - int i; + int ret; - err = starfive_aes_prepare_req(req, NULL); - if (err) - return err; + cryp->req.sreq = req; + cryp->total_in = req->cryptlen; + cryp->total_out = req->cryptlen; + cryp->assoclen = 0; + cryp->authsize = 0; - /* - * Write first plain/ciphertext block to start the module - * then let irq tasklet handle the rest of the data blocks. 
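starfive_aes_map_sg() above keeps the source and destination walks in lockstep even when their scatterlist entry boundaries differ, by fast-forwarding the longer entry with scatterwalk_ffwd(). A hedged sketch of that one helper (advance_sg() is an illustrative wrapper; scatterwalk_ffwd() is the real API):

#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>

static struct scatterlist *advance_sg(struct scatterlist tmp[2],
				      struct scatterlist *sg,
				      unsigned int consumed)
{
	/* Returns a scatterlist view that starts 'consumed' bytes into sg;
	 * tmp[] backs the synthetic first entry, so it must stay alive for
	 * as long as the returned list is in use. */
	return scatterwalk_ffwd(tmp, sg, consumed);
}

In the driver, the on-stack _src[2]/_dst[2] arrays play the role of tmp[], which is why they are declared in the same scope as the transfer loop.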
- */ - scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE, - cryp->total_in), 0); - cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in); + rctx->in_sg = req->src; + rctx->out_sg = req->dst; + + ctx->rctx = rctx; + + ret = starfive_aes_hw_init(ctx); + if (ret) + return ret; - for (i = 0; i < AES_BLOCK_32; i++) - writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R); + if (!cryp->total_in) + goto finish_req; - stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); - stat &= ~STARFIVE_IE_MASK_AES_DONE; - writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET); + starfive_aes_dma_init(cryp); + + ret = starfive_aes_map_sg(cryp, rctx->in_sg, rctx->out_sg); + if (ret) + return ret; + +finish_req: + starfive_aes_finish_req(ctx); return 0; } -static int starfive_aes_init_tfm(struct crypto_skcipher *tfm) +static int starfive_aes_init_tfm(struct crypto_skcipher *tfm, + const char *alg_name) { struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); @@ -549,12 +614,26 @@ static int starfive_aes_init_tfm(struct crypto_skcipher *tfm) if (!ctx->cryp) return -ENODEV; + ctx->skcipher_fbk = crypto_alloc_skcipher(alg_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->skcipher_fbk)) + return dev_err_probe(ctx->cryp->dev, PTR_ERR(ctx->skcipher_fbk), + "%s() failed to allocate fallback for %s\n", + __func__, alg_name); + crypto_skcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) + - sizeof(struct skcipher_request)); + crypto_skcipher_reqsize(ctx->skcipher_fbk)); return 0; } +static void starfive_aes_exit_tfm(struct crypto_skcipher *tfm) +{ + struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); + + crypto_free_skcipher(ctx->skcipher_fbk); +} + static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq) { struct aead_request *req = @@ -562,79 +641,99 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq struct starfive_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct starfive_cryp_dev *cryp = ctx->cryp; - struct starfive_cryp_request_ctx *rctx; - u32 block[AES_BLOCK_32]; - u32 stat; - int err; - int i; + struct starfive_cryp_request_ctx *rctx = aead_request_ctx(req); + struct scatterlist _src[2], _dst[2]; + int ret; + + cryp->req.areq = req; + cryp->assoclen = req->assoclen; + cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); + + rctx->in_sg = scatterwalk_ffwd(_src, req->src, cryp->assoclen); + if (req->src == req->dst) + rctx->out_sg = rctx->in_sg; + else + rctx->out_sg = scatterwalk_ffwd(_dst, req->dst, cryp->assoclen); + + if (is_encrypt(cryp)) { + cryp->total_in = req->cryptlen; + cryp->total_out = req->cryptlen; + } else { + cryp->total_in = req->cryptlen - cryp->authsize; + cryp->total_out = cryp->total_in; + scatterwalk_map_and_copy(cryp->tag_in, req->src, + cryp->total_in + cryp->assoclen, + cryp->authsize, 0); + } - err = starfive_aes_prepare_req(NULL, req); - if (err) - return err; + if (cryp->assoclen) { + rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL); + if (!rctx->adata) + return dev_err_probe(cryp->dev, -ENOMEM, + "Failed to alloc memory for adata"); + + if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, cryp->assoclen), + rctx->adata, cryp->assoclen) != cryp->assoclen) + return -EINVAL; + } + + if (cryp->total_in) + sg_zero_buffer(rctx->in_sg, sg_nents(rctx->in_sg), + sg_dma_len(rctx->in_sg) - cryp->total_in, + cryp->total_in); - rctx = ctx->rctx; + ctx->rctx = rctx; + + ret = starfive_aes_hw_init(ctx); + if (ret) + return ret; if 
(!cryp->assoclen) goto write_text; if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CCM) - cryp->err = starfive_aes_ccm_write_adata(ctx); + ret = starfive_aes_ccm_write_adata(ctx); else - cryp->err = starfive_aes_gcm_write_adata(ctx); + ret = starfive_aes_gcm_write_adata(ctx); kfree(rctx->adata); - if (cryp->err) - return cryp->err; + if (ret) + return ret; write_text: if (!cryp->total_in) goto finish_req; - /* - * Write first plain/ciphertext block to start the module - * then let irq tasklet handle the rest of the data blocks. - */ - scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE, - cryp->total_in), 0); - cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in); - - for (i = 0; i < AES_BLOCK_32; i++) - writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R); - - stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); - stat &= ~STARFIVE_IE_MASK_AES_DONE; - writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET); + starfive_aes_dma_init(cryp); - return 0; + ret = starfive_aes_map_sg(cryp, rctx->in_sg, rctx->out_sg); + if (ret) + return ret; finish_req: - starfive_aes_finish_req(cryp); + starfive_aes_finish_req(ctx); return 0; } -static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm) +static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm, + const char *alg_name) { struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm); - struct starfive_cryp_dev *cryp = ctx->cryp; - struct crypto_tfm *aead = crypto_aead_tfm(tfm); - struct crypto_alg *alg = aead->__crt_alg; ctx->cryp = starfive_cryp_find_dev(ctx); if (!ctx->cryp) return -ENODEV; - if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) { - ctx->aead_fbk = crypto_alloc_aead(alg->cra_name, 0, - CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->aead_fbk)) - return dev_err_probe(cryp->dev, PTR_ERR(ctx->aead_fbk), - "%s() failed to allocate fallback for %s\n", - __func__, alg->cra_name); - } + ctx->aead_fbk = crypto_alloc_aead(alg_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->aead_fbk)) + return dev_err_probe(ctx->cryp->dev, PTR_ERR(ctx->aead_fbk), + "%s() failed to allocate fallback for %s\n", + __func__, alg_name); - crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_ctx) + - sizeof(struct aead_request)); + crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) + + crypto_aead_reqsize(ctx->aead_fbk)); return 0; } @@ -646,6 +745,46 @@ static void starfive_aes_aead_exit_tfm(struct crypto_aead *tfm) crypto_free_aead(ctx->aead_fbk); } +static bool starfive_aes_check_unaligned(struct starfive_cryp_dev *cryp, + struct scatterlist *src, + struct scatterlist *dst) +{ + struct scatterlist *tsg; + int i; + + for_each_sg(src, tsg, sg_nents(src), i) + if (!IS_ALIGNED(tsg->offset, sizeof(u32)) || + (!IS_ALIGNED(tsg->length, AES_BLOCK_SIZE) && + !sg_is_last(tsg))) + return true; + + if (src != dst) + for_each_sg(dst, tsg, sg_nents(dst), i) + if (!IS_ALIGNED(tsg->offset, sizeof(u32)) || + (!IS_ALIGNED(tsg->length, AES_BLOCK_SIZE) && + !sg_is_last(tsg))) + return true; + + return false; +} + +static int starfive_aes_do_fallback(struct skcipher_request *req, bool enc) +{ + struct starfive_cryp_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct skcipher_request *subreq = skcipher_request_ctx(req); + + skcipher_request_set_tfm(subreq, ctx->skcipher_fbk); + skcipher_request_set_callback(subreq, req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + + return enc ? 
crypto_skcipher_encrypt(subreq) : + crypto_skcipher_decrypt(subreq); +} + static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); @@ -660,32 +799,54 @@ static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags) if (req->cryptlen & blocksize_align) return -EINVAL; + if (starfive_aes_check_unaligned(cryp, req->src, req->dst)) + return starfive_aes_do_fallback(req, is_encrypt(cryp)); + return crypto_transfer_skcipher_request_to_engine(cryp->engine, req); } +static int starfive_aes_aead_do_fallback(struct aead_request *req, bool enc) +{ + struct starfive_cryp_ctx *ctx = + crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct aead_request *subreq = aead_request_ctx(req); + + aead_request_set_tfm(subreq, ctx->aead_fbk); + aead_request_set_callback(subreq, req->base.flags, + req->base.complete, + req->base.data); + aead_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + aead_request_set_ad(subreq, req->assoclen); + + return enc ? crypto_aead_encrypt(subreq) : + crypto_aead_decrypt(subreq); +} + static int starfive_aes_aead_crypt(struct aead_request *req, unsigned long flags) { struct starfive_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct starfive_cryp_dev *cryp = ctx->cryp; + struct scatterlist *src, *dst, _src[2], _dst[2]; cryp->flags = flags; - /* - * HW engine could not perform CCM tag verification on - * non-blocksize aligned text, use fallback algo instead + /* aes-ccm does not support tag verification for non-aligned text, + * use fallback for ccm decryption instead. */ - if (ctx->aead_fbk && !is_encrypt(cryp)) { - struct aead_request *subreq = aead_request_ctx(req); + if (((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CCM) && + !is_encrypt(cryp)) + return starfive_aes_aead_do_fallback(req, 0); - aead_request_set_tfm(subreq, ctx->aead_fbk); - aead_request_set_callback(subreq, req->base.flags, - req->base.complete, req->base.data); - aead_request_set_crypt(subreq, req->src, - req->dst, req->cryptlen, req->iv); - aead_request_set_ad(subreq, req->assoclen); + src = scatterwalk_ffwd(_src, req->src, req->assoclen); - return crypto_aead_decrypt(subreq); - } + if (req->src == req->dst) + dst = src; + else + dst = scatterwalk_ffwd(_dst, req->dst, req->assoclen); + + if (starfive_aes_check_unaligned(cryp, src, dst)) + return starfive_aes_aead_do_fallback(req, is_encrypt(cryp)); return crypto_transfer_aead_request_to_engine(cryp->engine, req); } @@ -706,7 +867,7 @@ static int starfive_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, memcpy(ctx->key, key, keylen); ctx->keylen = keylen; - return 0; + return crypto_skcipher_setkey(ctx->skcipher_fbk, key, keylen); } static int starfive_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, @@ -725,16 +886,20 @@ static int starfive_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, memcpy(ctx->key, key, keylen); ctx->keylen = keylen; - if (ctx->aead_fbk) - return crypto_aead_setkey(ctx->aead_fbk, key, keylen); - - return 0; + return crypto_aead_setkey(ctx->aead_fbk, key, keylen); } static int starfive_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { - return crypto_gcm_check_authsize(authsize); + struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm); + int ret; + + ret = crypto_gcm_check_authsize(authsize); + if (ret) + return ret; + + return crypto_aead_setauthsize(ctx->aead_fbk, authsize); } static int starfive_aes_ccm_setauthsize(struct crypto_aead *tfm, @@ -820,9 
+985,35 @@ static int starfive_aes_ccm_decrypt(struct aead_request *req) return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM); } +static int starfive_aes_ecb_init_tfm(struct crypto_skcipher *tfm) +{ + return starfive_aes_init_tfm(tfm, "ecb(aes-generic)"); +} + +static int starfive_aes_cbc_init_tfm(struct crypto_skcipher *tfm) +{ + return starfive_aes_init_tfm(tfm, "cbc(aes-generic)"); +} + +static int starfive_aes_ctr_init_tfm(struct crypto_skcipher *tfm) +{ + return starfive_aes_init_tfm(tfm, "ctr(aes-generic)"); +} + +static int starfive_aes_ccm_init_tfm(struct crypto_aead *tfm) +{ + return starfive_aes_aead_init_tfm(tfm, "ccm_base(ctr(aes-generic),cbcmac(aes-generic))"); +} + +static int starfive_aes_gcm_init_tfm(struct crypto_aead *tfm) +{ + return starfive_aes_aead_init_tfm(tfm, "gcm_base(ctr(aes-generic),ghash-generic)"); +} + static struct skcipher_engine_alg skcipher_algs[] = { { - .base.init = starfive_aes_init_tfm, + .base.init = starfive_aes_ecb_init_tfm, + .base.exit = starfive_aes_exit_tfm, .base.setkey = starfive_aes_setkey, .base.encrypt = starfive_aes_ecb_encrypt, .base.decrypt = starfive_aes_ecb_decrypt, @@ -832,7 +1023,8 @@ static struct skcipher_engine_alg skcipher_algs[] = { .cra_name = "ecb(aes)", .cra_driver_name = "starfive-ecb-aes", .cra_priority = 200, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 0xf, @@ -842,7 +1034,8 @@ static struct skcipher_engine_alg skcipher_algs[] = { .do_one_request = starfive_aes_do_one_req, }, }, { - .base.init = starfive_aes_init_tfm, + .base.init = starfive_aes_cbc_init_tfm, + .base.exit = starfive_aes_exit_tfm, .base.setkey = starfive_aes_setkey, .base.encrypt = starfive_aes_cbc_encrypt, .base.decrypt = starfive_aes_cbc_decrypt, @@ -853,7 +1046,8 @@ static struct skcipher_engine_alg skcipher_algs[] = { .cra_name = "cbc(aes)", .cra_driver_name = "starfive-cbc-aes", .cra_priority = 200, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 0xf, @@ -863,7 +1057,8 @@ static struct skcipher_engine_alg skcipher_algs[] = { .do_one_request = starfive_aes_do_one_req, }, }, { - .base.init = starfive_aes_init_tfm, + .base.init = starfive_aes_ctr_init_tfm, + .base.exit = starfive_aes_exit_tfm, .base.setkey = starfive_aes_setkey, .base.encrypt = starfive_aes_ctr_encrypt, .base.decrypt = starfive_aes_ctr_decrypt, @@ -874,7 +1069,8 @@ static struct skcipher_engine_alg skcipher_algs[] = { .cra_name = "ctr(aes)", .cra_driver_name = "starfive-ctr-aes", .cra_priority = 200, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 0xf, @@ -892,7 +1088,7 @@ static struct aead_engine_alg aead_algs[] = { .base.setauthsize = starfive_aes_gcm_setauthsize, .base.encrypt = starfive_aes_gcm_encrypt, .base.decrypt = starfive_aes_gcm_decrypt, - .base.init = starfive_aes_aead_init_tfm, + .base.init = starfive_aes_gcm_init_tfm, .base.exit = starfive_aes_aead_exit_tfm, .base.ivsize = GCM_AES_IV_SIZE, .base.maxauthsize = AES_BLOCK_SIZE, @@ -900,7 +1096,8 @@ static struct aead_engine_alg aead_algs[] = { .cra_name = "gcm(aes)", .cra_driver_name = "starfive-gcm-aes", .cra_priority = 200, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + 
CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 0xf, @@ -914,7 +1111,7 @@ static struct aead_engine_alg aead_algs[] = { .base.setauthsize = starfive_aes_ccm_setauthsize, .base.encrypt = starfive_aes_ccm_encrypt, .base.decrypt = starfive_aes_ccm_decrypt, - .base.init = starfive_aes_aead_init_tfm, + .base.init = starfive_aes_ccm_init_tfm, .base.exit = starfive_aes_aead_exit_tfm, .base.ivsize = AES_BLOCK_SIZE, .base.maxauthsize = AES_BLOCK_SIZE, diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c index 425fddf3a8..e4dfed7ee0 100644 --- a/drivers/crypto/starfive/jh7110-cryp.c +++ b/drivers/crypto/starfive/jh7110-cryp.c @@ -89,34 +89,10 @@ static void starfive_dma_cleanup(struct starfive_cryp_dev *cryp) dma_release_channel(cryp->rx); } -static irqreturn_t starfive_cryp_irq(int irq, void *priv) -{ - u32 status; - u32 mask; - struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)priv; - - mask = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); - status = readl(cryp->base + STARFIVE_IE_FLAG_OFFSET); - if (status & STARFIVE_IE_FLAG_AES_DONE) { - mask |= STARFIVE_IE_MASK_AES_DONE; - writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET); - tasklet_schedule(&cryp->aes_done); - } - - if (status & STARFIVE_IE_FLAG_HASH_DONE) { - mask |= STARFIVE_IE_MASK_HASH_DONE; - writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET); - tasklet_schedule(&cryp->hash_done); - } - - return IRQ_HANDLED; -} - static int starfive_cryp_probe(struct platform_device *pdev) { struct starfive_cryp_dev *cryp; struct resource *res; - int irq; int ret; cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL); @@ -131,9 +107,6 @@ static int starfive_cryp_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, PTR_ERR(cryp->base), "Error remapping memory for platform device\n"); - tasklet_init(&cryp->aes_done, starfive_aes_done_task, (unsigned long)cryp); - tasklet_init(&cryp->hash_done, starfive_hash_done_task, (unsigned long)cryp); - cryp->phys_base = res->start; cryp->dma_maxburst = 32; cryp->side_chan = side_chan; @@ -153,16 +126,6 @@ static int starfive_cryp_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, PTR_ERR(cryp->rst), "Error getting hardware reset line\n"); - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - ret = devm_request_irq(&pdev->dev, irq, starfive_cryp_irq, 0, pdev->name, - (void *)cryp); - if (ret) - return dev_err_probe(&pdev->dev, ret, - "Failed to register interrupt handler\n"); - clk_prepare_enable(cryp->hclk); clk_prepare_enable(cryp->ahb); reset_control_deassert(cryp->rst); @@ -219,9 +182,6 @@ err_dma_init: clk_disable_unprepare(cryp->ahb); reset_control_assert(cryp->rst); - tasklet_kill(&cryp->aes_done); - tasklet_kill(&cryp->hash_done); - return ret; } @@ -233,9 +193,6 @@ static void starfive_cryp_remove(struct platform_device *pdev) starfive_hash_unregister_algs(); starfive_rsa_unregister_algs(); - tasklet_kill(&cryp->aes_done); - tasklet_kill(&cryp->hash_done); - crypto_engine_stop(cryp->engine); crypto_engine_exit(cryp->engine); diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h index 6cdf6db5d9..494a74f527 100644 --- a/drivers/crypto/starfive/jh7110-cryp.h +++ b/drivers/crypto/starfive/jh7110-cryp.h @@ -91,6 +91,7 @@ union starfive_hash_csr { #define STARFIVE_HASH_KEY_DONE BIT(13) u32 key_done :1; u32 key_flag :1; +#define STARFIVE_HASH_HMAC_DONE BIT(15) u32 hmac_done :1; #define STARFIVE_HASH_BUSY BIT(16) u32 
busy :1; @@ -168,6 +169,7 @@ struct starfive_cryp_ctx { struct crypto_akcipher *akcipher_fbk; struct crypto_ahash *ahash_fbk; struct crypto_aead *aead_fbk; + struct crypto_skcipher *skcipher_fbk; }; struct starfive_cryp_dev { @@ -185,11 +187,8 @@ struct starfive_cryp_dev { struct dma_chan *rx; struct dma_slave_config cfg_in; struct dma_slave_config cfg_out; - struct scatter_walk in_walk; - struct scatter_walk out_walk; struct crypto_engine *engine; - struct tasklet_struct aes_done; - struct tasklet_struct hash_done; + struct completion dma_done; size_t assoclen; size_t total_in; size_t total_out; @@ -236,7 +235,4 @@ void starfive_rsa_unregister_algs(void); int starfive_aes_register_algs(void); void starfive_aes_unregister_algs(void); - -void starfive_hash_done_task(unsigned long param); -void starfive_aes_done_task(unsigned long param); #endif diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c index b6d1808012..2c60a1047b 100644 --- a/drivers/crypto/starfive/jh7110-hash.c +++ b/drivers/crypto/starfive/jh7110-hash.c @@ -36,15 +36,22 @@ #define STARFIVE_HASH_BUFLEN SHA512_BLOCK_SIZE #define STARFIVE_HASH_RESET 0x2 -static inline int starfive_hash_wait_busy(struct starfive_cryp_ctx *ctx) +static inline int starfive_hash_wait_busy(struct starfive_cryp_dev *cryp) { - struct starfive_cryp_dev *cryp = ctx->cryp; u32 status; return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status, !(status & STARFIVE_HASH_BUSY), 10, 100000); } +static inline int starfive_hash_wait_hmac_done(struct starfive_cryp_dev *cryp) +{ + u32 status; + + return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status, + (status & STARFIVE_HASH_HMAC_DONE), 10, 100000); +} + static inline int starfive_hash_wait_key_done(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; @@ -84,64 +91,26 @@ static int starfive_hash_hmac_key(struct starfive_cryp_ctx *ctx) return 0; } -static void starfive_hash_start(void *param) +static void starfive_hash_start(struct starfive_cryp_dev *cryp) { - struct starfive_cryp_ctx *ctx = param; - struct starfive_cryp_request_ctx *rctx = ctx->rctx; - struct starfive_cryp_dev *cryp = ctx->cryp; - union starfive_alg_cr alg_cr; union starfive_hash_csr csr; - u32 stat; - - dma_unmap_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE); - - alg_cr.v = 0; - alg_cr.clear = 1; - - writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET); csr.v = readl(cryp->base + STARFIVE_HASH_SHACSR); csr.firstb = 0; csr.final = 1; - - stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); - stat &= ~STARFIVE_IE_MASK_HASH_DONE; - writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET); writel(csr.v, cryp->base + STARFIVE_HASH_SHACSR); } -static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx) +static void starfive_hash_dma_callback(void *param) { - struct starfive_cryp_request_ctx *rctx = ctx->rctx; - struct starfive_cryp_dev *cryp = ctx->cryp; - struct dma_async_tx_descriptor *in_desc; - union starfive_alg_cr alg_cr; - int total_len; - int ret; - - if (!rctx->total) { - starfive_hash_start(ctx); - return 0; - } + struct starfive_cryp_dev *cryp = param; - writel(rctx->total, cryp->base + STARFIVE_DMA_IN_LEN_OFFSET); - - total_len = rctx->total; - total_len = (total_len & 0x3) ? 
(((total_len >> 2) + 1) << 2) : total_len; - sg_dma_len(rctx->in_sg) = total_len; - - alg_cr.v = 0; - alg_cr.start = 1; - alg_cr.hash_dma_en = 1; - - writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET); - - ret = dma_map_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE); - if (!ret) - return dev_err_probe(cryp->dev, -EINVAL, "dma_map_sg() error\n"); + complete(&cryp->dma_done); +} - cryp->cfg_in.direction = DMA_MEM_TO_DEV; - cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; +static void starfive_hash_dma_init(struct starfive_cryp_dev *cryp) +{ + cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES; cryp->cfg_in.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; cryp->cfg_in.src_maxburst = cryp->dma_maxburst; cryp->cfg_in.dst_maxburst = cryp->dma_maxburst; @@ -149,50 +118,48 @@ static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx) dmaengine_slave_config(cryp->tx, &cryp->cfg_in); - in_desc = dmaengine_prep_slave_sg(cryp->tx, rctx->in_sg, - ret, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - - if (!in_desc) - return -EINVAL; - - in_desc->callback = starfive_hash_start; - in_desc->callback_param = ctx; - - dmaengine_submit(in_desc); - dma_async_issue_pending(cryp->tx); - - return 0; + init_completion(&cryp->dma_done); } -static int starfive_hash_xmit(struct starfive_cryp_ctx *ctx) +static int starfive_hash_dma_xfer(struct starfive_cryp_dev *cryp, + struct scatterlist *sg) { - struct starfive_cryp_request_ctx *rctx = ctx->rctx; - struct starfive_cryp_dev *cryp = ctx->cryp; + struct dma_async_tx_descriptor *in_desc; + union starfive_alg_cr alg_cr; int ret = 0; - rctx->csr.hash.v = 0; - rctx->csr.hash.reset = 1; - writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR); - - if (starfive_hash_wait_busy(ctx)) - return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting engine.\n"); + alg_cr.v = 0; + alg_cr.start = 1; + alg_cr.hash_dma_en = 1; + writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET); - rctx->csr.hash.v = 0; - rctx->csr.hash.mode = ctx->hash_mode; - rctx->csr.hash.ie = 1; + writel(sg_dma_len(sg), cryp->base + STARFIVE_DMA_IN_LEN_OFFSET); + sg_dma_len(sg) = ALIGN(sg_dma_len(sg), sizeof(u32)); - if (ctx->is_hmac) { - ret = starfive_hash_hmac_key(ctx); - if (ret) - return ret; - } else { - rctx->csr.hash.start = 1; - rctx->csr.hash.firstb = 1; - writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR); + in_desc = dmaengine_prep_slave_sg(cryp->tx, sg, 1, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!in_desc) { + ret = -EINVAL; + goto end; } - return starfive_hash_xmit_dma(ctx); + reinit_completion(&cryp->dma_done); + in_desc->callback = starfive_hash_dma_callback; + in_desc->callback_param = cryp; + + dmaengine_submit(in_desc); + dma_async_issue_pending(cryp->tx); + + if (!wait_for_completion_timeout(&cryp->dma_done, + msecs_to_jiffies(1000))) + ret = -ETIMEDOUT; + +end: + alg_cr.v = 0; + alg_cr.clear = 1; + writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET); + + return ret; } static int starfive_hash_copy_hash(struct ahash_request *req) @@ -215,58 +182,74 @@ static int starfive_hash_copy_hash(struct ahash_request *req) return 0; } -void starfive_hash_done_task(unsigned long param) +static void starfive_hash_done_task(struct starfive_cryp_dev *cryp) { - struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param; int err = cryp->err; if (!err) err = starfive_hash_copy_hash(cryp->req.hreq); - /* Reset to clear hash_done in irq register*/ - writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR); - 
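The new starfive_hash_dma_xfer() above follows the usual dmaengine flow: prepare a slave-SG descriptor, attach a callback that signals a struct completion, submit, and bound the wait with a timeout. A self-contained sketch of that flow, using illustrative example_* names rather than the driver's own types:

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>

struct example_dev {
        struct dma_chan *tx;
        struct completion dma_done;
};

static void example_dma_done(void *param)
{
        struct example_dev *ed = param;

        complete(&ed->dma_done);
}

/* Push one scatterlist entry to the device and wait at most one second. */
static int example_dma_xfer(struct example_dev *ed, struct scatterlist *sg)
{
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_slave_sg(ed->tx, sg, 1, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -EINVAL;

        reinit_completion(&ed->dma_done);
        desc->callback = example_dma_done;
        desc->callback_param = ed;

        dmaengine_submit(desc);
        dma_async_issue_pending(ed->tx);

        if (!wait_for_completion_timeout(&ed->dma_done, msecs_to_jiffies(1000)))
                return -ETIMEDOUT;

        return 0;
}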
crypto_finalize_hash_request(cryp->engine, cryp->req.hreq, err); } -static int starfive_hash_check_aligned(struct scatterlist *sg, size_t total, size_t align) +static int starfive_hash_one_request(struct crypto_engine *engine, void *areq) { - int len = 0; + struct ahash_request *req = container_of(areq, struct ahash_request, + base); + struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct starfive_cryp_request_ctx *rctx = ctx->rctx; + struct starfive_cryp_dev *cryp = ctx->cryp; + struct scatterlist *tsg; + int ret, src_nents, i; - if (!total) - return 0; + writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR); - if (!IS_ALIGNED(total, align)) - return -EINVAL; + if (starfive_hash_wait_busy(cryp)) + return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting hardware\n"); - while (sg) { - if (!IS_ALIGNED(sg->offset, sizeof(u32))) - return -EINVAL; + rctx->csr.hash.v = 0; + rctx->csr.hash.mode = ctx->hash_mode; - if (!IS_ALIGNED(sg->length, align)) - return -EINVAL; + if (ctx->is_hmac) { + ret = starfive_hash_hmac_key(ctx); + if (ret) + return ret; + } else { + rctx->csr.hash.start = 1; + rctx->csr.hash.firstb = 1; + writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR); + } + + /* No input message, get digest and end. */ + if (!rctx->total) + goto hash_start; + + starfive_hash_dma_init(cryp); + + for_each_sg(rctx->in_sg, tsg, rctx->in_sg_len, i) { + src_nents = dma_map_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE); + if (src_nents == 0) + return dev_err_probe(cryp->dev, -ENOMEM, + "dma_map_sg error\n"); - len += sg->length; - sg = sg_next(sg); + ret = starfive_hash_dma_xfer(cryp, tsg); + dma_unmap_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE); + if (ret) + return ret; } - if (len != total) - return -EINVAL; +hash_start: + starfive_hash_start(cryp); - return 0; -} + if (starfive_hash_wait_busy(cryp)) + return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error generating digest\n"); -static int starfive_hash_one_request(struct crypto_engine *engine, void *areq) -{ - struct ahash_request *req = container_of(areq, struct ahash_request, - base); - struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct starfive_cryp_dev *cryp = ctx->cryp; + if (ctx->is_hmac) + cryp->err = starfive_hash_wait_hmac_done(cryp); - if (!cryp) - return -ENODEV; + starfive_hash_done_task(cryp); - return starfive_hash_xmit(ctx); + return 0; } static int starfive_hash_init(struct ahash_request *req) @@ -337,22 +320,6 @@ static int starfive_hash_finup(struct ahash_request *req) return crypto_ahash_finup(&rctx->ahash_fbk_req); } -static int starfive_hash_digest_fb(struct ahash_request *req) -{ - struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm); - - ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk); - ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags, - req->base.complete, req->base.data); - - ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src, - req->result, req->nbytes); - - return crypto_ahash_digest(&rctx->ahash_fbk_req); -} - static int starfive_hash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -370,9 +337,6 @@ static int starfive_hash_digest(struct ahash_request *req) rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total); ctx->rctx = rctx; - if (starfive_hash_check_aligned(rctx->in_sg, rctx->total, rctx->blksize)) - return starfive_hash_digest_fb(req); - return 
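The new starfive_hash_one_request() maps and transfers the input scatterlist one entry at a time instead of deferring unaligned data to a fallback implementation. A sketch of that loop, building on the hypothetical example_dma_xfer() helper from the previous sketch:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

struct example_dev;                                     /* from the sketch above */
int example_dma_xfer(struct example_dev *ed, struct scatterlist *sg);

static int example_hash_sgs(struct device *dev, struct example_dev *ed,
                            struct scatterlist *in_sg, int in_sg_len)
{
        struct scatterlist *tsg;
        int i, ret;

        for_each_sg(in_sg, tsg, in_sg_len, i) {
                /* Map exactly one entry, feed it to the engine, unmap it. */
                if (!dma_map_sg(dev, tsg, 1, DMA_TO_DEVICE))
                        return -ENOMEM;

                ret = example_dma_xfer(ed, tsg);
                dma_unmap_sg(dev, tsg, 1, DMA_TO_DEVICE);
                if (ret)
                        return ret;
        }

        return 0;
}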
crypto_transfer_hash_request_to_engine(cryp->engine, req); } @@ -406,7 +370,8 @@ static int starfive_hash_import(struct ahash_request *req, const void *in) static int starfive_hash_init_tfm(struct crypto_ahash *hash, const char *alg_name, - unsigned int mode) + unsigned int mode, + bool is_hmac) { struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); @@ -426,7 +391,7 @@ static int starfive_hash_init_tfm(struct crypto_ahash *hash, crypto_ahash_set_reqsize(hash, sizeof(struct starfive_cryp_request_ctx) + crypto_ahash_reqsize(ctx->ahash_fbk)); - ctx->keylen = 0; + ctx->is_hmac = is_hmac; ctx->hash_mode = mode; return 0; @@ -529,81 +494,61 @@ static int starfive_hash_setkey(struct crypto_ahash *hash, static int starfive_sha224_init_tfm(struct crypto_ahash *hash) { return starfive_hash_init_tfm(hash, "sha224-generic", - STARFIVE_HASH_SHA224); + STARFIVE_HASH_SHA224, 0); } static int starfive_sha256_init_tfm(struct crypto_ahash *hash) { return starfive_hash_init_tfm(hash, "sha256-generic", - STARFIVE_HASH_SHA256); + STARFIVE_HASH_SHA256, 0); } static int starfive_sha384_init_tfm(struct crypto_ahash *hash) { return starfive_hash_init_tfm(hash, "sha384-generic", - STARFIVE_HASH_SHA384); + STARFIVE_HASH_SHA384, 0); } static int starfive_sha512_init_tfm(struct crypto_ahash *hash) { return starfive_hash_init_tfm(hash, "sha512-generic", - STARFIVE_HASH_SHA512); + STARFIVE_HASH_SHA512, 0); } static int starfive_sm3_init_tfm(struct crypto_ahash *hash) { return starfive_hash_init_tfm(hash, "sm3-generic", - STARFIVE_HASH_SM3); + STARFIVE_HASH_SM3, 0); } static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash) { - struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); - - ctx->is_hmac = true; - return starfive_hash_init_tfm(hash, "hmac(sha224-generic)", - STARFIVE_HASH_SHA224); + STARFIVE_HASH_SHA224, 1); } static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash) { - struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); - - ctx->is_hmac = true; - return starfive_hash_init_tfm(hash, "hmac(sha256-generic)", - STARFIVE_HASH_SHA256); + STARFIVE_HASH_SHA256, 1); } static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash) { - struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); - - ctx->is_hmac = true; - return starfive_hash_init_tfm(hash, "hmac(sha384-generic)", - STARFIVE_HASH_SHA384); + STARFIVE_HASH_SHA384, 1); } static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash) { - struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); - - ctx->is_hmac = true; - return starfive_hash_init_tfm(hash, "hmac(sha512-generic)", - STARFIVE_HASH_SHA512); + STARFIVE_HASH_SHA512, 1); } static int starfive_hmac_sm3_init_tfm(struct crypto_ahash *hash) { - struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); - - ctx->is_hmac = true; - return starfive_hash_init_tfm(hash, "hmac(sm3-generic)", - STARFIVE_HASH_SM3); + STARFIVE_HASH_SM3, 1); } static struct ahash_engine_alg algs_sha2_sm3[] = { diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c index 7ec14b5b84..33093ba4b1 100644 --- a/drivers/crypto/starfive/jh7110-rsa.c +++ b/drivers/crypto/starfive/jh7110-rsa.c @@ -45,6 +45,9 @@ static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx) static void starfive_rsa_free_key(struct starfive_rsa_key *key) { + if (!key->key_sz) + return; + kfree_sensitive(key->d); kfree_sensitive(key->e); kfree_sensitive(key->n); @@ -533,16 +536,14 @@ static int starfive_rsa_init_tfm(struct crypto_akcipher *tfm) { struct starfive_cryp_ctx *ctx = 
akcipher_tfm_ctx(tfm); + ctx->cryp = starfive_cryp_find_dev(ctx); + if (!ctx->cryp) + return -ENODEV; + ctx->akcipher_fbk = crypto_alloc_akcipher("rsa-generic", 0, 0); if (IS_ERR(ctx->akcipher_fbk)) return PTR_ERR(ctx->akcipher_fbk); - ctx->cryp = starfive_cryp_find_dev(ctx); - if (!ctx->cryp) { - crypto_free_akcipher(ctx->akcipher_fbk); - return -ENODEV; - } - akcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) + sizeof(struct crypto_akcipher) + 32); diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 34e0d7e381..351827372e 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -94,6 +94,7 @@ #define HASH_FLAGS_ERRORS BIT(21) #define HASH_FLAGS_EMPTY BIT(22) #define HASH_FLAGS_HMAC BIT(23) +#define HASH_FLAGS_SGS_COPIED BIT(24) #define HASH_OP_UPDATE 1 #define HASH_OP_FINAL 2 @@ -145,7 +146,7 @@ struct stm32_hash_state { u16 bufcnt; u16 blocklen; - u8 buffer[HASH_BUFLEN] __aligned(4); + u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32)); /* hash state */ u32 hw_context[3 + HASH_CSR_NB_MAX]; @@ -158,8 +159,8 @@ struct stm32_hash_request_ctx { u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32)); size_t digcnt; - /* DMA */ struct scatterlist *sg; + struct scatterlist sgl[2]; /* scatterlist used to realize alignment */ unsigned int offset; unsigned int total; struct scatterlist sg_key; @@ -184,6 +185,7 @@ struct stm32_hash_pdata { size_t algs_info_size; bool has_sr; bool has_mdmat; + bool context_secured; bool broken_emptymsg; bool ux500; }; @@ -195,6 +197,7 @@ struct stm32_hash_dev { struct reset_control *rst; void __iomem *io_base; phys_addr_t phys_base; + u8 xmit_buf[HASH_BUFLEN] __aligned(sizeof(u32)); u32 dma_mode; bool polled; @@ -220,6 +223,8 @@ static struct stm32_hash_drv stm32_hash = { }; static void stm32_hash_dma_callback(void *param); +static int stm32_hash_prepare_request(struct ahash_request *req); +static void stm32_hash_unprepare_request(struct ahash_request *req); static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset) { @@ -232,6 +237,11 @@ static inline void stm32_hash_write(struct stm32_hash_dev *hdev, writel_relaxed(value, hdev->io_base + offset); } +/** + * stm32_hash_wait_busy - wait until hash processor is available. It return an + * error if the hash core is processing a block of data for more than 10 ms. + * @hdev: the stm32_hash_dev device. + */ static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev) { u32 status; @@ -245,6 +255,11 @@ static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev) !(status & HASH_SR_BUSY), 10, 10000); } +/** + * stm32_hash_set_nblw - set the number of valid bytes in the last word. + * @hdev: the stm32_hash_dev device. + * @length: the length of the final word. + */ static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length) { u32 reg; @@ -282,6 +297,11 @@ static int stm32_hash_write_key(struct stm32_hash_dev *hdev) return 0; } +/** + * stm32_hash_write_ctrl - Initialize the hash processor, only if + * HASH_FLAGS_INIT is set. 
+ * @hdev: the stm32_hash_dev device + */ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); @@ -469,9 +489,7 @@ static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); struct stm32_hash_state *state = &rctx->state; - u32 *preg = state->hw_context; int bufcnt, err = 0, final; - int i, swap_reg; dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags); @@ -495,34 +513,23 @@ static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev) return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1); } - if (!(hdev->flags & HASH_FLAGS_INIT)) - return 0; - - if (stm32_hash_wait_busy(hdev)) - return -ETIMEDOUT; - - swap_reg = hash_swap_reg(rctx); - - if (!hdev->pdata->ux500) - *preg++ = stm32_hash_read(hdev, HASH_IMR); - *preg++ = stm32_hash_read(hdev, HASH_STR); - *preg++ = stm32_hash_read(hdev, HASH_CR); - for (i = 0; i < swap_reg; i++) - *preg++ = stm32_hash_read(hdev, HASH_CSR(i)); - - state->flags |= HASH_FLAGS_INIT; - return err; } static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev, - struct scatterlist *sg, int length, int mdma) + struct scatterlist *sg, int length, int mdmat) { struct dma_async_tx_descriptor *in_desc; dma_cookie_t cookie; u32 reg; int err; + dev_dbg(hdev->dev, "%s mdmat: %x length: %d\n", __func__, mdmat, length); + + /* do not use dma if there is no data to send */ + if (length <= 0) + return 0; + in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); @@ -535,13 +542,12 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev, in_desc->callback = stm32_hash_dma_callback; in_desc->callback_param = hdev; - hdev->flags |= HASH_FLAGS_FINAL; hdev->flags |= HASH_FLAGS_DMA_ACTIVE; reg = stm32_hash_read(hdev, HASH_CR); if (hdev->pdata->has_mdmat) { - if (mdma) + if (mdmat) reg |= HASH_CR_MDMAT; else reg &= ~HASH_CR_MDMAT; @@ -550,7 +556,6 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev, stm32_hash_write(hdev, HASH_CR, reg); - stm32_hash_set_nblw(hdev, length); cookie = dmaengine_submit(in_desc); err = dma_submit_error(cookie); @@ -590,7 +595,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev) struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); int err; - if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode == 1) { + if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode > 0) { err = stm32_hash_write_key(hdev); if (stm32_hash_wait_busy(hdev)) return -ETIMEDOUT; @@ -655,18 +660,20 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) struct scatterlist sg[1], *tsg; int err = 0, reg, ncp = 0; unsigned int i, len = 0, bufcnt = 0; + bool final = hdev->flags & HASH_FLAGS_FINAL; bool is_last = false; + u32 last_word; - rctx->sg = hdev->req->src; - rctx->total = hdev->req->nbytes; + dev_dbg(hdev->dev, "%s total: %d bufcnt: %d final: %d\n", + __func__, rctx->total, rctx->state.bufcnt, final); - rctx->nents = sg_nents(rctx->sg); if (rctx->nents < 0) return -EINVAL; stm32_hash_write_ctrl(hdev); - if (hdev->flags & HASH_FLAGS_HMAC) { + if (hdev->flags & HASH_FLAGS_HMAC && (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) { + hdev->flags |= HASH_FLAGS_HMAC_KEY; err = stm32_hash_hmac_dma_send(hdev); if (err != -EINPROGRESS) return err; @@ -677,22 +684,36 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) len = sg->length; if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) { - sg->length = rctx->total - bufcnt; - 
is_last = true; - if (hdev->dma_mode == 1) { - len = (ALIGN(sg->length, 16) - 16); - - ncp = sg_pcopy_to_buffer( - rctx->sg, rctx->nents, - rctx->state.buffer, sg->length - len, - rctx->total - sg->length + len); - - sg->length = len; + if (!final) { + /* Always manually put the last word of a non-final transfer. */ + len -= sizeof(u32); + sg_pcopy_to_buffer(rctx->sg, rctx->nents, &last_word, 4, len); + sg->length -= sizeof(u32); } else { - if (!(IS_ALIGNED(sg->length, sizeof(u32)))) { - len = sg->length; - sg->length = ALIGN(sg->length, - sizeof(u32)); + /* + * In Multiple DMA mode, DMA must be aborted before the final + * transfer. + */ + sg->length = rctx->total - bufcnt; + if (hdev->dma_mode > 0) { + len = (ALIGN(sg->length, 16) - 16); + + ncp = sg_pcopy_to_buffer(rctx->sg, rctx->nents, + rctx->state.buffer, + sg->length - len, + rctx->total - sg->length + len); + + if (!len) + break; + + sg->length = len; + } else { + is_last = true; + if (!(IS_ALIGNED(sg->length, sizeof(u32)))) { + len = sg->length; + sg->length = ALIGN(sg->length, + sizeof(u32)); + } } } } @@ -706,43 +727,67 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) err = stm32_hash_xmit_dma(hdev, sg, len, !is_last); + /* The last word of a non final transfer is sent manually. */ + if (!final) { + stm32_hash_write(hdev, HASH_DIN, last_word); + len += sizeof(u32); + } + + rctx->total -= len; + bufcnt += sg[0].length; dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE); - if (err == -ENOMEM) + if (err == -ENOMEM || err == -ETIMEDOUT) return err; if (is_last) break; } - if (hdev->dma_mode == 1) { - if (stm32_hash_wait_busy(hdev)) - return -ETIMEDOUT; - reg = stm32_hash_read(hdev, HASH_CR); - reg &= ~HASH_CR_DMAE; - reg |= HASH_CR_DMAA; - stm32_hash_write(hdev, HASH_CR, reg); + /* + * When the second last block transfer of 4 words is performed by the DMA, + * the software must set the DMA Abort bit (DMAA) to 1 before completing the + * last transfer of 4 words or less. + */ + if (final) { + if (hdev->dma_mode > 0) { + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + reg = stm32_hash_read(hdev, HASH_CR); + reg &= ~HASH_CR_DMAE; + reg |= HASH_CR_DMAA; + stm32_hash_write(hdev, HASH_CR, reg); + + if (ncp) { + memset(buffer + ncp, 0, 4 - DIV_ROUND_UP(ncp, sizeof(u32))); + writesl(hdev->io_base + HASH_DIN, buffer, + DIV_ROUND_UP(ncp, sizeof(u32))); + } - if (ncp) { - memset(buffer + ncp, 0, - DIV_ROUND_UP(ncp, sizeof(u32)) - ncp); - writesl(hdev->io_base + HASH_DIN, buffer, - DIV_ROUND_UP(ncp, sizeof(u32))); + stm32_hash_set_nblw(hdev, ncp); + reg = stm32_hash_read(hdev, HASH_STR); + reg |= HASH_STR_DCAL; + stm32_hash_write(hdev, HASH_STR, reg); + err = -EINPROGRESS; } - stm32_hash_set_nblw(hdev, ncp); - reg = stm32_hash_read(hdev, HASH_STR); - reg |= HASH_STR_DCAL; - stm32_hash_write(hdev, HASH_STR, reg); - err = -EINPROGRESS; - } - if (hdev->flags & HASH_FLAGS_HMAC) { - if (stm32_hash_wait_busy(hdev)) - return -ETIMEDOUT; - err = stm32_hash_hmac_dma_send(hdev); + /* + * The hash processor needs the key to be loaded a second time in order + * to process the HMAC. 
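For a non-final DMA transfer the code above holds back the last 32-bit word, shrinks the scatterlist length accordingly, and later feeds that word to the data FIFO by CPU. Roughly, the extraction and write look like the sketch below; EXAMPLE_DIN_OFFSET is a placeholder for the data-input register, not necessarily the real HASH_DIN offset.

#include <linux/io.h>
#include <linux/scatterlist.h>

#define EXAMPLE_DIN_OFFSET      0x04            /* placeholder FIFO offset */

static void example_send_last_word(void __iomem *base, struct scatterlist *sg,
                                   int nents, unsigned int len)
{
        u32 last_word;

        /* Copy the trailing word out of the scatterlist ... */
        sg_pcopy_to_buffer(sg, nents, &last_word, sizeof(last_word),
                           len - sizeof(last_word));
        /* ... and push it into the FIFO once the DMA part has been queued. */
        writel(last_word, base + EXAMPLE_DIN_OFFSET);
}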
+ */ + if (hdev->flags & HASH_FLAGS_HMAC) { + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + err = stm32_hash_hmac_dma_send(hdev); + } + + return err; } - return err; + if (err != -EINPROGRESS) + return err; + + return 0; } static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx) @@ -765,33 +810,6 @@ static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx) return hdev; } -static bool stm32_hash_dma_aligned_data(struct ahash_request *req) -{ - struct scatterlist *sg; - struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); - struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); - int i; - - if (!hdev->dma_lch || req->nbytes <= rctx->state.blocklen) - return false; - - if (sg_nents(req->src) > 1) { - if (hdev->dma_mode == 1) - return false; - for_each_sg(req->src, sg, sg_nents(req->src), i) { - if ((!IS_ALIGNED(sg->length, sizeof(u32))) && - (!sg_is_last(sg))) - return false; - } - } - - if (req->src->offset % 4) - return false; - - return true; -} - static int stm32_hash_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -802,8 +820,10 @@ static int stm32_hash_init(struct ahash_request *req) bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE; rctx->hdev = hdev; + state->flags = 0; - state->flags = HASH_FLAGS_CPU; + if (!(hdev->dma_lch && hdev->pdata->has_mdmat)) + state->flags |= HASH_FLAGS_CPU; if (sha3_mode) state->flags |= HASH_FLAGS_SHA3_MODE; @@ -857,6 +877,7 @@ static int stm32_hash_init(struct ahash_request *req) dev_err(hdev->dev, "Error, block too large"); return -EINVAL; } + rctx->nents = 0; rctx->total = 0; rctx->offset = 0; rctx->data_type = HASH_DATA_8_BITS; @@ -874,6 +895,9 @@ static int stm32_hash_update_req(struct stm32_hash_dev *hdev) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); struct stm32_hash_state *state = &rctx->state; + dev_dbg(hdev->dev, "update_req: total: %u, digcnt: %zd, final: 0", + rctx->total, rctx->digcnt); + if (!(state->flags & HASH_FLAGS_CPU)) return stm32_hash_dma_send(hdev); @@ -887,6 +911,11 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev) struct stm32_hash_state *state = &rctx->state; int buflen = state->bufcnt; + if (!(state->flags & HASH_FLAGS_CPU)) { + hdev->flags |= HASH_FLAGS_FINAL; + return stm32_hash_dma_send(hdev); + } + if (state->flags & HASH_FLAGS_FINUP) return stm32_hash_update_req(hdev); @@ -968,15 +997,21 @@ static int stm32_hash_finish(struct ahash_request *req) static void stm32_hash_finish_req(struct ahash_request *req, int err) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_state *state = &rctx->state; struct stm32_hash_dev *hdev = rctx->hdev; + if (hdev->flags & HASH_FLAGS_DMA_ACTIVE) + state->flags |= HASH_FLAGS_DMA_ACTIVE; + else + state->flags &= ~HASH_FLAGS_DMA_ACTIVE; + if (!err && (HASH_FLAGS_FINAL & hdev->flags)) { stm32_hash_copy_hash(req); err = stm32_hash_finish(req); } - pm_runtime_mark_last_busy(hdev->dev); - pm_runtime_put_autosuspend(hdev->dev); + /* Finalized request mist be unprepared here */ + stm32_hash_unprepare_request(req); crypto_finalize_hash_request(hdev->engine, req, err); } @@ -1006,6 +1041,10 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq) pm_runtime_get_sync(hdev->dev); + err = stm32_hash_prepare_request(req); + if (err) + return err; + hdev->req = req; hdev->flags = 0; swap_reg = hash_swap_reg(rctx); @@ -1030,6 +1069,12 @@ static 
int stm32_hash_one_request(struct crypto_engine *engine, void *areq) if (state->flags & HASH_FLAGS_HMAC) hdev->flags |= HASH_FLAGS_HMAC | HASH_FLAGS_HMAC_KEY; + + if (state->flags & HASH_FLAGS_CPU) + hdev->flags |= HASH_FLAGS_CPU; + + if (state->flags & HASH_FLAGS_DMA_ACTIVE) + hdev->flags |= HASH_FLAGS_DMA_ACTIVE; } if (rctx->op == HASH_OP_UPDATE) @@ -1054,6 +1099,284 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq) return 0; } +static int stm32_hash_copy_sgs(struct stm32_hash_request_ctx *rctx, + struct scatterlist *sg, int bs, + unsigned int new_len) +{ + struct stm32_hash_state *state = &rctx->state; + int pages; + void *buf; + + pages = get_order(new_len); + + buf = (void *)__get_free_pages(GFP_ATOMIC, pages); + if (!buf) { + pr_err("Couldn't allocate pages for unaligned cases.\n"); + return -ENOMEM; + } + + if (state->bufcnt) + memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt); + + scatterwalk_map_and_copy(buf + state->bufcnt, sg, rctx->offset, + min(new_len, rctx->total) - state->bufcnt, 0); + sg_init_table(rctx->sgl, 1); + sg_set_buf(rctx->sgl, buf, new_len); + rctx->sg = rctx->sgl; + state->flags |= HASH_FLAGS_SGS_COPIED; + rctx->nents = 1; + rctx->offset += new_len - state->bufcnt; + state->bufcnt = 0; + rctx->total = new_len; + + return 0; +} + +static int stm32_hash_align_sgs(struct scatterlist *sg, + int nbytes, int bs, bool init, bool final, + struct stm32_hash_request_ctx *rctx) +{ + struct stm32_hash_state *state = &rctx->state; + struct stm32_hash_dev *hdev = rctx->hdev; + struct scatterlist *sg_tmp = sg; + int offset = rctx->offset; + int new_len; + int n = 0; + int bufcnt = state->bufcnt; + bool secure_ctx = hdev->pdata->context_secured; + bool aligned = true; + + if (!sg || !sg->length || !nbytes) { + if (bufcnt) { + bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs; + sg_init_table(rctx->sgl, 1); + sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, bufcnt); + rctx->sg = rctx->sgl; + rctx->nents = 1; + } + + return 0; + } + + new_len = nbytes; + + if (offset) + aligned = false; + + if (final) { + new_len = DIV_ROUND_UP(new_len, bs) * bs; + } else { + new_len = (new_len - 1) / bs * bs; // return n block - 1 block + + /* + * Context save in some version of HASH IP can only be done when the + * FIFO is ready to get a new block. This implies to send n block plus a + * 32 bit word in the first DMA send. 
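stm32_hash_copy_sgs() above linearizes an unaligned scatterlist into one freshly allocated buffer so the DMA only ever sees a single well-aligned entry. A sketch of that fallback path; as in the driver, the caller is expected to release the buffer later (the patch does so via sg_virt() and free_pages() in the unprepare path):

#include <crypto/scatterwalk.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static int example_copy_sgs(struct scatterlist *src, unsigned int offset,
                            unsigned int len, struct scatterlist *out_sgl)
{
        void *buf;

        buf = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
        if (!buf)
                return -ENOMEM;

        /* Gather the source data into the flat buffer ... */
        scatterwalk_map_and_copy(buf, src, offset, len, 0);

        /* ... and describe it with a single-entry scatterlist. */
        sg_init_table(out_sgl, 1);
        sg_set_buf(out_sgl, buf, len);

        return 0;
}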
+ */ + if (init && secure_ctx) { + new_len += sizeof(u32); + if (unlikely(new_len > nbytes)) + new_len -= bs; + } + } + + if (!new_len) + return 0; + + if (nbytes != new_len) + aligned = false; + + while (nbytes > 0 && sg_tmp) { + n++; + + if (bufcnt) { + if (!IS_ALIGNED(bufcnt, bs)) { + aligned = false; + break; + } + nbytes -= bufcnt; + bufcnt = 0; + if (!nbytes) + aligned = false; + + continue; + } + + if (offset < sg_tmp->length) { + if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) { + aligned = false; + break; + } + + if (!IS_ALIGNED(sg_tmp->length - offset, bs)) { + aligned = false; + break; + } + } + + if (offset) { + offset -= sg_tmp->length; + if (offset < 0) { + nbytes += offset; + offset = 0; + } + } else { + nbytes -= sg_tmp->length; + } + + sg_tmp = sg_next(sg_tmp); + + if (nbytes < 0) { + aligned = false; + break; + } + } + + if (!aligned) + return stm32_hash_copy_sgs(rctx, sg, bs, new_len); + + rctx->total = new_len; + rctx->offset += new_len; + rctx->nents = n; + if (state->bufcnt) { + sg_init_table(rctx->sgl, 2); + sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, state->bufcnt); + sg_chain(rctx->sgl, 2, sg); + rctx->sg = rctx->sgl; + } else { + rctx->sg = sg; + } + + return 0; +} + +static int stm32_hash_prepare_request(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + struct stm32_hash_state *state = &rctx->state; + unsigned int nbytes; + int ret, hash_later, bs; + bool update = rctx->op & HASH_OP_UPDATE; + bool init = !(state->flags & HASH_FLAGS_INIT); + bool finup = state->flags & HASH_FLAGS_FINUP; + bool final = state->flags & HASH_FLAGS_FINAL; + + if (!hdev->dma_lch || state->flags & HASH_FLAGS_CPU) + return 0; + + bs = crypto_ahash_blocksize(tfm); + + nbytes = state->bufcnt; + + /* + * In case of update request nbytes must correspond to the content of the + * buffer + the offset minus the content of the request already in the + * buffer. 
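The walk above decides whether the source scatterlist can be handed to the DMA directly or must be copied into a bounce buffer. Roughly, each entry has to start on a 32-bit boundary and every entry except the last has to cover a whole number of blocks; a deliberately simplified version of that test:

#include <linux/align.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

static bool example_sg_dma_friendly(struct scatterlist *sg, unsigned int bs)
{
        for (; sg; sg = sg_next(sg)) {
                if (!IS_ALIGNED(sg->offset, sizeof(u32)))
                        return false;
                if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, bs))
                        return false;
        }

        return true;
}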
+ */ + if (update || finup) + nbytes += req->nbytes - rctx->offset; + + dev_dbg(hdev->dev, + "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n", + __func__, nbytes, bs, rctx->total, rctx->offset, state->bufcnt); + + if (!nbytes) + return 0; + + rctx->total = nbytes; + + if (update && req->nbytes && (!IS_ALIGNED(state->bufcnt, bs))) { + int len = bs - state->bufcnt % bs; + + if (len > req->nbytes) + len = req->nbytes; + scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src, + 0, len, 0); + state->bufcnt += len; + rctx->offset = len; + } + + /* copy buffer in a temporary one that is used for sg alignment */ + if (state->bufcnt) + memcpy(hdev->xmit_buf, state->buffer, state->bufcnt); + + ret = stm32_hash_align_sgs(req->src, nbytes, bs, init, final, rctx); + if (ret) + return ret; + + hash_later = nbytes - rctx->total; + if (hash_later < 0) + hash_later = 0; + + if (hash_later && hash_later <= state->blocklen) { + scatterwalk_map_and_copy(state->buffer, + req->src, + req->nbytes - hash_later, + hash_later, 0); + + state->bufcnt = hash_later; + } else { + state->bufcnt = 0; + } + + if (hash_later > state->blocklen) { + /* FIXME: add support of this case */ + pr_err("Buffer contains more than one block.\n"); + return -ENOMEM; + } + + rctx->total = min(nbytes, rctx->total); + + return 0; +} + +static void stm32_hash_unprepare_request(struct ahash_request *req) +{ + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_state *state = &rctx->state; + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + u32 *preg = state->hw_context; + int swap_reg, i; + + if (hdev->dma_lch) + dmaengine_terminate_sync(hdev->dma_lch); + + if (state->flags & HASH_FLAGS_SGS_COPIED) + free_pages((unsigned long)sg_virt(rctx->sg), get_order(rctx->sg->length)); + + rctx->sg = NULL; + rctx->offset = 0; + + state->flags &= ~(HASH_FLAGS_SGS_COPIED); + + if (!(hdev->flags & HASH_FLAGS_INIT)) + goto pm_runtime; + + state->flags |= HASH_FLAGS_INIT; + + if (stm32_hash_wait_busy(hdev)) { + dev_warn(hdev->dev, "Wait busy failed."); + return; + } + + swap_reg = hash_swap_reg(rctx); + + if (!hdev->pdata->ux500) + *preg++ = stm32_hash_read(hdev, HASH_IMR); + *preg++ = stm32_hash_read(hdev, HASH_STR); + *preg++ = stm32_hash_read(hdev, HASH_CR); + for (i = 0; i < swap_reg; i++) + *preg++ = stm32_hash_read(hdev, HASH_CSR(i)); + +pm_runtime: + pm_runtime_mark_last_busy(hdev->dev); + pm_runtime_put_autosuspend(hdev->dev); +} + static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); @@ -1070,16 +1393,26 @@ static int stm32_hash_update(struct ahash_request *req) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_state *state = &rctx->state; - if (!req->nbytes || !(state->flags & HASH_FLAGS_CPU)) + if (!req->nbytes) return 0; - rctx->total = req->nbytes; - rctx->sg = req->src; - rctx->offset = 0; - if ((state->bufcnt + rctx->total < state->blocklen)) { - stm32_hash_append_sg(rctx); - return 0; + if (state->flags & HASH_FLAGS_CPU) { + rctx->total = req->nbytes; + rctx->sg = req->src; + rctx->offset = 0; + + if ((state->bufcnt + rctx->total < state->blocklen)) { + stm32_hash_append_sg(rctx); + return 0; + } + } else { /* DMA mode */ + if (state->bufcnt + req->nbytes <= state->blocklen) { + scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src, + 0, req->nbytes, 0); + state->bufcnt += req->nbytes; + 
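For a non-final update the request is trimmed down to n - 1 full blocks and the remainder is kept in the context buffer for the next call (the "hash_later" bytes). A tiny standalone C check of that split, with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned int bs = 64;                   /* SHA-256 block size */
        unsigned int nbytes = 20 + 150;         /* buffered bytes + new bytes */
        unsigned int dma_len, hash_later;

        dma_len = (nbytes - 1) / bs * bs;       /* non-final: send n - 1 blocks */
        hash_later = nbytes - dma_len;          /* residue kept for next update */

        printf("send %u bytes by DMA, keep %u for later\n", dma_len, hash_later);
        return 0;                               /* prints: send 128 ..., keep 42 */
}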
return 0; + } } return stm32_hash_enqueue(req, HASH_OP_UPDATE); @@ -1098,20 +1431,18 @@ static int stm32_hash_final(struct ahash_request *req) static int stm32_hash_finup(struct ahash_request *req) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); - struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); struct stm32_hash_state *state = &rctx->state; if (!req->nbytes) goto out; state->flags |= HASH_FLAGS_FINUP; - rctx->total = req->nbytes; - rctx->sg = req->src; - rctx->offset = 0; - if (hdev->dma_lch && stm32_hash_dma_aligned_data(req)) - state->flags &= ~HASH_FLAGS_CPU; + if ((state->flags & HASH_FLAGS_CPU)) { + rctx->total = req->nbytes; + rctx->sg = req->src; + rctx->offset = 0; + } out: return stm32_hash_final(req); @@ -1215,7 +1546,6 @@ static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm) HASH_FLAGS_HMAC); } - static void stm32_hash_cra_exit(struct crypto_tfm *tfm) { struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm); @@ -1228,14 +1558,9 @@ static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id) { struct stm32_hash_dev *hdev = dev_id; - if (HASH_FLAGS_CPU & hdev->flags) { - if (HASH_FLAGS_OUTPUT_READY & hdev->flags) { - hdev->flags &= ~HASH_FLAGS_OUTPUT_READY; - goto finish; - } - } else if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) { - hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE; - goto finish; + if (HASH_FLAGS_OUTPUT_READY & hdev->flags) { + hdev->flags &= ~HASH_FLAGS_OUTPUT_READY; + goto finish; } return IRQ_HANDLED; @@ -1984,6 +2309,7 @@ static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = { .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32mp13), .has_sr = true, .has_mdmat = true, + .context_secured = true, }; static const struct of_device_id stm32_hash_of_match[] = { diff --git a/drivers/crypto/tegra/Makefile b/drivers/crypto/tegra/Makefile new file mode 100644 index 0000000000..a32001e58e --- /dev/null +++ b/drivers/crypto/tegra/Makefile @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +tegra-se-objs := tegra-se-key.o tegra-se-main.o + +tegra-se-y += tegra-se-aes.o +tegra-se-y += tegra-se-hash.o + +obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra-se.o diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c new file mode 100644 index 0000000000..ae7a0f8435 --- /dev/null +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -0,0 +1,1933 @@ +// SPDX-License-Identifier: GPL-2.0-only +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* + * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tegra-se.h" + +struct tegra_aes_ctx { + struct tegra_se *se; + u32 alg; + u32 ivsize; + u32 key1_id; + u32 key2_id; +}; + +struct tegra_aes_reqctx { + struct tegra_se_datbuf datbuf; + bool encrypt; + u32 config; + u32 crypto_config; + u32 len; + u32 *iv; +}; + +struct tegra_aead_ctx { + struct tegra_se *se; + unsigned int authsize; + u32 alg; + u32 keylen; + u32 key_id; +}; + +struct tegra_aead_reqctx { + struct tegra_se_datbuf inbuf; + struct tegra_se_datbuf outbuf; + struct scatterlist *src_sg; + struct scatterlist *dst_sg; + unsigned int assoclen; + unsigned int cryptlen; + unsigned int authsize; + bool encrypt; + u32 config; + u32 crypto_config; + u32 key_id; + u32 iv[4]; + u8 authdata[16]; +}; + +struct tegra_cmac_ctx { + struct tegra_se *se; + unsigned int alg; + u32 key_id; + struct crypto_shash *fallback_tfm; +}; + +struct tegra_cmac_reqctx { + struct scatterlist *src_sg; + struct tegra_se_datbuf datbuf; + struct tegra_se_datbuf residue; + unsigned int total_len; + unsigned int blk_size; + unsigned int task; + u32 crypto_config; + u32 config; + u32 key_id; + u32 *iv; + u32 result[CMAC_RESULT_REG_COUNT]; +}; + +/* increment counter (128-bit int) */ +static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums) +{ + do { + --bits; + nums += counter[bits]; + counter[bits] = nums & 0xff; + nums >>= 8; + } while (bits && nums); +} + +static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx) +{ + struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req); + unsigned int offset; + + offset = req->cryptlen - ctx->ivsize; + + if (rctx->encrypt) + memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize); + else + scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0); +} + +static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx) +{ + int num; + + if (ctx->alg == SE_ALG_CBC) { + tegra_cbc_iv_copyback(req, ctx); + } else if (ctx->alg == SE_ALG_CTR) { + num = req->cryptlen / ctx->ivsize; + if (req->cryptlen % ctx->ivsize) + num++; + + ctr_iv_inc(req->iv, ctx->ivsize, num); + } +} + +static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt) +{ + switch (alg) { + case SE_ALG_CMAC: + case SE_ALG_GMAC: + case SE_ALG_GCM: + case SE_ALG_GCM_FINAL: + return 0; + case SE_ALG_CBC: + if (encrypt) + return SE_CRYPTO_CFG_CBC_ENCRYPT; + else + return SE_CRYPTO_CFG_CBC_DECRYPT; + case SE_ALG_ECB: + if (encrypt) + return SE_CRYPTO_CFG_ECB_ENCRYPT; + else + return SE_CRYPTO_CFG_ECB_DECRYPT; + case SE_ALG_XTS: + if (encrypt) + return SE_CRYPTO_CFG_XTS_ENCRYPT; + else + return SE_CRYPTO_CFG_XTS_DECRYPT; + + case SE_ALG_CTR: + return SE_CRYPTO_CFG_CTR; + case SE_ALG_CBC_MAC: + return SE_CRYPTO_CFG_CBC_MAC; + + default: + break; + } + + return -EINVAL; +} + +static int tegra234_aes_cfg(u32 alg, bool encrypt) +{ + switch (alg) { + case SE_ALG_CBC: + case SE_ALG_ECB: + case SE_ALG_XTS: + case SE_ALG_CTR: + if (encrypt) + return SE_CFG_AES_ENCRYPT; + else + return SE_CFG_AES_DECRYPT; + + case SE_ALG_GMAC: + if (encrypt) + return SE_CFG_GMAC_ENCRYPT; + else + return SE_CFG_GMAC_DECRYPT; + + case SE_ALG_GCM: + if (encrypt) + return SE_CFG_GCM_ENCRYPT; + else + return SE_CFG_GCM_DECRYPT; + + case SE_ALG_GCM_FINAL: + if (encrypt) + return SE_CFG_GCM_FINAL_ENCRYPT; + else + return SE_CFG_GCM_FINAL_DECRYPT; + + case SE_ALG_CMAC: + return SE_CFG_CMAC; + + case SE_ALG_CBC_MAC: + return 
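ctr_iv_inc() above treats the IV as a big-endian counter and adds the number of processed blocks with byte-wise carry propagation; despite its name, the "bits" argument is the counter length in bytes (the driver passes the 16-byte IV size). A standalone copy with a small sanity check:

#include <stdint.h>
#include <stdio.h>

static void ctr_iv_inc(uint8_t *counter, uint8_t bytes, uint32_t nums)
{
        do {
                --bytes;
                nums += counter[bytes];
                counter[bytes] = nums & 0xff;
                nums >>= 8;
        } while (bytes && nums);
}

int main(void)
{
        uint8_t iv[16] = { [15] = 0xff };

        ctr_iv_inc(iv, sizeof(iv), 2);          /* 0x00ff + 2 = 0x0101 */
        printf("%02x %02x\n", iv[14], iv[15]);  /* prints: 01 01 */
        return 0;
}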
SE_AES_ENC_ALG_AES_ENC | + SE_AES_DST_HASH_REG; + } + return -EINVAL; +} + +static unsigned int tegra_aes_prep_cmd(struct tegra_aes_ctx *ctx, + struct tegra_aes_reqctx *rctx) +{ + unsigned int data_count, res_bits, i = 0, j; + struct tegra_se *se = ctx->se; + u32 *cpuvaddr = se->cmdbuf->addr; + dma_addr_t addr = rctx->datbuf.addr; + + data_count = rctx->len / AES_BLOCK_SIZE; + res_bits = (rctx->len % AES_BLOCK_SIZE) * 8; + + /* + * Hardware processes data_count + 1 blocks. + * Reduce 1 block if there is no residue + */ + if (!res_bits) + data_count--; + + if (rctx->iv) { + cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); + for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) + cpuvaddr[i++] = rctx->iv[j]; + } + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) | + SE_LAST_BLOCK_RES_BITS(res_bits); + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = rctx->config; + cpuvaddr[i++] = rctx->crypto_config; + + /* Source address setting */ + cpuvaddr[i++] = lower_32_bits(addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len); + + /* Destination address setting */ + cpuvaddr[i++] = lower_32_bits(addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | + SE_ADDR_HI_SZ(rctx->len); + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF | + SE_AES_OP_START; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config); + + return i; +} + +static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct skcipher_request *req = container_of(areq, struct skcipher_request, base); + struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req); + struct tegra_se *se = ctx->se; + unsigned int cmdlen; + int ret; + + rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + + rctx->datbuf.size = SE_AES_BUFLEN; + rctx->iv = (u32 *)req->iv; + rctx->len = req->cryptlen; + + /* Pad input to AES Block size */ + if (ctx->alg != SE_ALG_XTS) { + if (rctx->len % AES_BLOCK_SIZE) + rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE); + } + + scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0); + + /* Prepare the command and submit for execution */ + cmdlen = tegra_aes_prep_cmd(ctx, rctx); + ret = tegra_se_host1x_submit(se, cmdlen); + + /* Copy the result */ + tegra_aes_update_iv(req, ctx); + scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1); + + /* Free the buffer */ + dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->datbuf.buf, rctx->datbuf.addr); + + crypto_finalize_skcipher_request(se->engine, req, ret); + + return 0; +} + +static int tegra_aes_cra_init(struct crypto_skcipher *tfm) +{ + struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct tegra_se_alg *se_alg; + const char *algname; + int ret; + + se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base); + + crypto_skcipher_set_reqsize(tfm, sizeof(struct 
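tegra_aes_prep_cmd() programs the engine with a block count of data_count while the hardware actually processes data_count + 1 blocks, so the count is decremented when the length is an exact multiple of the block size and any partial block is expressed as residual bits. A small standalone check of that arithmetic, with lengths chosen for the example:

#include <stdio.h>

#define AES_BLOCK_SIZE 16

static void last_block_fields(unsigned int len)
{
        unsigned int data_count = len / AES_BLOCK_SIZE;
        unsigned int res_bits = (len % AES_BLOCK_SIZE) * 8;

        if (!res_bits)          /* hardware runs data_count + 1 blocks */
                data_count--;

        printf("len=%u -> data_count=%u res_bits=%u\n", len, data_count, res_bits);
}

int main(void)
{
        last_block_fields(32);  /* exact multiple: data_count=1, res_bits=0  */
        last_block_fields(20);  /* 16 + 4 bytes:   data_count=1, res_bits=32 */
        return 0;
}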
tegra_aes_reqctx)); + + ctx->ivsize = crypto_skcipher_ivsize(tfm); + ctx->se = se_alg->se_dev; + ctx->key1_id = 0; + ctx->key2_id = 0; + + algname = crypto_tfm_alg_name(&tfm->base); + ret = se_algname_to_algid(algname); + if (ret < 0) { + dev_err(ctx->se->dev, "invalid algorithm\n"); + return ret; + } + + ctx->alg = ret; + + return 0; +} + +static void tegra_aes_cra_exit(struct crypto_skcipher *tfm) +{ + struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base); + + if (ctx->key1_id) + tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg); + + if (ctx->key2_id) + tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg); +} + +static int tegra_aes_setkey(struct crypto_skcipher *tfm, + const u8 *key, u32 keylen) +{ + struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (aes_check_keylen(keylen)) { + dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); + return -EINVAL; + } + + return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id); +} + +static int tegra_xts_setkey(struct crypto_skcipher *tfm, + const u8 *key, u32 keylen) +{ + struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + u32 len = keylen / 2; + int ret; + + ret = xts_verify_key(tfm, key, keylen); + if (ret || aes_check_keylen(len)) { + dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); + return -EINVAL; + } + + ret = tegra_key_submit(ctx->se, key, len, + ctx->alg, &ctx->key1_id); + if (ret) + return ret; + + return tegra_key_submit(ctx->se, key + len, len, + ctx->alg, &ctx->key2_id); + + return 0; +} + +static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen) +{ + int manifest; + + manifest = SE_KAC_USER_NS; + + switch (alg) { + case SE_ALG_CBC: + case SE_ALG_ECB: + case SE_ALG_CTR: + manifest |= SE_KAC_ENC; + break; + case SE_ALG_XTS: + manifest |= SE_KAC_XTS; + break; + case SE_ALG_GCM: + manifest |= SE_KAC_GCM; + break; + case SE_ALG_CMAC: + manifest |= SE_KAC_CMAC; + break; + case SE_ALG_CBC_MAC: + manifest |= SE_KAC_ENC; + break; + default: + return -EINVAL; + } + + switch (keylen) { + case AES_KEYSIZE_128: + manifest |= SE_KAC_SIZE_128; + break; + case AES_KEYSIZE_192: + manifest |= SE_KAC_SIZE_192; + break; + case AES_KEYSIZE_256: + manifest |= SE_KAC_SIZE_256; + break; + default: + return -EINVAL; + } + + return manifest; +} + +static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt) + +{ + struct crypto_skcipher *tfm; + struct tegra_aes_ctx *ctx; + struct tegra_aes_reqctx *rctx; + + tfm = crypto_skcipher_reqtfm(req); + ctx = crypto_skcipher_ctx(tfm); + rctx = skcipher_request_ctx(req); + + if (ctx->alg != SE_ALG_XTS) { + if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) { + dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen); + return -EINVAL; + } + } else if (req->cryptlen < XTS_BLOCK_SIZE) { + dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen); + return -EINVAL; + } + + if (!req->cryptlen) + return 0; + + rctx->encrypt = encrypt; + rctx->config = tegra234_aes_cfg(ctx->alg, encrypt); + rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt); + rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id); + + if (ctx->key2_id) + rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id); + + return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req); +} + +static int tegra_aes_encrypt(struct skcipher_request *req) +{ + return tegra_aes_crypt(req, true); +} + +static int tegra_aes_decrypt(struct skcipher_request *req) +{ + return tegra_aes_crypt(req, false); +} + +static struct tegra_se_alg tegra_aes_algs[] = { + { + 
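tegra_aes_do_one_req() stages every request through a coherent bounce buffer: gather the source scatterlist into it, run the engine on the DMA address, then scatter the result back out. A sketch of that staging; example_run_engine() is only a stand-in for the real command-buffer preparation and host1x submission.

#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Stand-in for programming and kicking the hardware; always "succeeds" here. */
static int example_run_engine(struct device *dev, dma_addr_t addr,
                              unsigned int len)
{
        return 0;
}

static int example_bounce(struct device *dev, struct scatterlist *src,
                          struct scatterlist *dst, unsigned int len)
{
        dma_addr_t dma;
        void *buf;
        int ret;

        buf = dma_alloc_coherent(dev, len, &dma, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        scatterwalk_map_and_copy(buf, src, 0, len, 0);  /* gather input   */
        ret = example_run_engine(dev, dma, len);
        scatterwalk_map_and_copy(buf, dst, 0, len, 1);  /* scatter output */

        dma_free_coherent(dev, len, buf, dma);
        return ret;
}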
.alg.skcipher.op.do_one_request = tegra_aes_do_one_req, + .alg.skcipher.base = { + .init = tegra_aes_cra_init, + .exit = tegra_aes_cra_exit, + .setkey = tegra_aes_setkey, + .encrypt = tegra_aes_encrypt, + .decrypt = tegra_aes_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-tegra", + .cra_priority = 500, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_aes_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, + } + }, { + .alg.skcipher.op.do_one_request = tegra_aes_do_one_req, + .alg.skcipher.base = { + .init = tegra_aes_cra_init, + .exit = tegra_aes_cra_exit, + .setkey = tegra_aes_setkey, + .encrypt = tegra_aes_encrypt, + .decrypt = tegra_aes_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-tegra", + .cra_priority = 500, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_aes_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, + } + }, { + .alg.skcipher.op.do_one_request = tegra_aes_do_one_req, + .alg.skcipher.base = { + .init = tegra_aes_cra_init, + .exit = tegra_aes_cra_exit, + .setkey = tegra_aes_setkey, + .encrypt = tegra_aes_encrypt, + .decrypt = tegra_aes_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-tegra", + .cra_priority = 500, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct tegra_aes_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, + } + }, { + .alg.skcipher.op.do_one_request = tegra_aes_do_one_req, + .alg.skcipher.base = { + .init = tegra_aes_cra_init, + .exit = tegra_aes_cra_exit, + .setkey = tegra_xts_setkey, + .encrypt = tegra_aes_encrypt, + .decrypt = tegra_aes_decrypt, + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "xts-aes-tegra", + .cra_priority = 500, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_aes_ctx), + .cra_alignmask = (__alignof__(u64) - 1), + .cra_module = THIS_MODULE, + }, + } + }, +}; + +static unsigned int tegra_gmac_prep_cmd(struct tegra_aead_ctx *ctx, + struct tegra_aead_reqctx *rctx) +{ + unsigned int data_count, res_bits, i = 0; + struct tegra_se *se = ctx->se; + u32 *cpuvaddr = se->cmdbuf->addr; + + data_count = (rctx->assoclen / AES_BLOCK_SIZE); + res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8; + + /* + * Hardware processes data_count + 1 blocks. 
+ * Reduce 1 block if there is no residue + */ + if (!res_bits) + data_count--; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) | + SE_LAST_BLOCK_RES_BITS(res_bits); + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4); + cpuvaddr[i++] = rctx->config; + cpuvaddr[i++] = rctx->crypto_config; + cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) | + SE_ADDR_HI_SZ(rctx->assoclen); + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL | + SE_AES_OP_INIT | SE_AES_OP_LASTBUF | + SE_AES_OP_START; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + return i; +} + +static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_aead_ctx *ctx, + struct tegra_aead_reqctx *rctx) +{ + unsigned int data_count, res_bits, i = 0, j; + struct tegra_se *se = ctx->se; + u32 *cpuvaddr = se->cmdbuf->addr, op; + + data_count = (rctx->cryptlen / AES_BLOCK_SIZE); + res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8; + op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL | + SE_AES_OP_LASTBUF | SE_AES_OP_START; + + /* + * If there is no assoc data, + * this will be the init command + */ + if (!rctx->assoclen) + op |= SE_AES_OP_INIT; + + /* + * Hardware processes data_count + 1 blocks. + * Reduce 1 block if there is no residue + */ + if (!res_bits) + data_count--; + + cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); + for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) + cpuvaddr[i++] = rctx->iv[j]; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) | + SE_LAST_BLOCK_RES_BITS(res_bits); + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = rctx->config; + cpuvaddr[i++] = rctx->crypto_config; + + /* Source Address */ + cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) | + SE_ADDR_HI_SZ(rctx->cryptlen); + + /* Destination Address */ + cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) | + SE_ADDR_HI_SZ(rctx->cryptlen); + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = op; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config); + return i; +} + +static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr, + struct tegra_aead_reqctx *rctx) +{ + unsigned int i = 0, j; + u32 op; + + op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL | + SE_AES_OP_LASTBUF | SE_AES_OP_START; + + /* + * Set init for zero sized vector + */ + if (!rctx->assoclen && !rctx->cryptlen) + op |= SE_AES_OP_INIT; + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2); + cpuvaddr[i++] = rctx->assoclen * 8; + cpuvaddr[i++] = 0; + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2); + cpuvaddr[i++] = rctx->cryptlen * 8; + cpuvaddr[i++] = 0; + + cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); + cpuvaddr[i++] = 
se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); + for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) + cpuvaddr[i++] = rctx->iv[j]; + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = rctx->config; + cpuvaddr[i++] = rctx->crypto_config; + cpuvaddr[i++] = 0; + cpuvaddr[i++] = 0; + + /* Destination Address */ + cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) | + SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */ + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = op; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config); + + return i; +} + +static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) +{ + struct tegra_se *se = ctx->se; + unsigned int cmdlen; + + scatterwalk_map_and_copy(rctx->inbuf.buf, + rctx->src_sg, 0, rctx->assoclen, 0); + + rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt); + rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) | + SE_AES_KEY_INDEX(ctx->key_id); + + cmdlen = tegra_gmac_prep_cmd(ctx, rctx); + + return tegra_se_host1x_submit(se, cmdlen); +} + +static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) +{ + struct tegra_se *se = ctx->se; + int cmdlen, ret; + + scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg, + rctx->assoclen, rctx->cryptlen, 0); + + rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt); + rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) | + SE_AES_KEY_INDEX(ctx->key_id); + + /* Prepare command and submit */ + cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx); + ret = tegra_se_host1x_submit(se, cmdlen); + if (ret) + return ret; + + /* Copy the result */ + scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg, + rctx->assoclen, rctx->cryptlen, 1); + + return 0; +} + +static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) +{ + struct tegra_se *se = ctx->se; + u32 *cpuvaddr = se->cmdbuf->addr; + int cmdlen, ret, offset; + + rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt); + rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) | + SE_AES_KEY_INDEX(ctx->key_id); + + /* Prepare command and submit */ + cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx); + ret = tegra_se_host1x_submit(se, cmdlen); + if (ret) + return ret; + + if (rctx->encrypt) { + /* Copy the result */ + offset = rctx->assoclen + rctx->cryptlen; + scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg, + offset, rctx->authsize, 1); + } + + return 0; +} + +static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx) +{ + unsigned int offset; + u8 mac[16]; + + offset = rctx->assoclen + rctx->cryptlen; + scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0); + + if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize)) + return -EBADMSG; + + return 0; +} + +static inline int tegra_ccm_check_iv(const u8 *iv) +{ + /* iv[0] gives value of q-1 + * 2 <= q <= 8 as per NIST 800-38C notation + * 2 <= L <= 8, so 1 <= L' <= 7. 
as per rfc 3610 notation + */ + if (iv[0] < 1 || iv[0] > 7) { + pr_debug("ccm_check_iv failed %d\n", iv[0]); + return -EINVAL; + } + + return 0; +} + +static unsigned int tegra_cbcmac_prep_cmd(struct tegra_aead_ctx *ctx, + struct tegra_aead_reqctx *rctx) +{ + unsigned int data_count, i = 0; + struct tegra_se *se = ctx->se; + u32 *cpuvaddr = se->cmdbuf->addr; + + data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count); + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = rctx->config; + cpuvaddr[i++] = rctx->crypto_config; + + cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) | + SE_ADDR_HI_SZ(rctx->inbuf.size); + + cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) | + SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */ + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = SE_AES_OP_WRSTALL | + SE_AES_OP_LASTBUF | SE_AES_OP_START; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + return i; +} + +static unsigned int tegra_ctr_prep_cmd(struct tegra_aead_ctx *ctx, + struct tegra_aead_reqctx *rctx) +{ + unsigned int i = 0, j; + struct tegra_se *se = ctx->se; + u32 *cpuvaddr = se->cmdbuf->addr; + + cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); + for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) + cpuvaddr[i++] = rctx->iv[j]; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1; + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = rctx->config; + cpuvaddr[i++] = rctx->crypto_config; + + /* Source address setting */ + cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) | + SE_ADDR_HI_SZ(rctx->inbuf.size); + + /* Destination address setting */ + cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) | + SE_ADDR_HI_SZ(rctx->inbuf.size); + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF | + SE_AES_OP_START; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", + rctx->config, rctx->crypto_config); + + return i; +} + +static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) +{ + struct tegra_se *se = ctx->se; + int cmdlen; + + rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt); + rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC, + rctx->encrypt) | + SE_AES_KEY_INDEX(ctx->key_id); + + /* Prepare command and submit */ + cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx); + + return tegra_se_host1x_submit(se, cmdlen); +} + +static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize) +{ + __be32 data; + + memset(block, 0, csize); + block += csize; + + if (csize >= 4) + csize = 4; + else if (msglen > (1 << (8 * csize))) + 
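tegra_gcm_do_verify() above pulls the transmitted tag from the tail of the source scatterlist and compares it against the computed tag with crypto_memneq(), which runs in constant time. A sketch of that check:

#include <crypto/scatterwalk.h>
#include <crypto/utils.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_check_tag(struct scatterlist *src, unsigned int offset,
                             const u8 *computed, unsigned int authsize)
{
        u8 mac[16];     /* GCM tags are at most 16 bytes */

        /* offset = assoclen + cryptlen, i.e. the tag sits after the payload. */
        scatterwalk_map_and_copy(mac, src, offset, authsize, 0);

        return crypto_memneq(computed, mac, authsize) ? -EBADMSG : 0;
}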
return -EOVERFLOW; + + data = cpu_to_be32(msglen); + memcpy(block - csize, (u8 *)&data + 4 - csize, csize); + + return 0; +} + +static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce) +{ + unsigned int q, t; + u8 *q_ptr, *iv = (u8 *)rctx->iv; + + memcpy(nonce, rctx->iv, 16); + + /*** 1. Prepare Flags Octet ***/ + + /* Encode t (mac length) */ + t = rctx->authsize; + nonce[0] |= (((t - 2) / 2) << 3); + + /* Adata */ + if (rctx->assoclen) + nonce[0] |= (1 << 6); + + /*** Encode Q - message length ***/ + q = iv[0] + 1; + q_ptr = nonce + 16 - q; + + return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q); +} + +static int tegra_ccm_format_adata(u8 *adata, unsigned int a) +{ + int len = 0; + + /* add control info for associated data + * RFC 3610 and NIST Special Publication 800-38C + */ + if (a < 65280) { + *(__be16 *)adata = cpu_to_be16(a); + len = 2; + } else { + *(__be16 *)adata = cpu_to_be16(0xfffe); + *(__be32 *)&adata[2] = cpu_to_be32(a); + len = 6; + } + + return len; +} + +static int tegra_ccm_add_padding(u8 *buf, unsigned int len) +{ + unsigned int padlen = 16 - (len % 16); + u8 padding[16] = {0}; + + if (padlen == 16) + return 0; + + memcpy(buf, padding, padlen); + + return padlen; +} + +static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx) +{ + unsigned int alen = 0, offset = 0; + u8 nonce[16], adata[16]; + int ret; + + ret = tegra_ccm_format_nonce(rctx, nonce); + if (ret) + return ret; + + memcpy(rctx->inbuf.buf, nonce, 16); + offset = 16; + + if (rctx->assoclen) { + alen = tegra_ccm_format_adata(adata, rctx->assoclen); + memcpy(rctx->inbuf.buf + offset, adata, alen); + offset += alen; + + scatterwalk_map_and_copy(rctx->inbuf.buf + offset, + rctx->src_sg, 0, rctx->assoclen, 0); + + offset += rctx->assoclen; + offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, + rctx->assoclen + alen); + } + + return offset; +} + +static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx) +{ + u32 result[16]; + int i, ret; + + /* Read and clear Result */ + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + result[i] = readl(se->base + se->hw->regs->result + (i * 4)); + + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + writel(0, se->base + se->hw->regs->result + (i * 4)); + + if (rctx->encrypt) { + memcpy(rctx->authdata, result, rctx->authsize); + } else { + ret = crypto_memneq(rctx->authdata, result, rctx->authsize); + if (ret) + return -EBADMSG; + } + + return 0; +} + +static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx) +{ + /* Copy result */ + scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg, + rctx->assoclen, rctx->cryptlen, 1); + + if (rctx->encrypt) + scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg, + rctx->assoclen + rctx->cryptlen, + rctx->authsize, 1); + else + memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize); + + return 0; +} + +static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) +{ + struct tegra_se *se = ctx->se; + struct scatterlist *sg; + int offset, ret; + + offset = tegra_ccm_format_blocks(rctx); + if (offset < 0) + return -EINVAL; + + /* Copy plain text to the buffer */ + sg = rctx->encrypt ? 
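tegra_ccm_set_msg_len() and tegra_ccm_format_adata() above implement the RFC 3610 / NIST SP 800-38C encodings: the message length fills the trailing q bytes of B_0 in big-endian order, and the associated-data length is stored as two plain bytes when it is below 0xFF00, or as the 0xFFFE escape followed by a four-byte value otherwise. A standalone sketch of the associated-data case (illustrative only; the helper name is made up):

#include <stdint.h>

/*
 * Encode the associated-data length "a" as RFC 3610 section 2.2 describes:
 * lengths below 0xff00 take two big-endian bytes, larger 32-bit lengths take
 * the 0xff 0xfe marker plus four big-endian bytes.  Returns bytes written.
 */
static unsigned int ccm_encode_alen(uint8_t *out, uint32_t a)
{
	if (a < 0xff00) {
		out[0] = a >> 8;
		out[1] = a & 0xff;
		return 2;
	}

	out[0] = 0xff;
	out[1] = 0xfe;
	out[2] = a >> 24;
	out[3] = (a >> 16) & 0xff;
	out[4] = (a >> 8) & 0xff;
	out[5] = a & 0xff;
	return 6;
}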
rctx->src_sg : rctx->dst_sg; + + scatterwalk_map_and_copy(rctx->inbuf.buf + offset, + sg, rctx->assoclen, + rctx->cryptlen, 0); + offset += rctx->cryptlen; + offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen); + + rctx->inbuf.size = offset; + + ret = tegra_ccm_do_cbcmac(ctx, rctx); + if (ret) + return ret; + + return tegra_ccm_mac_result(se, rctx); +} + +static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) +{ + struct tegra_se *se = ctx->se; + unsigned int cmdlen, offset = 0; + struct scatterlist *sg = rctx->src_sg; + int ret; + + rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt); + rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) | + SE_AES_KEY_INDEX(ctx->key_id); + + /* Copy authdata in the top of buffer for encryption/decryption */ + if (rctx->encrypt) + memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize); + else + scatterwalk_map_and_copy(rctx->inbuf.buf, sg, + rctx->assoclen + rctx->cryptlen, + rctx->authsize, 0); + + offset += rctx->authsize; + offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize); + + /* If there is no cryptlen, proceed to submit the task */ + if (rctx->cryptlen) { + scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg, + rctx->assoclen, rctx->cryptlen, 0); + offset += rctx->cryptlen; + offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen); + } + + rctx->inbuf.size = offset; + + /* Prepare command and submit */ + cmdlen = tegra_ctr_prep_cmd(ctx, rctx); + ret = tegra_se_host1x_submit(se, cmdlen); + if (ret) + return ret; + + return tegra_ccm_ctr_result(se, rctx); +} + +static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se, + struct tegra_aead_reqctx *rctx) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + u8 *iv = (u8 *)rctx->iv; + int ret, i; + + rctx->src_sg = req->src; + rctx->dst_sg = req->dst; + rctx->assoclen = req->assoclen; + rctx->authsize = crypto_aead_authsize(tfm); + + memcpy(iv, req->iv, 16); + + ret = tegra_ccm_check_iv(iv); + if (ret) + return ret; + + /* Note: rfc 3610 and NIST 800-38C require counter (ctr_0) of + * zero to encrypt auth tag. + * req->iv has the formatted ctr_0 (i.e. Flags || N || 0). 
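The comment above describes req->iv as the pre-formatted Ctr_0 block, i.e. Flags || N || counter. With the RFC 3610 convention that iv[0] holds q - 1, the nonce N takes 15 - q bytes and the trailing q bytes are the block counter, which must be zero when the authentication tag is encrypted; that is exactly what the memset() that follows clears. A standalone sketch of the layout (not driver code):

#include <stdint.h>
#include <string.h>

/*
 * Build Ctr_0 from a CCM IV: keep the flags octet and the nonce, zero the
 * q-byte counter field at the tail, where q = iv[0] + 1.  This mirrors the
 * memset(iv + 15 - iv[0], 0, iv[0] + 1) in the driver.
 */
static void ccm_build_ctr0(uint8_t ctr0[16], const uint8_t iv[16])
{
	unsigned int q = iv[0] + 1;

	memcpy(ctr0, iv, 16);
	memset(ctr0 + 16 - q, 0, q);
}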
+ */ + memset(iv + 15 - iv[0], 0, iv[0] + 1); + + /* Clear any previous result */ + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + writel(0, se->base + se->hw->regs->result + (i * 4)); + + return 0; +} + +static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct aead_request *req = container_of(areq, struct aead_request, base); + struct tegra_aead_reqctx *rctx = aead_request_ctx(req); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct tegra_se *se = ctx->se; + int ret; + + /* Allocate buffers required */ + rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + &rctx->inbuf.addr, GFP_KERNEL); + if (!rctx->inbuf.buf) + return -ENOMEM; + + rctx->inbuf.size = SE_AES_BUFLEN; + + rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + &rctx->outbuf.addr, GFP_KERNEL); + if (!rctx->outbuf.buf) { + ret = -ENOMEM; + goto outbuf_err; + } + + rctx->outbuf.size = SE_AES_BUFLEN; + + ret = tegra_ccm_crypt_init(req, se, rctx); + if (ret) + goto out; + + if (rctx->encrypt) { + rctx->cryptlen = req->cryptlen; + + /* CBC MAC Operation */ + ret = tegra_ccm_compute_auth(ctx, rctx); + if (ret) + goto out; + + /* CTR operation */ + ret = tegra_ccm_do_ctr(ctx, rctx); + if (ret) + goto out; + } else { + rctx->cryptlen = req->cryptlen - ctx->authsize; + if (ret) + goto out; + + /* CTR operation */ + ret = tegra_ccm_do_ctr(ctx, rctx); + if (ret) + goto out; + + /* CBC MAC Operation */ + ret = tegra_ccm_compute_auth(ctx, rctx); + if (ret) + goto out; + } + +out: + dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->outbuf.buf, rctx->outbuf.addr); + +outbuf_err: + dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->inbuf.buf, rctx->inbuf.addr); + + crypto_finalize_aead_request(ctx->se->engine, req, ret); + + return 0; +} + +static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct aead_request *req = container_of(areq, struct aead_request, base); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct tegra_aead_reqctx *rctx = aead_request_ctx(req); + int ret; + + /* Allocate buffers required */ + rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + &rctx->inbuf.addr, GFP_KERNEL); + if (!rctx->inbuf.buf) + return -ENOMEM; + + rctx->inbuf.size = SE_AES_BUFLEN; + + rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + &rctx->outbuf.addr, GFP_KERNEL); + if (!rctx->outbuf.buf) { + ret = -ENOMEM; + goto outbuf_err; + } + + rctx->outbuf.size = SE_AES_BUFLEN; + + rctx->src_sg = req->src; + rctx->dst_sg = req->dst; + rctx->assoclen = req->assoclen; + rctx->authsize = crypto_aead_authsize(tfm); + + if (rctx->encrypt) + rctx->cryptlen = req->cryptlen; + else + rctx->cryptlen = req->cryptlen - ctx->authsize; + + memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); + rctx->iv[3] = (1 << 24); + + /* If there is associated data perform GMAC operation */ + if (rctx->assoclen) { + ret = tegra_gcm_do_gmac(ctx, rctx); + if (ret) + goto out; + } + + /* GCM Encryption/Decryption operation */ + if (rctx->cryptlen) { + ret = tegra_gcm_do_crypt(ctx, rctx); + if (ret) + goto out; + } + + /* GCM_FINAL operation */ + ret = tegra_gcm_do_final(ctx, rctx); + if (ret) + goto out; + + if (!rctx->encrypt) + ret = tegra_gcm_do_verify(ctx->se, rctx); + +out: + dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->outbuf.buf, rctx->outbuf.addr); + +outbuf_err: + dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + 
rctx->inbuf.buf, rctx->inbuf.addr); + + /* Finalize the request if there are no errors */ + crypto_finalize_aead_request(ctx->se->engine, req, ret); + + return 0; +} + +static int tegra_aead_cra_init(struct crypto_aead *tfm) +{ + struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct aead_alg *alg = crypto_aead_alg(tfm); + struct tegra_se_alg *se_alg; + const char *algname; + int ret; + + algname = crypto_tfm_alg_name(&tfm->base); + + se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base); + + crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx)); + + ctx->se = se_alg->se_dev; + ctx->key_id = 0; + + ret = se_algname_to_algid(algname); + if (ret < 0) { + dev_err(ctx->se->dev, "invalid algorithm\n"); + return ret; + } + + ctx->alg = ret; + + return 0; +} + +static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); + + switch (authsize) { + case 4: + case 6: + case 8: + case 10: + case 12: + case 14: + case 16: + break; + default: + return -EINVAL; + } + + ctx->authsize = authsize; + + return 0; +} + +static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); + int ret; + + ret = crypto_gcm_check_authsize(authsize); + if (ret) + return ret; + + ctx->authsize = authsize; + + return 0; +} + +static void tegra_aead_cra_exit(struct crypto_aead *tfm) +{ + struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base); + + if (ctx->key_id) + tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); +} + +static int tegra_aead_crypt(struct aead_request *req, bool encrypt) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct tegra_aead_reqctx *rctx = aead_request_ctx(req); + + rctx->encrypt = encrypt; + + return crypto_transfer_aead_request_to_engine(ctx->se->engine, req); +} + +static int tegra_aead_encrypt(struct aead_request *req) +{ + return tegra_aead_crypt(req, true); +} + +static int tegra_aead_decrypt(struct aead_request *req) +{ + return tegra_aead_crypt(req, false); +} + +static int tegra_aead_setkey(struct crypto_aead *tfm, + const u8 *key, u32 keylen) +{ + struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); + + if (aes_check_keylen(keylen)) { + dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); + return -EINVAL; + } + + return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); +} + +static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx, + struct tegra_cmac_reqctx *rctx) +{ + unsigned int data_count, res_bits = 0, i = 0, j; + struct tegra_se *se = ctx->se; + u32 *cpuvaddr = se->cmdbuf->addr, op; + + data_count = (rctx->datbuf.size / AES_BLOCK_SIZE); + + op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF; + + if (!(rctx->task & SHA_UPDATE)) { + op |= SE_AES_OP_FINAL; + res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8; + } + + if (!res_bits && data_count) + data_count--; + + if (rctx->task & SHA_FIRST) { + rctx->task &= ~SHA_FIRST; + + cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); + /* Load 0 IV */ + for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) + cpuvaddr[i++] = 0; + } + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) | + SE_LAST_BLOCK_RES_BITS(res_bits); + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = rctx->config; + 
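In tegra_gcm_do_one_req() above, the 96-bit IV fills the first three counter words and the fourth word is set to (1 << 24); stored as a little-endian 32-bit value that is the byte sequence 00 00 00 01, which matches the standard GCM pre-counter block J0 = IV || 0x00000001 for 96-bit IVs (NIST SP 800-38D). A byte-level sketch, illustrative only (the helper name and local macro are not from the driver):

#include <stdint.h>
#include <string.h>

#define GCM_IV_BYTES	12

/* Form J0, the initial counter block for a 96-bit GCM IV: the IV itself
 * followed by a 32-bit big-endian 1. */
static void gcm_build_j0(uint8_t j0[16], const uint8_t iv[GCM_IV_BYTES])
{
	memcpy(j0, iv, GCM_IV_BYTES);
	j0[12] = 0x00;
	j0[13] = 0x00;
	j0[14] = 0x00;
	j0[15] = 0x01;
}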
cpuvaddr[i++] = rctx->crypto_config; + + /* Source Address */ + cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) | + SE_ADDR_HI_SZ(rctx->datbuf.size); + cpuvaddr[i++] = 0; + cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE); + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = op; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + return i; +} + +static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx) +{ + int i; + + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4)); +} + +static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx) +{ + int i; + + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + writel(rctx->result[i], + se->base + se->hw->regs->result + (i * 4)); +} + +static int tegra_cmac_do_update(struct ahash_request *req) +{ + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + unsigned int nblks, nresidue, cmdlen; + int ret; + + if (!req->nbytes) + return 0; + + nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size; + nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size; + + /* + * Reserve the last block as residue during final() to process. + */ + if (!nresidue && nblks) { + nresidue += rctx->blk_size; + nblks--; + } + + rctx->src_sg = req->src; + rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue; + rctx->total_len += rctx->datbuf.size; + rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); + rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id); + + /* + * Keep one block and residue bytes in residue and + * return. The bytes will be processed in final() + */ + if (nblks < 1) { + scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size, + rctx->src_sg, 0, req->nbytes, 0); + + rctx->residue.size += req->nbytes; + return 0; + } + + /* Copy the previous residue first */ + if (rctx->residue.size) + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + + scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size, + rctx->src_sg, 0, req->nbytes - nresidue, 0); + + scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg, + req->nbytes - nresidue, nresidue, 0); + + /* Update residue value with the residue after current block */ + rctx->residue.size = nresidue; + + /* + * If this is not the first 'update' call, paste the previous copied + * intermediate results to the registers so that it gets picked up. + * This is to support the import/export functionality. + */ + if (!(rctx->task & SHA_FIRST)) + tegra_cmac_paste_result(ctx->se, rctx); + + cmdlen = tegra_cmac_prep_cmd(ctx, rctx); + + ret = tegra_se_host1x_submit(se, cmdlen); + /* + * If this is not the final update, copy the intermediate results + * from the registers so that it can be used in the next 'update' + * call. This is to support the import/export functionality. 
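tegra_cmac_do_update() above holds data back in two cases: any trailing partial block, and, when the input happens to be block aligned, the last full block, so that final() always has a residue to run the FINAL operation on. A standalone sketch of that bookkeeping, assuming a 16-byte block (function and variable names are made up, not driver code):

#include <stdio.h>

#define BLK 16

/* Split an update of "nbytes" new bytes plus "residue" carried-over bytes
 * into the amount processed now and the amount kept for final(). */
static void split_update(unsigned int nbytes, unsigned int residue,
			 unsigned int *process, unsigned int *keep)
{
	unsigned int total = nbytes + residue;
	unsigned int nresidue = total % BLK;
	unsigned int nblks = total / BLK;

	/* Block-aligned input: hold the last full block back for final(). */
	if (!nresidue && nblks) {
		nresidue = BLK;
		nblks--;
	}

	*process = nblks * BLK;		/* submitted to the engine now */
	*keep = nresidue;		/* carried over to final()     */
}

int main(void)
{
	unsigned int p, k;

	split_update(48, 0, &p, &k);	/* aligned: 32 processed, 16 kept */
	printf("%u processed, %u kept\n", p, k);

	split_update(20, 10, &p, &k);	/* 30 total: 16 processed, 14 kept */
	printf("%u processed, %u kept\n", p, k);
	return 0;
}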
+ */ + if (!(rctx->task & SHA_FINAL)) + tegra_cmac_copy_result(ctx->se, rctx); + + return ret; +} + +static int tegra_cmac_do_final(struct ahash_request *req) +{ + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + u32 *result = (u32 *)req->result; + int ret = 0, i, cmdlen; + + if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) { + return crypto_shash_tfm_digest(ctx->fallback_tfm, + rctx->datbuf.buf, 0, req->result); + } + + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + rctx->datbuf.size = rctx->residue.size; + rctx->total_len += rctx->residue.size; + rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); + + /* Prepare command and submit */ + cmdlen = tegra_cmac_prep_cmd(ctx, rctx); + ret = tegra_se_host1x_submit(se, cmdlen); + if (ret) + goto out; + + /* Read and clear Result register */ + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + result[i] = readl(se->base + se->hw->regs->result + (i * 4)); + + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + writel(0, se->base + se->hw->regs->result + (i * 4)); + +out: + dma_free_coherent(se->dev, SE_SHA_BUFLEN, + rctx->datbuf.buf, rctx->datbuf.addr); + dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2, + rctx->residue.buf, rctx->residue.addr); + return ret; +} + +static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct ahash_request *req = ahash_request_cast(areq); + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + int ret; + + if (rctx->task & SHA_UPDATE) { + ret = tegra_cmac_do_update(req); + rctx->task &= ~SHA_UPDATE; + } + + if (rctx->task & SHA_FINAL) { + ret = tegra_cmac_do_final(req); + rctx->task &= ~SHA_FINAL; + } + + crypto_finalize_hash_request(se->engine, req, ret); + + return 0; +} + +static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx, + const char *algname) +{ + unsigned int statesize; + + ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(ctx->fallback_tfm)) { + dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname); + ctx->fallback_tfm = NULL; + return; + } + + statesize = crypto_shash_statesize(ctx->fallback_tfm); + + if (statesize > sizeof(struct tegra_cmac_reqctx)) + crypto_ahash_set_statesize(tfm, statesize); +} + +static int tegra_cmac_cra_init(struct crypto_tfm *tfm) +{ + struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + struct tegra_se_alg *se_alg; + const char *algname; + int ret; + + algname = crypto_tfm_alg_name(tfm); + se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base); + + crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx)); + + ctx->se = se_alg->se_dev; + ctx->key_id = 0; + + ret = se_algname_to_algid(algname); + if (ret < 0) { + dev_err(ctx->se->dev, "invalid algorithm\n"); + return ret; + } + + ctx->alg = ret; + + tegra_cmac_init_fallback(ahash_tfm, ctx, algname); + + return 0; +} + +static void tegra_cmac_cra_exit(struct crypto_tfm *tfm) +{ + struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->fallback_tfm) + crypto_free_shash(ctx->fallback_tfm); + + tegra_key_invalidate(ctx->se, ctx->key_id, 
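tegra_cmac_do_final() above special-cases a request that never saw any data by handing it to the synchronous shash fallback; CMAC is defined for the empty message too (NIST SP 800-38B), it just has to be computed somewhere. A hedged kernel-style sketch of producing that value directly with the shash API (the tfm name and 128-bit key size are examples; error handling is minimal):

#include <crypto/hash.h>
#include <linux/err.h>

/* Compute CMAC-AES of the zero-length message with a synchronous shash
 * tfm, the same API the fallback path above relies on. */
static int cmac_empty_digest(u8 out[16], const u8 key[16])
{
	struct crypto_shash *tfm;
	u8 nodata[1] = { 0 };
	int ret;

	tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, key, 16);
	if (!ret)
		ret = crypto_shash_tfm_digest(tfm, nodata, 0, out);

	crypto_free_shash(tfm);
	return ret;
}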
ctx->alg); +} + +static int tegra_cmac_init(struct ahash_request *req) +{ + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + int i; + + rctx->total_len = 0; + rctx->datbuf.size = 0; + rctx->residue.size = 0; + rctx->task = SHA_FIRST; + rctx->blk_size = crypto_ahash_blocksize(tfm); + + rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, + &rctx->residue.addr, GFP_KERNEL); + if (!rctx->residue.buf) + goto resbuf_fail; + + rctx->residue.size = 0; + + rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + goto datbuf_fail; + + rctx->datbuf.size = 0; + + /* Clear any previous result */ + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + writel(0, se->base + se->hw->regs->result + (i * 4)); + + return 0; + +datbuf_fail: + dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, + rctx->residue.addr); +resbuf_fail: + return -ENOMEM; +} + +static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + + if (aes_check_keylen(keylen)) { + dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); + return -EINVAL; + } + + if (ctx->fallback_tfm) + crypto_shash_setkey(ctx->fallback_tfm, key, keylen); + + return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); +} + +static int tegra_cmac_update(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + + rctx->task |= SHA_UPDATE; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + +static int tegra_cmac_final(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + + rctx->task |= SHA_FINAL; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + +static int tegra_cmac_finup(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + + rctx->task |= SHA_UPDATE | SHA_FINAL; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + +static int tegra_cmac_digest(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + + tegra_cmac_init(req); + rctx->task |= SHA_UPDATE | SHA_FINAL; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + +static int tegra_cmac_export(struct ahash_request *req, void *out) +{ + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + + memcpy(out, rctx, sizeof(*rctx)); + + return 0; +} + +static int tegra_cmac_import(struct ahash_request *req, const void *in) +{ + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + + memcpy(rctx, in, sizeof(*rctx)); + + return 0; +} + +static struct tegra_se_alg tegra_aead_algs[] = { + { + .alg.aead.op.do_one_request = tegra_gcm_do_one_req, + .alg.aead.base = { + .init = tegra_aead_cra_init, + .exit = tegra_aead_cra_exit, + .setkey = tegra_aead_setkey, + .setauthsize = tegra_gcm_setauthsize, + .encrypt 
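tegra_cmac_export() and tegra_cmac_import() above snapshot the whole request context, which is what lets a caller park a partially hashed CMAC and resume it later. A hedged sketch of how a kernel caller might exercise that round trip through the generic ahash API (the wait/callback pattern is standard crypto API usage, but the function itself is illustrative, trims error handling, and assumes the data buffer is not on the stack):

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int cmac_export_import_demo(const u8 *key, unsigned int keylen,
				   const u8 *data, unsigned int len, u8 *mac)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	void *state;
	int ret;

	tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!req || !state) {
		ret = -ENOMEM;
		goto out;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ret = crypto_ahash_setkey(tfm, key, keylen);
	if (ret)
		goto out;

	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, mac, len);

	/* Hash the first chunk, then snapshot the partial state ... */
	ret = crypto_wait_req(crypto_ahash_init(req), &wait);
	if (!ret)
		ret = crypto_wait_req(crypto_ahash_update(req), &wait);
	if (!ret)
		ret = crypto_ahash_export(req, state);

	/* ... and restore the snapshot before producing the tag. */
	if (!ret)
		ret = crypto_ahash_import(req, state);
	if (!ret)
		ret = crypto_wait_req(crypto_ahash_final(req), &wait);

out:
	kfree(state);
	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}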
= tegra_aead_encrypt, + .decrypt = tegra_aead_decrypt, + .maxauthsize = AES_BLOCK_SIZE, + .ivsize = GCM_AES_IV_SIZE, + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-tegra", + .cra_priority = 500, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct tegra_aead_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, + } + }, { + .alg.aead.op.do_one_request = tegra_ccm_do_one_req, + .alg.aead.base = { + .init = tegra_aead_cra_init, + .exit = tegra_aead_cra_exit, + .setkey = tegra_aead_setkey, + .setauthsize = tegra_ccm_setauthsize, + .encrypt = tegra_aead_encrypt, + .decrypt = tegra_aead_decrypt, + .maxauthsize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .chunksize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ccm(aes)", + .cra_driver_name = "ccm-aes-tegra", + .cra_priority = 500, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct tegra_aead_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, + } + } +}; + +static struct tegra_se_alg tegra_cmac_algs[] = { + { + .alg.ahash.op.do_one_request = tegra_cmac_do_one_req, + .alg.ahash.base = { + .init = tegra_cmac_init, + .setkey = tegra_cmac_setkey, + .update = tegra_cmac_update, + .final = tegra_cmac_final, + .finup = tegra_cmac_finup, + .digest = tegra_cmac_digest, + .export = tegra_cmac_export, + .import = tegra_cmac_import, + .halg.digestsize = AES_BLOCK_SIZE, + .halg.statesize = sizeof(struct tegra_cmac_reqctx), + .halg.base = { + .cra_name = "cmac(aes)", + .cra_driver_name = "tegra-se-cmac", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_cmac_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_cmac_cra_init, + .cra_exit = tegra_cmac_cra_exit, + } + } + } +}; + +int tegra_init_aes(struct tegra_se *se) +{ + struct aead_engine_alg *aead_alg; + struct ahash_engine_alg *ahash_alg; + struct skcipher_engine_alg *sk_alg; + int i, ret; + + se->manifest = tegra_aes_kac_manifest; + + for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) { + sk_alg = &tegra_aes_algs[i].alg.skcipher; + tegra_aes_algs[i].se_dev = se; + + ret = crypto_engine_register_skcipher(sk_alg); + if (ret) { + dev_err(se->dev, "failed to register %s\n", + sk_alg->base.base.cra_name); + goto err_aes; + } + } + + for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) { + aead_alg = &tegra_aead_algs[i].alg.aead; + tegra_aead_algs[i].se_dev = se; + + ret = crypto_engine_register_aead(aead_alg); + if (ret) { + dev_err(se->dev, "failed to register %s\n", + aead_alg->base.base.cra_name); + goto err_aead; + } + } + + for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) { + ahash_alg = &tegra_cmac_algs[i].alg.ahash; + tegra_cmac_algs[i].se_dev = se; + + ret = crypto_engine_register_ahash(ahash_alg); + if (ret) { + dev_err(se->dev, "failed to register %s\n", + ahash_alg->base.halg.base.cra_name); + goto err_cmac; + } + } + + return 0; + +err_cmac: + while (i--) + crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash); + + i = ARRAY_SIZE(tegra_aead_algs); +err_aead: + while (i--) + crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead); + + i = ARRAY_SIZE(tegra_aes_algs); +err_aes: + while (i--) + crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher); + + return ret; +} + +void tegra_deinit_aes(struct tegra_se *se) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) + crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher); + + for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) + 
crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead); + + for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) + crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash); +} diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c new file mode 100644 index 0000000000..4d4bd727f4 --- /dev/null +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -0,0 +1,1060 @@ +// SPDX-License-Identifier: GPL-2.0-only +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* + * Crypto driver to handle HASH algorithms using NVIDIA Security Engine. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tegra-se.h" + +struct tegra_sha_ctx { + struct tegra_se *se; + unsigned int alg; + bool fallback; + u32 key_id; + struct crypto_ahash *fallback_tfm; +}; + +struct tegra_sha_reqctx { + struct scatterlist *src_sg; + struct tegra_se_datbuf datbuf; + struct tegra_se_datbuf residue; + struct tegra_se_datbuf digest; + unsigned int alg; + unsigned int config; + unsigned int total_len; + unsigned int blk_size; + unsigned int task; + u32 key_id; + u32 result[HASH_RESULT_REG_COUNT]; + struct ahash_request fallback_req; +}; + +static int tegra_sha_get_config(u32 alg) +{ + int cfg = 0; + + switch (alg) { + case SE_ALG_SHA1: + cfg |= SE_SHA_ENC_ALG_SHA; + cfg |= SE_SHA_ENC_MODE_SHA1; + break; + + case SE_ALG_HMAC_SHA224: + cfg |= SE_SHA_ENC_ALG_HMAC; + fallthrough; + case SE_ALG_SHA224: + cfg |= SE_SHA_ENC_ALG_SHA; + cfg |= SE_SHA_ENC_MODE_SHA224; + break; + + case SE_ALG_HMAC_SHA256: + cfg |= SE_SHA_ENC_ALG_HMAC; + fallthrough; + case SE_ALG_SHA256: + cfg |= SE_SHA_ENC_ALG_SHA; + cfg |= SE_SHA_ENC_MODE_SHA256; + break; + + case SE_ALG_HMAC_SHA384: + cfg |= SE_SHA_ENC_ALG_HMAC; + fallthrough; + case SE_ALG_SHA384: + cfg |= SE_SHA_ENC_ALG_SHA; + cfg |= SE_SHA_ENC_MODE_SHA384; + break; + + case SE_ALG_HMAC_SHA512: + cfg |= SE_SHA_ENC_ALG_HMAC; + fallthrough; + case SE_ALG_SHA512: + cfg |= SE_SHA_ENC_ALG_SHA; + cfg |= SE_SHA_ENC_MODE_SHA512; + break; + + case SE_ALG_SHA3_224: + cfg |= SE_SHA_ENC_ALG_SHA; + cfg |= SE_SHA_ENC_MODE_SHA3_224; + break; + case SE_ALG_SHA3_256: + cfg |= SE_SHA_ENC_ALG_SHA; + cfg |= SE_SHA_ENC_MODE_SHA3_256; + break; + case SE_ALG_SHA3_384: + cfg |= SE_SHA_ENC_ALG_SHA; + cfg |= SE_SHA_ENC_MODE_SHA3_384; + break; + case SE_ALG_SHA3_512: + cfg |= SE_SHA_ENC_ALG_SHA; + cfg |= SE_SHA_ENC_MODE_SHA3_512; + break; + default: + return -EINVAL; + } + + return cfg; +} + +static int tegra_sha_fallback_init(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_init(&rctx->fallback_req); +} + +static int tegra_sha_fallback_update(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + + return crypto_ahash_update(&rctx->fallback_req); +} + +static int 
tegra_sha_fallback_final(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.result = req->result; + + return crypto_ahash_final(&rctx->fallback_req); +} + +static int tegra_sha_fallback_finup(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + rctx->fallback_req.result = req->result; + + return crypto_ahash_finup(&rctx->fallback_req); +} + +static int tegra_sha_fallback_digest(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = req->nbytes; + rctx->fallback_req.src = req->src; + rctx->fallback_req.result = req->result; + + return crypto_ahash_digest(&rctx->fallback_req); +} + +static int tegra_sha_fallback_import(struct ahash_request *req, const void *in) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_import(&rctx->fallback_req, in); +} + +static int tegra_sha_fallback_export(struct ahash_request *req, void *out) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + rctx->fallback_req.base.flags = req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_export(&rctx->fallback_req, out); +} + +static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, + struct tegra_sha_reqctx *rctx) +{ + u64 msg_len, msg_left; + int i = 0; + + msg_len = rctx->total_len * 8; + msg_left = rctx->datbuf.size * 8; + + /* + * If IN_ADDR_HI_0.SZ > SHA_MSG_LEFT_[0-3] to the HASH engine, + * HW treats it as the last buffer and process the data. + * Therefore, add an extra byte to msg_left if it is not the + * last buffer. 
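Concretely, for a hash fed as a 64-byte update with more data still to come, the update programs SHA_MSG_LEFT one byte (eight bits) larger than the buffer it submits, so the engine does not conclude that this buffer is the last one and finalize early; the bias is only applied while SHA_UPDATE is set. A small arithmetic sketch of the values involved (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* First chunk of a longer message: 64 bytes hashed so far and
	 * 64 bytes in this buffer, with more data still to come. */
	uint64_t total_len = 64, buf_len = 64;
	uint64_t msg_len  = total_len * 8;	/* bits */
	uint64_t msg_left = buf_len * 8;	/* bits */
	int more_to_come = 1;

	/* Same bias as above: keep MSG_LEFT strictly larger than the
	 * submitted buffer so the engine does not finalize this chunk. */
	if (more_to_come) {
		msg_left += 8;
		msg_len += 8;
	}

	printf("MSG_LENGTH=%llu bits, MSG_LEFT=%llu bits\n",
	       (unsigned long long)msg_len, (unsigned long long)msg_left);
	return 0;
}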
+ */ + if (rctx->task & SHA_UPDATE) { + msg_left += 8; + msg_len += 8; + } + + cpuvaddr[i++] = host1x_opcode_setpayload(8); + cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_MSG_LENGTH); + cpuvaddr[i++] = lower_32_bits(msg_len); + cpuvaddr[i++] = upper_32_bits(msg_len); + cpuvaddr[i++] = 0; + cpuvaddr[i++] = 0; + cpuvaddr[i++] = lower_32_bits(msg_left); + cpuvaddr[i++] = upper_32_bits(msg_left); + cpuvaddr[i++] = 0; + cpuvaddr[i++] = 0; + cpuvaddr[i++] = host1x_opcode_setpayload(6); + cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG); + cpuvaddr[i++] = rctx->config; + + if (rctx->task & SHA_FIRST) { + cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT; + rctx->task &= ~SHA_FIRST; + } else { + cpuvaddr[i++] = 0; + } + + cpuvaddr[i++] = rctx->datbuf.addr; + cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) | + SE_ADDR_HI_SZ(rctx->datbuf.size)); + cpuvaddr[i++] = rctx->digest.addr; + cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) | + SE_ADDR_HI_SZ(rctx->digest.size)); + if (rctx->key_id) { + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG); + cpuvaddr[i++] = SE_AES_KEY_INDEX(rctx->key_id); + } + + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION); + cpuvaddr[i++] = SE_SHA_OP_WRSTALL | + SE_SHA_OP_START | + SE_SHA_OP_LASTBUF; + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x", + msg_len, msg_left, rctx->config); + + return i; +} + +static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx) +{ + int i; + + for (i = 0; i < HASH_RESULT_REG_COUNT; i++) + rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4)); +} + +static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx) +{ + int i; + + for (i = 0; i < HASH_RESULT_REG_COUNT; i++) + writel(rctx->result[i], + se->base + se->hw->regs->result + (i * 4)); +} + +static int tegra_sha_do_update(struct ahash_request *req) +{ + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + unsigned int nblks, nresidue, size, ret; + u32 *cpuvaddr = ctx->se->cmdbuf->addr; + + nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size; + nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size; + + /* + * If nbytes is a multiple of block size and there is no residue, + * then reserve the last block as residue during final() to process. + */ + if (!nresidue && nblks) { + nresidue = rctx->blk_size; + nblks--; + } + + rctx->src_sg = req->src; + rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue; + rctx->total_len += rctx->datbuf.size; + + /* + * If nbytes are less than a block size, copy it residue and + * return. 
The bytes will be processed in final() + */ + if (nblks < 1) { + scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size, + rctx->src_sg, 0, req->nbytes, 0); + + rctx->residue.size += req->nbytes; + return 0; + } + + /* Copy the previous residue first */ + if (rctx->residue.size) + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + + scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size, + rctx->src_sg, 0, req->nbytes - nresidue, 0); + + scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg, + req->nbytes - nresidue, nresidue, 0); + + /* Update residue value with the residue after current block */ + rctx->residue.size = nresidue; + + rctx->config = tegra_sha_get_config(rctx->alg) | + SE_SHA_DST_HASH_REG; + + /* + * If this is not the first 'update' call, paste the previous copied + * intermediate results to the registers so that it gets picked up. + * This is to support the import/export functionality. + */ + if (!(rctx->task & SHA_FIRST)) + tegra_sha_paste_hash_result(ctx->se, rctx); + + size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx); + + ret = tegra_se_host1x_submit(ctx->se, size); + + /* + * If this is not the final update, copy the intermediate results + * from the registers so that it can be used in the next 'update' + * call. This is to support the import/export functionality. + */ + if (!(rctx->task & SHA_FINAL)) + tegra_sha_copy_hash_result(ctx->se, rctx); + + return ret; +} + +static int tegra_sha_do_final(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + u32 *cpuvaddr = se->cmdbuf->addr; + int size, ret = 0; + + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + rctx->datbuf.size = rctx->residue.size; + rctx->total_len += rctx->residue.size; + + rctx->config = tegra_sha_get_config(rctx->alg) | + SE_SHA_DST_MEMORY; + + size = tegra_sha_prep_cmd(se, cpuvaddr, rctx); + + ret = tegra_se_host1x_submit(se, size); + if (ret) + goto out; + + /* Copy result */ + memcpy(req->result, rctx->digest.buf, rctx->digest.size); + +out: + dma_free_coherent(se->dev, SE_SHA_BUFLEN, + rctx->datbuf.buf, rctx->datbuf.addr); + dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm), + rctx->residue.buf, rctx->residue.addr); + dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, + rctx->digest.addr); + return ret; +} + +static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct ahash_request *req = ahash_request_cast(areq); + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + int ret = 0; + + if (rctx->task & SHA_UPDATE) { + ret = tegra_sha_do_update(req); + rctx->task &= ~SHA_UPDATE; + } + + if (rctx->task & SHA_FINAL) { + ret = tegra_sha_do_final(req); + rctx->task &= ~SHA_FINAL; + } + + crypto_finalize_hash_request(se->engine, req, ret); + + return 0; +} + +static void tegra_sha_init_fallback(struct crypto_ahash *tfm, struct tegra_sha_ctx *ctx, + const char *algname) +{ + unsigned int statesize; + + ctx->fallback_tfm = crypto_alloc_ahash(algname, 0, CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(ctx->fallback_tfm)) { + dev_warn(ctx->se->dev, + "failed to allocate fallback for %s\n", algname); + ctx->fallback_tfm = NULL; + return; + } + + statesize = 
crypto_ahash_statesize(ctx->fallback_tfm); + + if (statesize > sizeof(struct tegra_sha_reqctx)) + crypto_ahash_set_statesize(tfm, statesize); + + /* Update reqsize if fallback is added */ + crypto_ahash_set_reqsize(tfm, + sizeof(struct tegra_sha_reqctx) + + crypto_ahash_reqsize(ctx->fallback_tfm)); +} + +static int tegra_sha_cra_init(struct crypto_tfm *tfm) +{ + struct tegra_sha_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + struct tegra_se_alg *se_alg; + const char *algname; + int ret; + + algname = crypto_tfm_alg_name(tfm); + se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base); + + crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_sha_reqctx)); + + ctx->se = se_alg->se_dev; + ctx->fallback = false; + ctx->key_id = 0; + + ret = se_algname_to_algid(algname); + if (ret < 0) { + dev_err(ctx->se->dev, "invalid algorithm\n"); + return ret; + } + + if (se_alg->alg_base) + tegra_sha_init_fallback(ahash_tfm, ctx, algname); + + ctx->alg = ret; + + return 0; +} + +static void tegra_sha_cra_exit(struct crypto_tfm *tfm) +{ + struct tegra_sha_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->fallback_tfm) + crypto_free_ahash(ctx->fallback_tfm); + + tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); +} + +static int tegra_sha_init(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + + if (ctx->fallback) + return tegra_sha_fallback_init(req); + + rctx->total_len = 0; + rctx->datbuf.size = 0; + rctx->residue.size = 0; + rctx->key_id = ctx->key_id; + rctx->task = SHA_FIRST; + rctx->alg = ctx->alg; + rctx->blk_size = crypto_ahash_blocksize(tfm); + rctx->digest.size = crypto_ahash_digestsize(tfm); + + rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size, + &rctx->digest.addr, GFP_KERNEL); + if (!rctx->digest.buf) + goto digbuf_fail; + + rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size, + &rctx->residue.addr, GFP_KERNEL); + if (!rctx->residue.buf) + goto resbuf_fail; + + rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + goto datbuf_fail; + + return 0; + +datbuf_fail: + dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, + rctx->residue.addr); +resbuf_fail: + dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf, + rctx->datbuf.addr); +digbuf_fail: + return -ENOMEM; +} + +static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key, + unsigned int keylen) +{ + if (!ctx->fallback_tfm) { + dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); + return -EINVAL; + } + + ctx->fallback = true; + return crypto_ahash_setkey(ctx->fallback_tfm, key, keylen); +} + +static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + if (aes_check_keylen(keylen)) + return tegra_hmac_fallback_setkey(ctx, key, keylen); + + ctx->fallback = false; + + return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); +} + +static int tegra_sha_update(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + if (ctx->fallback) + return 
tegra_sha_fallback_update(req); + + rctx->task |= SHA_UPDATE; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + +static int tegra_sha_final(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + if (ctx->fallback) + return tegra_sha_fallback_final(req); + + rctx->task |= SHA_FINAL; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + +static int tegra_sha_finup(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + if (ctx->fallback) + return tegra_sha_fallback_finup(req); + + rctx->task |= SHA_UPDATE | SHA_FINAL; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + +static int tegra_sha_digest(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + if (ctx->fallback) + return tegra_sha_fallback_digest(req); + + tegra_sha_init(req); + rctx->task |= SHA_UPDATE | SHA_FINAL; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + +static int tegra_sha_export(struct ahash_request *req, void *out) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + if (ctx->fallback) + return tegra_sha_fallback_export(req, out); + + memcpy(out, rctx, sizeof(*rctx)); + + return 0; +} + +static int tegra_sha_import(struct ahash_request *req, const void *in) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + if (ctx->fallback) + return tegra_sha_fallback_import(req, in); + + memcpy(rctx, in, sizeof(*rctx)); + + return 0; +} + +static struct tegra_se_alg tegra_hash_algs[] = { + { + .alg.ahash.op.do_one_request = tegra_sha_do_one_req, + .alg.ahash.base = { + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + .digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + .halg.base = { + .cra_name = "sha1", + .cra_driver_name = "tegra-se-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } + } + }, { + .alg.ahash.op.do_one_request = tegra_sha_do_one_req, + .alg.ahash.base = { + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + .digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .halg.digestsize = SHA224_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + .halg.base = { + .cra_name = "sha224", + .cra_driver_name = "tegra-se-sha224", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = 
tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } + } + }, { + .alg.ahash.op.do_one_request = tegra_sha_do_one_req, + .alg.ahash.base = { + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + .digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + .halg.base = { + .cra_name = "sha256", + .cra_driver_name = "tegra-se-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } + } + }, { + .alg.ahash.op.do_one_request = tegra_sha_do_one_req, + .alg.ahash.base = { + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + .digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + .halg.base = { + .cra_name = "sha384", + .cra_driver_name = "tegra-se-sha384", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } + } + }, { + .alg.ahash.op.do_one_request = tegra_sha_do_one_req, + .alg.ahash.base = { + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + .digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + .halg.base = { + .cra_name = "sha512", + .cra_driver_name = "tegra-se-sha512", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } + } + }, { + .alg.ahash.op.do_one_request = tegra_sha_do_one_req, + .alg.ahash.base = { + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + .digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .halg.digestsize = SHA3_224_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + .halg.base = { + .cra_name = "sha3-224", + .cra_driver_name = "tegra-se-sha3-224", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH, + .cra_blocksize = SHA3_224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } + } + }, { + .alg.ahash.op.do_one_request = tegra_sha_do_one_req, + .alg.ahash.base = { + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + .digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .halg.digestsize = SHA3_256_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + .halg.base = { + .cra_name = "sha3-256", + .cra_driver_name = "tegra-se-sha3-256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH, + .cra_blocksize = 
SHA3_256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } + } + }, { + .alg.ahash.op.do_one_request = tegra_sha_do_one_req, + .alg.ahash.base = { + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + .digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .halg.digestsize = SHA3_384_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + .halg.base = { + .cra_name = "sha3-384", + .cra_driver_name = "tegra-se-sha3-384", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH, + .cra_blocksize = SHA3_384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } + } + }, { + .alg.ahash.op.do_one_request = tegra_sha_do_one_req, + .alg.ahash.base = { + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + .digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .halg.digestsize = SHA3_512_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + .halg.base = { + .cra_name = "sha3-512", + .cra_driver_name = "tegra-se-sha3-512", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH, + .cra_blocksize = SHA3_512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } + } + }, { + .alg_base = "sha224", + .alg.ahash.op.do_one_request = tegra_sha_do_one_req, + .alg.ahash.base = { + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + .digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .setkey = tegra_hmac_setkey, + .halg.digestsize = SHA224_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + .halg.base = { + .cra_name = "hmac(sha224)", + .cra_driver_name = "tegra-se-hmac-sha224", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } + } + }, { + .alg_base = "sha256", + .alg.ahash.op.do_one_request = tegra_sha_do_one_req, + .alg.ahash.base = { + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + .digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .setkey = tegra_hmac_setkey, + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + .halg.base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "tegra-se-hmac-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } + } + }, { + .alg_base = "sha384", + .alg.ahash.op.do_one_request = tegra_sha_do_one_req, + .alg.ahash.base = { + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + 
.digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .setkey = tegra_hmac_setkey, + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + .halg.base = { + .cra_name = "hmac(sha384)", + .cra_driver_name = "tegra-se-hmac-sha384", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } + } + }, { + .alg_base = "sha512", + .alg.ahash.op.do_one_request = tegra_sha_do_one_req, + .alg.ahash.base = { + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + .digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .setkey = tegra_hmac_setkey, + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + .halg.base = { + .cra_name = "hmac(sha512)", + .cra_driver_name = "tegra-se-hmac-sha512", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } + } + } +}; + +static int tegra_hash_kac_manifest(u32 user, u32 alg, u32 keylen) +{ + int manifest; + + manifest = SE_KAC_USER_NS; + + switch (alg) { + case SE_ALG_HMAC_SHA224: + case SE_ALG_HMAC_SHA256: + case SE_ALG_HMAC_SHA384: + case SE_ALG_HMAC_SHA512: + manifest |= SE_KAC_HMAC; + break; + default: + return -EINVAL; + } + + switch (keylen) { + case AES_KEYSIZE_128: + manifest |= SE_KAC_SIZE_128; + break; + case AES_KEYSIZE_192: + manifest |= SE_KAC_SIZE_192; + break; + case AES_KEYSIZE_256: + default: + manifest |= SE_KAC_SIZE_256; + break; + } + + return manifest; +} + +int tegra_init_hash(struct tegra_se *se) +{ + struct ahash_engine_alg *alg; + int i, ret; + + se->manifest = tegra_hash_kac_manifest; + + for (i = 0; i < ARRAY_SIZE(tegra_hash_algs); i++) { + tegra_hash_algs[i].se_dev = se; + alg = &tegra_hash_algs[i].alg.ahash; + + ret = crypto_engine_register_ahash(alg); + if (ret) { + dev_err(se->dev, "failed to register %s\n", + alg->base.halg.base.cra_name); + goto sha_err; + } + } + + return 0; + +sha_err: + while (i--) + crypto_engine_unregister_ahash(&tegra_hash_algs[i].alg.ahash); + + return ret; +} + +void tegra_deinit_hash(struct tegra_se *se) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tegra_hash_algs); i++) + crypto_engine_unregister_ahash(&tegra_hash_algs[i].alg.ahash); +} diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c new file mode 100644 index 0000000000..ac14678dbd --- /dev/null +++ b/drivers/crypto/tegra/tegra-se-key.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0-only +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* + * Crypto driver file to manage keys of NVIDIA Security Engine. 
+ */ + +#include +#include +#include + +#include "tegra-se.h" + +#define SE_KEY_FULL_MASK GENMASK(SE_MAX_KEYSLOT, 0) + +/* Reserve keyslot 0, 14, 15 */ +#define SE_KEY_RSVD_MASK (BIT(0) | BIT(14) | BIT(15)) +#define SE_KEY_VALID_MASK (SE_KEY_FULL_MASK & ~SE_KEY_RSVD_MASK) + +/* Mutex lock to guard keyslots */ +static DEFINE_MUTEX(kslt_lock); + +/* Keyslot bitmask (0 = available, 1 = in use/not available) */ +static u16 tegra_se_keyslots = SE_KEY_RSVD_MASK; + +static u16 tegra_keyslot_alloc(void) +{ + u16 keyid; + + mutex_lock(&kslt_lock); + /* Check if all key slots are full */ + if (tegra_se_keyslots == GENMASK(SE_MAX_KEYSLOT, 0)) { + mutex_unlock(&kslt_lock); + return 0; + } + + keyid = ffz(tegra_se_keyslots); + tegra_se_keyslots |= BIT(keyid); + + mutex_unlock(&kslt_lock); + + return keyid; +} + +static void tegra_keyslot_free(u16 slot) +{ + mutex_lock(&kslt_lock); + tegra_se_keyslots &= ~(BIT(slot)); + mutex_unlock(&kslt_lock); +} + +static unsigned int tegra_key_prep_ins_cmd(struct tegra_se *se, u32 *cpuvaddr, + const u32 *key, u32 keylen, u16 slot, u32 alg) +{ + int i = 0, j; + + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op); + cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_DUMMY; + + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->manifest); + cpuvaddr[i++] = se->manifest(se->owner, alg, keylen); + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_dst); + + cpuvaddr[i++] = SE_AES_KEY_DST_INDEX(slot); + + for (j = 0; j < keylen / 4; j++) { + /* Set key address */ + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_addr); + cpuvaddr[i++] = j; + + /* Set key data */ + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_data); + cpuvaddr[i++] = key[j]; + } + + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->config); + cpuvaddr[i++] = SE_CFG_INS; + + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op); + cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_START | + SE_AES_OP_LASTBUF; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + dev_dbg(se->dev, "key-slot %u key-manifest %#x\n", + slot, se->manifest(se->owner, alg, keylen)); + + return i; +} + +static bool tegra_key_in_kslt(u32 keyid) +{ + bool ret; + + if (keyid > SE_MAX_KEYSLOT) + return false; + + mutex_lock(&kslt_lock); + ret = ((BIT(keyid) & SE_KEY_VALID_MASK) && + (BIT(keyid) & tegra_se_keyslots)); + mutex_unlock(&kslt_lock); + + return ret; +} + +static int tegra_key_insert(struct tegra_se *se, const u8 *key, + u32 keylen, u16 slot, u32 alg) +{ + const u32 *keyval = (u32 *)key; + u32 *addr = se->cmdbuf->addr, size; + + size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg); + + return tegra_se_host1x_submit(se, size); +} + +void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) +{ + u8 zkey[AES_MAX_KEY_SIZE] = {0}; + + if (!keyid) + return; + + /* Overwrite the key with 0s */ + tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg); + + tegra_keyslot_free(keyid); +} + +int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid) +{ + int ret; + + /* Use the existing 
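tegra_keyslot_alloc() above treats the keyslot bank as a small bitmap, with slots 0, 14 and 15 permanently reserved, and hands out the lowest clear bit. A standalone sketch of that policy, assuming SE_MAX_KEYSLOT is 15 (a 16-slot bank) and replacing ffz() with a plain loop; the failure return here differs from the driver, which reports slot 0:

#include <stdint.h>
#include <stdio.h>

#define RSVD_MASK	((1u << 0) | (1u << 14) | (1u << 15))
#define FULL_MASK	0xffffu

/* Allocate the lowest free slot, or return -1 when the bank is full. */
static int slot_alloc(uint16_t *slots)
{
	unsigned int bit;

	if (*slots == FULL_MASK)
		return -1;

	for (bit = 0; bit < 16; bit++)		/* stand-in for ffz() */
		if (!(*slots & (1u << bit)))
			break;

	*slots |= 1u << bit;
	return bit;
}

int main(void)
{
	uint16_t slots = RSVD_MASK;

	printf("first:  %d\n", slot_alloc(&slots));	/* 1 */
	printf("second: %d\n", slot_alloc(&slots));	/* 2 */
	return 0;
}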
slot if it is already allocated */ + if (!tegra_key_in_kslt(*keyid)) { + *keyid = tegra_keyslot_alloc(); + if (!(*keyid)) { + dev_err(se->dev, "failed to allocate key slot\n"); + return -ENOMEM; + } + } + + ret = tegra_key_insert(se, key, keylen, *keyid, alg); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c new file mode 100644 index 0000000000..f94c0331b1 --- /dev/null +++ b/drivers/crypto/tegra/tegra-se-main.c @@ -0,0 +1,436 @@ +// SPDX-License-Identifier: GPL-2.0-only +// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* + * Crypto driver for NVIDIA Security Engine in Tegra Chips + */ + +#include +#include +#include +#include +#include + +#include + +#include "tegra-se.h" + +static struct host1x_bo *tegra_se_cmdbuf_get(struct host1x_bo *host_bo) +{ + struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo); + + kref_get(&cmdbuf->ref); + + return host_bo; +} + +static void tegra_se_cmdbuf_release(struct kref *ref) +{ + struct tegra_se_cmdbuf *cmdbuf = container_of(ref, struct tegra_se_cmdbuf, ref); + + dma_free_attrs(cmdbuf->dev, cmdbuf->size, cmdbuf->addr, + cmdbuf->iova, 0); + + kfree(cmdbuf); +} + +static void tegra_se_cmdbuf_put(struct host1x_bo *host_bo) +{ + struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo); + + kref_put(&cmdbuf->ref, tegra_se_cmdbuf_release); +} + +static struct host1x_bo_mapping * +tegra_se_cmdbuf_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction) +{ + struct tegra_se_cmdbuf *cmdbuf = container_of(bo, struct tegra_se_cmdbuf, bo); + struct host1x_bo_mapping *map; + int err; + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) + return ERR_PTR(-ENOMEM); + + kref_init(&map->ref); + map->bo = host1x_bo_get(bo); + map->direction = direction; + map->dev = dev; + + map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL); + if (!map->sgt) { + err = -ENOMEM; + goto free; + } + + err = dma_get_sgtable(dev, map->sgt, cmdbuf->addr, + cmdbuf->iova, cmdbuf->words * 4); + if (err) + goto free_sgt; + + err = dma_map_sgtable(dev, map->sgt, direction, 0); + if (err) + goto free_sgt; + + map->phys = sg_dma_address(map->sgt->sgl); + map->size = cmdbuf->words * 4; + map->chunks = err; + + return map; + +free_sgt: + sg_free_table(map->sgt); + kfree(map->sgt); +free: + kfree(map); + return ERR_PTR(err); +} + +static void tegra_se_cmdbuf_unpin(struct host1x_bo_mapping *map) +{ + if (!map) + return; + + dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0); + sg_free_table(map->sgt); + kfree(map->sgt); + host1x_bo_put(map->bo); + + kfree(map); +} + +static void *tegra_se_cmdbuf_mmap(struct host1x_bo *host_bo) +{ + struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo); + + return cmdbuf->addr; +} + +static void tegra_se_cmdbuf_munmap(struct host1x_bo *host_bo, void *addr) +{ +} + +static const struct host1x_bo_ops tegra_se_cmdbuf_ops = { + .get = tegra_se_cmdbuf_get, + .put = tegra_se_cmdbuf_put, + .pin = tegra_se_cmdbuf_pin, + .unpin = tegra_se_cmdbuf_unpin, + .mmap = tegra_se_cmdbuf_mmap, + .munmap = tegra_se_cmdbuf_munmap, +}; + +static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssize_t size) +{ + struct tegra_se_cmdbuf *cmdbuf; + struct device *dev = se->dev->parent; + + cmdbuf = kzalloc(sizeof(*cmdbuf), GFP_KERNEL); + if (!cmdbuf) + return NULL; + + cmdbuf->addr = dma_alloc_attrs(dev, size, &cmdbuf->iova, + 
GFP_KERNEL, 0); + if (!cmdbuf->addr) + return NULL; + + cmdbuf->size = size; + cmdbuf->dev = dev; + + host1x_bo_init(&cmdbuf->bo, &tegra_se_cmdbuf_ops); + kref_init(&cmdbuf->ref); + + return cmdbuf; +} + +int tegra_se_host1x_submit(struct tegra_se *se, u32 size) +{ + struct host1x_job *job; + int ret; + + job = host1x_job_alloc(se->channel, 1, 0, true); + if (!job) { + dev_err(se->dev, "failed to allocate host1x job\n"); + return -ENOMEM; + } + + job->syncpt = host1x_syncpt_get(se->syncpt); + job->syncpt_incrs = 1; + job->client = &se->client; + job->class = se->client.class; + job->serialize = true; + job->engine_fallback_streamid = se->stream_id; + job->engine_streamid_offset = SE_STREAM_ID; + + se->cmdbuf->words = size; + + host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0); + + ret = host1x_job_pin(job, se->dev); + if (ret) { + dev_err(se->dev, "failed to pin host1x job\n"); + goto job_put; + } + + ret = host1x_job_submit(job); + if (ret) { + dev_err(se->dev, "failed to submit host1x job\n"); + goto job_unpin; + } + + ret = host1x_syncpt_wait(job->syncpt, job->syncpt_end, + MAX_SCHEDULE_TIMEOUT, NULL); + if (ret) { + dev_err(se->dev, "host1x job timed out\n"); + return ret; + } + + host1x_job_put(job); + return 0; + +job_unpin: + host1x_job_unpin(job); +job_put: + host1x_job_put(job); + + return ret; +} + +static int tegra_se_client_init(struct host1x_client *client) +{ + struct tegra_se *se = container_of(client, struct tegra_se, client); + int ret; + + se->channel = host1x_channel_request(&se->client); + if (!se->channel) { + dev_err(se->dev, "host1x channel map failed\n"); + return -ENODEV; + } + + se->syncpt = host1x_syncpt_request(&se->client, 0); + if (!se->syncpt) { + dev_err(se->dev, "host1x syncpt allocation failed\n"); + ret = -EINVAL; + goto channel_put; + } + + se->syncpt_id = host1x_syncpt_id(se->syncpt); + + se->cmdbuf = tegra_se_host1x_bo_alloc(se, SZ_4K); + if (!se->cmdbuf) { + ret = -ENOMEM; + goto syncpt_put; + } + + ret = se->hw->init_alg(se); + if (ret) { + dev_err(se->dev, "failed to register algorithms\n"); + goto cmdbuf_put; + } + + return 0; + +cmdbuf_put: + tegra_se_cmdbuf_put(&se->cmdbuf->bo); +syncpt_put: + host1x_syncpt_put(se->syncpt); +channel_put: + host1x_channel_put(se->channel); + + return ret; +} + +static int tegra_se_client_deinit(struct host1x_client *client) +{ + struct tegra_se *se = container_of(client, struct tegra_se, client); + + se->hw->deinit_alg(se); + tegra_se_cmdbuf_put(&se->cmdbuf->bo); + host1x_syncpt_put(se->syncpt); + host1x_channel_put(se->channel); + + return 0; +} + +static const struct host1x_client_ops tegra_se_client_ops = { + .init = tegra_se_client_init, + .exit = tegra_se_client_deinit, +}; + +static int tegra_se_host1x_register(struct tegra_se *se) +{ + INIT_LIST_HEAD(&se->client.list); + se->client.dev = se->dev; + se->client.ops = &tegra_se_client_ops; + se->client.class = se->hw->host1x_class; + se->client.num_syncpts = 1; + + host1x_client_register(&se->client); + + return 0; +} + +static int tegra_se_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct tegra_se *se; + int ret; + + se = devm_kzalloc(dev, sizeof(*se), GFP_KERNEL); + if (!se) + return -ENOMEM; + + se->dev = dev; + se->owner = TEGRA_GPSE_ID; + se->hw = device_get_match_data(&pdev->dev); + + se->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(se->base)) + return PTR_ERR(se->base); + + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39)); + platform_set_drvdata(pdev, se); + + se->clk = devm_clk_get_enabled(se->dev, 
NULL); + if (IS_ERR(se->clk)) + return dev_err_probe(dev, PTR_ERR(se->clk), + "failed to enable clocks\n"); + + if (!tegra_dev_iommu_get_stream_id(dev, &se->stream_id)) + return dev_err_probe(dev, -ENODEV, + "failed to get IOMMU stream ID\n"); + + writel(se->stream_id, se->base + SE_STREAM_ID); + + se->engine = crypto_engine_alloc_init(dev, 0); + if (!se->engine) + return dev_err_probe(dev, -ENOMEM, "failed to init crypto engine\n"); + + ret = crypto_engine_start(se->engine); + if (ret) { + crypto_engine_exit(se->engine); + return dev_err_probe(dev, ret, "failed to start crypto engine\n"); + } + + ret = tegra_se_host1x_register(se); + if (ret) { + crypto_engine_stop(se->engine); + crypto_engine_exit(se->engine); + return dev_err_probe(dev, ret, "failed to init host1x params\n"); + } + + return 0; +} + +static void tegra_se_remove(struct platform_device *pdev) +{ + struct tegra_se *se = platform_get_drvdata(pdev); + + crypto_engine_stop(se->engine); + crypto_engine_exit(se->engine); + host1x_client_unregister(&se->client); +} + +static const struct tegra_se_regs tegra234_aes1_regs = { + .config = SE_AES1_CFG, + .op = SE_AES1_OPERATION, + .last_blk = SE_AES1_LAST_BLOCK, + .linear_ctr = SE_AES1_LINEAR_CTR, + .aad_len = SE_AES1_AAD_LEN, + .cryp_msg_len = SE_AES1_CRYPTO_MSG_LEN, + .manifest = SE_AES1_KEYMANIFEST, + .key_addr = SE_AES1_KEY_ADDR, + .key_data = SE_AES1_KEY_DATA, + .key_dst = SE_AES1_KEY_DST, + .result = SE_AES1_CMAC_RESULT, +}; + +static const struct tegra_se_regs tegra234_hash_regs = { + .config = SE_SHA_CFG, + .op = SE_SHA_OPERATION, + .manifest = SE_SHA_KEYMANIFEST, + .key_addr = SE_SHA_KEY_ADDR, + .key_data = SE_SHA_KEY_DATA, + .key_dst = SE_SHA_KEY_DST, + .result = SE_SHA_HASH_RESULT, +}; + +static const struct tegra_se_hw tegra234_aes_hw = { + .regs = &tegra234_aes1_regs, + .kac_ver = 1, + .host1x_class = 0x3b, + .init_alg = tegra_init_aes, + .deinit_alg = tegra_deinit_aes, +}; + +static const struct tegra_se_hw tegra234_hash_hw = { + .regs = &tegra234_hash_regs, + .kac_ver = 1, + .host1x_class = 0x3d, + .init_alg = tegra_init_hash, + .deinit_alg = tegra_deinit_hash, +}; + +static const struct of_device_id tegra_se_of_match[] = { + { + .compatible = "nvidia,tegra234-se-aes", + .data = &tegra234_aes_hw + }, { + .compatible = "nvidia,tegra234-se-hash", + .data = &tegra234_hash_hw, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_se_of_match); + +static struct platform_driver tegra_se_driver = { + .driver = { + .name = "tegra-se", + .of_match_table = tegra_se_of_match, + }, + .probe = tegra_se_probe, + .remove_new = tegra_se_remove, +}; + +static int tegra_se_host1x_probe(struct host1x_device *dev) +{ + return host1x_device_init(dev); +} + +static int tegra_se_host1x_remove(struct host1x_device *dev) +{ + host1x_device_exit(dev); + + return 0; +} + +static struct host1x_driver tegra_se_host1x_driver = { + .driver = { + .name = "tegra-se-host1x", + }, + .probe = tegra_se_host1x_probe, + .remove = tegra_se_host1x_remove, + .subdevs = tegra_se_of_match, +}; + +static int __init tegra_se_module_init(void) +{ + int ret; + + ret = host1x_driver_register(&tegra_se_host1x_driver); + if (ret) + return ret; + + return platform_driver_register(&tegra_se_driver); +} + +static void __exit tegra_se_module_exit(void) +{ + host1x_driver_unregister(&tegra_se_host1x_driver); + platform_driver_unregister(&tegra_se_driver); +} + +module_init(tegra_se_module_init); +module_exit(tegra_se_module_exit); + +MODULE_DESCRIPTION("NVIDIA Tegra Security Engine Driver"); +MODULE_AUTHOR("Akhil R "); 
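tegra_se_probe() above reports every failure through dev_err_probe(), which logs the message and returns the error in one step, while downgrading -EPROBE_DEFER to a debug message so deferred probes do not flood the log. A small sketch of the idiom; example_get_clk() is a hypothetical helper, not part of the driver:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_clk(struct device *dev, struct clk **clk)
{
        *clk = devm_clk_get_enabled(dev, NULL);
        if (IS_ERR(*clk))
                /* Logs (quietly for -EPROBE_DEFER) and returns the error code */
                return dev_err_probe(dev, PTR_ERR(*clk),
                                     "failed to get and enable clock\n");

        return 0;
}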
+MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h new file mode 100644 index 0000000000..b9dd7ceb87 --- /dev/null +++ b/drivers/crypto/tegra/tegra-se.h @@ -0,0 +1,560 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * Header file for NVIDIA Security Engine driver. + */ + +#ifndef _TEGRA_SE_H +#define _TEGRA_SE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SE_OWNERSHIP 0x14 +#define SE_OWNERSHIP_UID(x) FIELD_GET(GENMASK(7, 0), x) +#define TEGRA_GPSE_ID 3 + +#define SE_STREAM_ID 0x90 + +#define SE_SHA_CFG 0x4004 +#define SE_SHA_KEY_ADDR 0x4094 +#define SE_SHA_KEY_DATA 0x4098 +#define SE_SHA_KEYMANIFEST 0x409c +#define SE_SHA_CRYPTO_CFG 0x40a4 +#define SE_SHA_KEY_DST 0x40a8 +#define SE_SHA_SRC_KSLT 0x4180 +#define SE_SHA_TGT_KSLT 0x4184 +#define SE_SHA_MSG_LENGTH 0x401c +#define SE_SHA_OPERATION 0x407c +#define SE_SHA_HASH_RESULT 0x40b0 + +#define SE_SHA_ENC_MODE(x) FIELD_PREP(GENMASK(31, 24), x) +#define SE_SHA_ENC_MODE_SHA1 SE_SHA_ENC_MODE(0) +#define SE_SHA_ENC_MODE_SHA224 SE_SHA_ENC_MODE(4) +#define SE_SHA_ENC_MODE_SHA256 SE_SHA_ENC_MODE(5) +#define SE_SHA_ENC_MODE_SHA384 SE_SHA_ENC_MODE(6) +#define SE_SHA_ENC_MODE_SHA512 SE_SHA_ENC_MODE(7) +#define SE_SHA_ENC_MODE_SHA_CTX_INTEGRITY SE_SHA_ENC_MODE(8) +#define SE_SHA_ENC_MODE_SHA3_224 SE_SHA_ENC_MODE(9) +#define SE_SHA_ENC_MODE_SHA3_256 SE_SHA_ENC_MODE(10) +#define SE_SHA_ENC_MODE_SHA3_384 SE_SHA_ENC_MODE(11) +#define SE_SHA_ENC_MODE_SHA3_512 SE_SHA_ENC_MODE(12) +#define SE_SHA_ENC_MODE_SHAKE128 SE_SHA_ENC_MODE(13) +#define SE_SHA_ENC_MODE_SHAKE256 SE_SHA_ENC_MODE(14) +#define SE_SHA_ENC_MODE_HMAC_SHA256_1KEY SE_SHA_ENC_MODE(0) +#define SE_SHA_ENC_MODE_HMAC_SHA256_2KEY SE_SHA_ENC_MODE(1) +#define SE_SHA_ENC_MODE_SM3_256 SE_SHA_ENC_MODE(0) + +#define SE_SHA_CFG_ENC_ALG(x) FIELD_PREP(GENMASK(15, 12), x) +#define SE_SHA_ENC_ALG_NOP SE_SHA_CFG_ENC_ALG(0) +#define SE_SHA_ENC_ALG_SHA_ENC SE_SHA_CFG_ENC_ALG(1) +#define SE_SHA_ENC_ALG_RNG SE_SHA_CFG_ENC_ALG(2) +#define SE_SHA_ENC_ALG_SHA SE_SHA_CFG_ENC_ALG(3) +#define SE_SHA_ENC_ALG_SM3 SE_SHA_CFG_ENC_ALG(4) +#define SE_SHA_ENC_ALG_HMAC SE_SHA_CFG_ENC_ALG(7) +#define SE_SHA_ENC_ALG_KDF SE_SHA_CFG_ENC_ALG(8) +#define SE_SHA_ENC_ALG_KEY_INVLD SE_SHA_CFG_ENC_ALG(10) +#define SE_SHA_ENC_ALG_KEY_INQUIRE SE_SHA_CFG_ENC_ALG(12) +#define SE_SHA_ENC_ALG_INS SE_SHA_CFG_ENC_ALG(13) + +#define SE_SHA_OP_LASTBUF FIELD_PREP(BIT(16), 1) +#define SE_SHA_OP_WRSTALL FIELD_PREP(BIT(15), 1) + +#define SE_SHA_OP_OP(x) FIELD_PREP(GENMASK(2, 0), x) +#define SE_SHA_OP_START SE_SHA_OP_OP(1) +#define SE_SHA_OP_RESTART_OUT SE_SHA_OP_OP(2) +#define SE_SHA_OP_RESTART_IN SE_SHA_OP_OP(4) +#define SE_SHA_OP_RESTART_INOUT SE_SHA_OP_OP(5) +#define SE_SHA_OP_DUMMY SE_SHA_OP_OP(6) + +#define SE_SHA_CFG_DEC_ALG(x) FIELD_PREP(GENMASK(11, 8), x) +#define SE_SHA_DEC_ALG_NOP SE_SHA_CFG_DEC_ALG(0) +#define SE_SHA_DEC_ALG_AES_DEC SE_SHA_CFG_DEC_ALG(1) +#define SE_SHA_DEC_ALG_HMAC SE_SHA_CFG_DEC_ALG(7) +#define SE_SHA_DEC_ALG_HMAC_VERIFY SE_SHA_CFG_DEC_ALG(9) + +#define SE_SHA_CFG_DST(x) FIELD_PREP(GENMASK(4, 2), x) +#define SE_SHA_DST_MEMORY SE_SHA_CFG_DST(0) +#define SE_SHA_DST_HASH_REG SE_SHA_CFG_DST(1) +#define SE_SHA_DST_KEYTABLE SE_SHA_CFG_DST(2) +#define SE_SHA_DST_SRK SE_SHA_CFG_DST(3) + +#define SE_SHA_TASK_HASH_INIT BIT(0) + +/* AES Configuration */ +#define SE_AES0_CFG 0x1004 +#define SE_AES0_CRYPTO_CONFIG 0x1008 
+#define SE_AES0_KEY_DST 0x1030 +#define SE_AES0_OPERATION 0x1038 +#define SE_AES0_LINEAR_CTR 0x101c +#define SE_AES0_LAST_BLOCK 0x102c +#define SE_AES0_KEY_ADDR 0x10bc +#define SE_AES0_KEY_DATA 0x10c0 +#define SE_AES0_CMAC_RESULT 0x10c4 +#define SE_AES0_SRC_KSLT 0x1100 +#define SE_AES0_TGT_KSLT 0x1104 +#define SE_AES0_KEYMANIFEST 0x1114 +#define SE_AES0_AAD_LEN 0x112c +#define SE_AES0_CRYPTO_MSG_LEN 0x1134 + +#define SE_AES1_CFG 0x2004 +#define SE_AES1_CRYPTO_CONFIG 0x2008 +#define SE_AES1_KEY_DST 0x2030 +#define SE_AES1_OPERATION 0x2038 +#define SE_AES1_LINEAR_CTR 0x201c +#define SE_AES1_LAST_BLOCK 0x202c +#define SE_AES1_KEY_ADDR 0x20bc +#define SE_AES1_KEY_DATA 0x20c0 +#define SE_AES1_CMAC_RESULT 0x20c4 +#define SE_AES1_SRC_KSLT 0x2100 +#define SE_AES1_TGT_KSLT 0x2104 +#define SE_AES1_KEYMANIFEST 0x2114 +#define SE_AES1_AAD_LEN 0x212c +#define SE_AES1_CRYPTO_MSG_LEN 0x2134 + +#define SE_AES_CFG_ENC_MODE(x) FIELD_PREP(GENMASK(31, 24), x) +#define SE_AES_ENC_MODE_GMAC SE_AES_CFG_ENC_MODE(3) +#define SE_AES_ENC_MODE_GCM SE_AES_CFG_ENC_MODE(4) +#define SE_AES_ENC_MODE_GCM_FINAL SE_AES_CFG_ENC_MODE(5) +#define SE_AES_ENC_MODE_CMAC SE_AES_CFG_ENC_MODE(7) +#define SE_AES_ENC_MODE_CBC_MAC SE_AES_CFG_ENC_MODE(12) + +#define SE_AES_CFG_DEC_MODE(x) FIELD_PREP(GENMASK(23, 16), x) +#define SE_AES_DEC_MODE_GMAC SE_AES_CFG_DEC_MODE(3) +#define SE_AES_DEC_MODE_GCM SE_AES_CFG_DEC_MODE(4) +#define SE_AES_DEC_MODE_GCM_FINAL SE_AES_CFG_DEC_MODE(5) +#define SE_AES_DEC_MODE_CBC_MAC SE_AES_CFG_DEC_MODE(12) + +#define SE_AES_CFG_ENC_ALG(x) FIELD_PREP(GENMASK(15, 12), x) +#define SE_AES_ENC_ALG_NOP SE_AES_CFG_ENC_ALG(0) +#define SE_AES_ENC_ALG_AES_ENC SE_AES_CFG_ENC_ALG(1) +#define SE_AES_ENC_ALG_RNG SE_AES_CFG_ENC_ALG(2) +#define SE_AES_ENC_ALG_SHA SE_AES_CFG_ENC_ALG(3) +#define SE_AES_ENC_ALG_HMAC SE_AES_CFG_ENC_ALG(7) +#define SE_AES_ENC_ALG_KDF SE_AES_CFG_ENC_ALG(8) +#define SE_AES_ENC_ALG_INS SE_AES_CFG_ENC_ALG(13) + +#define SE_AES_CFG_DEC_ALG(x) FIELD_PREP(GENMASK(11, 8), x) +#define SE_AES_DEC_ALG_NOP SE_AES_CFG_DEC_ALG(0) +#define SE_AES_DEC_ALG_AES_DEC SE_AES_CFG_DEC_ALG(1) + +#define SE_AES_CFG_DST(x) FIELD_PREP(GENMASK(4, 2), x) +#define SE_AES_DST_MEMORY SE_AES_CFG_DST(0) +#define SE_AES_DST_HASH_REG SE_AES_CFG_DST(1) +#define SE_AES_DST_KEYTABLE SE_AES_CFG_DST(2) +#define SE_AES_DST_SRK SE_AES_CFG_DST(3) + +/* AES Crypto Configuration */ +#define SE_AES_KEY2_INDEX(x) FIELD_PREP(GENMASK(31, 28), x) +#define SE_AES_KEY_INDEX(x) FIELD_PREP(GENMASK(27, 24), x) + +#define SE_AES_CRYPTO_CFG_SCC_DIS FIELD_PREP(BIT(20), 1) + +#define SE_AES_CRYPTO_CFG_CTR_CNTN(x) FIELD_PREP(GENMASK(18, 11), x) + +#define SE_AES_CRYPTO_CFG_IV_MODE(x) FIELD_PREP(BIT(10), x) +#define SE_AES_IV_MODE_SWIV SE_AES_CRYPTO_CFG_IV_MODE(0) +#define SE_AES_IV_MODE_HWIV SE_AES_CRYPTO_CFG_IV_MODE(1) + +#define SE_AES_CRYPTO_CFG_CORE_SEL(x) FIELD_PREP(BIT(9), x) +#define SE_AES_CORE_SEL_DECRYPT SE_AES_CRYPTO_CFG_CORE_SEL(0) +#define SE_AES_CORE_SEL_ENCRYPT SE_AES_CRYPTO_CFG_CORE_SEL(1) + +#define SE_AES_CRYPTO_CFG_IV_SEL(x) FIELD_PREP(GENMASK(8, 7), x) +#define SE_AES_IV_SEL_UPDATED SE_AES_CRYPTO_CFG_IV_SEL(1) +#define SE_AES_IV_SEL_REG SE_AES_CRYPTO_CFG_IV_SEL(2) +#define SE_AES_IV_SEL_RANDOM SE_AES_CRYPTO_CFG_IV_SEL(3) + +#define SE_AES_CRYPTO_CFG_VCTRAM_SEL(x) FIELD_PREP(GENMASK(6, 5), x) +#define SE_AES_VCTRAM_SEL_MEMORY SE_AES_CRYPTO_CFG_VCTRAM_SEL(0) +#define SE_AES_VCTRAM_SEL_TWEAK SE_AES_CRYPTO_CFG_VCTRAM_SEL(1) +#define SE_AES_VCTRAM_SEL_AESOUT SE_AES_CRYPTO_CFG_VCTRAM_SEL(2) +#define SE_AES_VCTRAM_SEL_PREV_MEM 
SE_AES_CRYPTO_CFG_VCTRAM_SEL(3) + +#define SE_AES_CRYPTO_CFG_INPUT_SEL(x) FIELD_PREP(GENMASK(4, 3), x) +#define SE_AES_INPUT_SEL_MEMORY SE_AES_CRYPTO_CFG_INPUT_SEL(0) +#define SE_AES_INPUT_SEL_RANDOM SE_AES_CRYPTO_CFG_INPUT_SEL(1) +#define SE_AES_INPUT_SEL_AESOUT SE_AES_CRYPTO_CFG_INPUT_SEL(2) +#define SE_AES_INPUT_SEL_LINEAR_CTR SE_AES_CRYPTO_CFG_INPUT_SEL(3) +#define SE_AES_INPUT_SEL_REG SE_AES_CRYPTO_CFG_INPUT_SEL(1) + +#define SE_AES_CRYPTO_CFG_XOR_POS(x) FIELD_PREP(GENMASK(2, 1), x) +#define SE_AES_XOR_POS_BYPASS SE_AES_CRYPTO_CFG_XOR_POS(0) +#define SE_AES_XOR_POS_BOTH SE_AES_CRYPTO_CFG_XOR_POS(1) +#define SE_AES_XOR_POS_TOP SE_AES_CRYPTO_CFG_XOR_POS(2) +#define SE_AES_XOR_POS_BOTTOM SE_AES_CRYPTO_CFG_XOR_POS(3) + +#define SE_AES_CRYPTO_CFG_HASH_EN(x) FIELD_PREP(BIT(0), x) +#define SE_AES_HASH_DISABLE SE_AES_CRYPTO_CFG_HASH_EN(0) +#define SE_AES_HASH_ENABLE SE_AES_CRYPTO_CFG_HASH_EN(1) + +#define SE_LAST_BLOCK_VAL(x) FIELD_PREP(GENMASK(19, 0), x) +#define SE_LAST_BLOCK_RES_BITS(x) FIELD_PREP(GENMASK(26, 20), x) + +#define SE_AES_OP_LASTBUF FIELD_PREP(BIT(16), 1) +#define SE_AES_OP_WRSTALL FIELD_PREP(BIT(15), 1) +#define SE_AES_OP_FINAL FIELD_PREP(BIT(5), 1) +#define SE_AES_OP_INIT FIELD_PREP(BIT(4), 1) + +#define SE_AES_OP_OP(x) FIELD_PREP(GENMASK(2, 0), x) +#define SE_AES_OP_START SE_AES_OP_OP(1) +#define SE_AES_OP_RESTART_OUT SE_AES_OP_OP(2) +#define SE_AES_OP_RESTART_IN SE_AES_OP_OP(4) +#define SE_AES_OP_RESTART_INOUT SE_AES_OP_OP(5) +#define SE_AES_OP_DUMMY SE_AES_OP_OP(6) + +#define SE_KAC_SIZE(x) FIELD_PREP(GENMASK(15, 14), x) +#define SE_KAC_SIZE_128 SE_KAC_SIZE(0) +#define SE_KAC_SIZE_192 SE_KAC_SIZE(1) +#define SE_KAC_SIZE_256 SE_KAC_SIZE(2) + +#define SE_KAC_EXPORTABLE FIELD_PREP(BIT(12), 1) + +#define SE_KAC_PURPOSE(x) FIELD_PREP(GENMASK(11, 8), x) +#define SE_KAC_ENC SE_KAC_PURPOSE(0) +#define SE_KAC_CMAC SE_KAC_PURPOSE(1) +#define SE_KAC_HMAC SE_KAC_PURPOSE(2) +#define SE_KAC_GCM_KW SE_KAC_PURPOSE(3) +#define SE_KAC_HMAC_KDK SE_KAC_PURPOSE(6) +#define SE_KAC_HMAC_KDD SE_KAC_PURPOSE(7) +#define SE_KAC_HMAC_KDD_KUW SE_KAC_PURPOSE(8) +#define SE_KAC_XTS SE_KAC_PURPOSE(9) +#define SE_KAC_GCM SE_KAC_PURPOSE(10) + +#define SE_KAC_USER_NS FIELD_PREP(GENMASK(6, 4), 3) + +#define SE_AES_KEY_DST_INDEX(x) FIELD_PREP(GENMASK(11, 8), x) +#define SE_ADDR_HI_MSB(x) FIELD_PREP(GENMASK(31, 24), x) +#define SE_ADDR_HI_SZ(x) FIELD_PREP(GENMASK(23, 0), x) + +#define SE_CFG_AES_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \ + SE_AES_DEC_ALG_NOP | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_AES_DECRYPT (SE_AES_ENC_ALG_NOP | \ + SE_AES_DEC_ALG_AES_DEC | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_GMAC_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \ + SE_AES_DEC_ALG_NOP | \ + SE_AES_ENC_MODE_GMAC | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_GMAC_DECRYPT (SE_AES_ENC_ALG_NOP | \ + SE_AES_DEC_ALG_AES_DEC | \ + SE_AES_DEC_MODE_GMAC | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_GCM_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \ + SE_AES_DEC_ALG_NOP | \ + SE_AES_ENC_MODE_GCM | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_GCM_DECRYPT (SE_AES_ENC_ALG_NOP | \ + SE_AES_DEC_ALG_AES_DEC | \ + SE_AES_DEC_MODE_GCM | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_GCM_FINAL_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \ + SE_AES_DEC_ALG_NOP | \ + SE_AES_ENC_MODE_GCM_FINAL | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_GCM_FINAL_DECRYPT (SE_AES_ENC_ALG_NOP | \ + SE_AES_DEC_ALG_AES_DEC | \ + SE_AES_DEC_MODE_GCM_FINAL | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_CMAC (SE_AES_ENC_ALG_AES_ENC | \ + SE_AES_ENC_MODE_CMAC | \ + SE_AES_DST_HASH_REG) + +#define SE_CFG_CBC_MAC 
(SE_AES_ENC_ALG_AES_ENC | \ + SE_AES_ENC_MODE_CBC_MAC) + +#define SE_CFG_INS (SE_AES_ENC_ALG_INS | \ + SE_AES_DEC_ALG_NOP) + +#define SE_CRYPTO_CFG_ECB_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \ + SE_AES_XOR_POS_BYPASS | \ + SE_AES_CORE_SEL_ENCRYPT) + +#define SE_CRYPTO_CFG_ECB_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \ + SE_AES_XOR_POS_BYPASS | \ + SE_AES_CORE_SEL_DECRYPT) + +#define SE_CRYPTO_CFG_CBC_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \ + SE_AES_VCTRAM_SEL_AESOUT | \ + SE_AES_XOR_POS_TOP | \ + SE_AES_CORE_SEL_ENCRYPT | \ + SE_AES_IV_SEL_REG) + +#define SE_CRYPTO_CFG_CBC_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \ + SE_AES_VCTRAM_SEL_PREV_MEM | \ + SE_AES_XOR_POS_BOTTOM | \ + SE_AES_CORE_SEL_DECRYPT | \ + SE_AES_IV_SEL_REG) + +#define SE_CRYPTO_CFG_CTR (SE_AES_INPUT_SEL_LINEAR_CTR | \ + SE_AES_VCTRAM_SEL_MEMORY | \ + SE_AES_XOR_POS_BOTTOM | \ + SE_AES_CORE_SEL_ENCRYPT | \ + SE_AES_CRYPTO_CFG_CTR_CNTN(1) | \ + SE_AES_IV_SEL_REG) + +#define SE_CRYPTO_CFG_XTS_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \ + SE_AES_VCTRAM_SEL_TWEAK | \ + SE_AES_XOR_POS_BOTH | \ + SE_AES_CORE_SEL_ENCRYPT | \ + SE_AES_IV_SEL_REG) + +#define SE_CRYPTO_CFG_XTS_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \ + SE_AES_VCTRAM_SEL_TWEAK | \ + SE_AES_XOR_POS_BOTH | \ + SE_AES_CORE_SEL_DECRYPT | \ + SE_AES_IV_SEL_REG) + +#define SE_CRYPTO_CFG_XTS_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \ + SE_AES_VCTRAM_SEL_TWEAK | \ + SE_AES_XOR_POS_BOTH | \ + SE_AES_CORE_SEL_DECRYPT | \ + SE_AES_IV_SEL_REG) + +#define SE_CRYPTO_CFG_CBC_MAC (SE_AES_INPUT_SEL_MEMORY | \ + SE_AES_VCTRAM_SEL_AESOUT | \ + SE_AES_XOR_POS_TOP | \ + SE_AES_CORE_SEL_ENCRYPT | \ + SE_AES_HASH_ENABLE | \ + SE_AES_IV_SEL_REG) + +#define HASH_RESULT_REG_COUNT 50 +#define CMAC_RESULT_REG_COUNT 4 + +#define SE_CRYPTO_CTR_REG_COUNT 4 +#define SE_MAX_KEYSLOT 15 +#define SE_MAX_MEM_ALLOC SZ_4M +#define SE_AES_BUFLEN 0x8000 +#define SE_SHA_BUFLEN 0x2000 + +#define SHA_FIRST BIT(0) +#define SHA_UPDATE BIT(1) +#define SHA_FINAL BIT(2) + +/* Security Engine operation modes */ +enum se_aes_alg { + SE_ALG_CBC, /* Cipher Block Chaining (CBC) mode */ + SE_ALG_ECB, /* Electronic Codebook (ECB) mode */ + SE_ALG_CTR, /* Counter (CTR) mode */ + SE_ALG_XTS, /* XTS mode */ + SE_ALG_GMAC, /* GMAC mode */ + SE_ALG_GCM, /* GCM mode */ + SE_ALG_GCM_FINAL, /* GCM FINAL mode */ + SE_ALG_CMAC, /* Cipher-based MAC (CMAC) mode */ + SE_ALG_CBC_MAC, /* CBC MAC mode */ +}; + +enum se_hash_alg { + SE_ALG_RNG_DRBG, /* Deterministic Random Bit Generator */ + SE_ALG_SHA1, /* Secure Hash Algorithm-1 (SHA1) mode */ + SE_ALG_SHA224, /* Secure Hash Algorithm-224 (SHA224) mode */ + SE_ALG_SHA256, /* Secure Hash Algorithm-256 (SHA256) mode */ + SE_ALG_SHA384, /* Secure Hash Algorithm-384 (SHA384) mode */ + SE_ALG_SHA512, /* Secure Hash Algorithm-512 (SHA512) mode */ + SE_ALG_SHA3_224, /* Secure Hash Algorithm3-224 (SHA3-224) mode */ + SE_ALG_SHA3_256, /* Secure Hash Algorithm3-256 (SHA3-256) mode */ + SE_ALG_SHA3_384, /* Secure Hash Algorithm3-384 (SHA3-384) mode */ + SE_ALG_SHA3_512, /* Secure Hash Algorithm3-512 (SHA3-512) mode */ + SE_ALG_SHAKE128, /* Secure Hash Algorithm3 (SHAKE128) mode */ + SE_ALG_SHAKE256, /* Secure Hash Algorithm3 (SHAKE256) mode */ + SE_ALG_HMAC_SHA224, /* Hash based MAC (HMAC) - 224 */ + SE_ALG_HMAC_SHA256, /* Hash based MAC (HMAC) - 256 */ + SE_ALG_HMAC_SHA384, /* Hash based MAC (HMAC) - 384 */ + SE_ALG_HMAC_SHA512, /* Hash based MAC (HMAC) - 512 */ +}; + +struct tegra_se_alg { + struct tegra_se *se_dev; + const char *alg_base; + + union { + struct skcipher_engine_alg skcipher; + struct aead_engine_alg aead; + struct 
ahash_engine_alg ahash; + } alg; +}; + +struct tegra_se_regs { + u32 op; + u32 config; + u32 last_blk; + u32 linear_ctr; + u32 out_addr; + u32 aad_len; + u32 cryp_msg_len; + u32 manifest; + u32 key_addr; + u32 key_data; + u32 key_dst; + u32 result; +}; + +struct tegra_se_hw { + const struct tegra_se_regs *regs; + int (*init_alg)(struct tegra_se *se); + void (*deinit_alg)(struct tegra_se *se); + bool support_sm_alg; + u32 host1x_class; + u32 kac_ver; +}; + +struct tegra_se { + int (*manifest)(u32 user, u32 alg, u32 keylen); + const struct tegra_se_hw *hw; + struct host1x_client client; + struct host1x_channel *channel; + struct tegra_se_cmdbuf *cmdbuf; + struct crypto_engine *engine; + struct host1x_syncpt *syncpt; + struct device *dev; + struct clk *clk; + unsigned int opcode_addr; + unsigned int stream_id; + unsigned int syncpt_id; + void __iomem *base; + u32 owner; +}; + +struct tegra_se_cmdbuf { + dma_addr_t iova; + u32 *addr; + struct device *dev; + struct kref ref; + struct host1x_bo bo; + ssize_t size; + u32 words; +}; + +struct tegra_se_datbuf { + u8 *buf; + dma_addr_t addr; + ssize_t size; +}; + +static inline int se_algname_to_algid(const char *name) +{ + if (!strcmp(name, "cbc(aes)")) + return SE_ALG_CBC; + else if (!strcmp(name, "ecb(aes)")) + return SE_ALG_ECB; + else if (!strcmp(name, "ctr(aes)")) + return SE_ALG_CTR; + else if (!strcmp(name, "xts(aes)")) + return SE_ALG_XTS; + else if (!strcmp(name, "cmac(aes)")) + return SE_ALG_CMAC; + else if (!strcmp(name, "gcm(aes)")) + return SE_ALG_GCM; + else if (!strcmp(name, "ccm(aes)")) + return SE_ALG_CBC_MAC; + + else if (!strcmp(name, "sha1")) + return SE_ALG_SHA1; + else if (!strcmp(name, "sha224")) + return SE_ALG_SHA224; + else if (!strcmp(name, "sha256")) + return SE_ALG_SHA256; + else if (!strcmp(name, "sha384")) + return SE_ALG_SHA384; + else if (!strcmp(name, "sha512")) + return SE_ALG_SHA512; + else if (!strcmp(name, "sha3-224")) + return SE_ALG_SHA3_224; + else if (!strcmp(name, "sha3-256")) + return SE_ALG_SHA3_256; + else if (!strcmp(name, "sha3-384")) + return SE_ALG_SHA3_384; + else if (!strcmp(name, "sha3-512")) + return SE_ALG_SHA3_512; + else if (!strcmp(name, "hmac(sha224)")) + return SE_ALG_HMAC_SHA224; + else if (!strcmp(name, "hmac(sha256)")) + return SE_ALG_HMAC_SHA256; + else if (!strcmp(name, "hmac(sha384)")) + return SE_ALG_HMAC_SHA384; + else if (!strcmp(name, "hmac(sha512)")) + return SE_ALG_HMAC_SHA512; + else + return -EINVAL; +} + +/* Functions */ +int tegra_init_aes(struct tegra_se *se); +int tegra_init_hash(struct tegra_se *se); +void tegra_deinit_aes(struct tegra_se *se); +void tegra_deinit_hash(struct tegra_se *se); +int tegra_key_submit(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid); +void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg); +int tegra_se_host1x_submit(struct tegra_se *se, u32 size); + +/* HOST1x OPCODES */ +static inline u32 host1x_opcode_setpayload(unsigned int payload) +{ + return (9 << 28) | payload; +} + +static inline u32 host1x_opcode_incr_w(unsigned int offset) +{ + /* 22-bit offset supported */ + return (10 << 28) | offset; +} + +static inline u32 host1x_opcode_nonincr_w(unsigned int offset) +{ + /* 22-bit offset supported */ + return (11 << 28) | offset; +} + +static inline u32 host1x_opcode_incr(unsigned int offset, unsigned int count) +{ + return (1 << 28) | (offset << 16) | count; +} + +static inline u32 host1x_opcode_nonincr(unsigned int offset, unsigned int count) +{ + return (2 << 28) | (offset << 16) | count; +} + +static inline 
u32 host1x_uclass_incr_syncpt_cond_f(u32 v) +{ + return (v & 0xff) << 10; +} + +static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v) +{ + return (v & 0x3ff) << 0; +} + +static inline u32 host1x_uclass_wait_syncpt_r(void) +{ + return 0x8; +} + +static inline u32 host1x_uclass_incr_syncpt_r(void) +{ + return 0x0; +} + +#define se_host1x_opcode_incr_w(x) host1x_opcode_incr_w((x) / 4) +#define se_host1x_opcode_nonincr_w(x) host1x_opcode_nonincr_w((x) / 4) +#define se_host1x_opcode_incr(x, y) host1x_opcode_incr((x) / 4, y) +#define se_host1x_opcode_nonincr(x, y) host1x_opcode_nonincr((x) / 4, y) + +#endif /*_TEGRA_SE_H*/ diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index 6a67d70e7f..30cd040aa0 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -581,7 +581,6 @@ static const struct virtio_device_id id_table[] = { static struct virtio_driver virtio_crypto_driver = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .id_table = id_table, diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig index 5f3c9c5529..99b5c25be0 100644 --- a/drivers/cxl/Kconfig +++ b/drivers/cxl/Kconfig @@ -6,6 +6,7 @@ menuconfig CXL_BUS select FW_UPLOAD select PCI_DOE select FIRMWARE_TABLE + select NUMA_KEEP_MEMINFO if (NUMA && X86) help CXL is a bus that is electrically compatible with PCI Express, but layers three protocols on that signalling (CXL.io, CXL.cache, and diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index cb8c155a2c..571069863c 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -316,28 +316,59 @@ static const struct cxl_root_ops acpi_root_ops = { .qos_class = cxl_acpi_qos_class, }; +static void del_cxl_resource(struct resource *res) +{ + if (!res) + return; + kfree(res->name); + kfree(res); +} + +static struct resource *alloc_cxl_resource(resource_size_t base, + resource_size_t n, int id) +{ + struct resource *res __free(kfree) = kzalloc(sizeof(*res), GFP_KERNEL); + + if (!res) + return NULL; + + res->start = base; + res->end = base + n - 1; + res->flags = IORESOURCE_MEM; + res->name = kasprintf(GFP_KERNEL, "CXL Window %d", id); + if (!res->name) + return NULL; + + return no_free_ptr(res); +} + +static int add_or_reset_cxl_resource(struct resource *parent, struct resource *res) +{ + int rc = insert_resource(parent, res); + + if (rc) + del_cxl_resource(res); + return rc; +} + +DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *, + if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev)) +DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T)) static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws, struct cxl_cfmws_context *ctx) { int target_map[CXL_DECODER_MAX_INTERLEAVE]; struct cxl_port *root_port = ctx->root_port; - struct resource *cxl_res = ctx->cxl_res; struct cxl_cxims_context cxims_ctx; - struct cxl_root_decoder *cxlrd; struct device *dev = ctx->dev; cxl_calc_hb_fn cxl_calc_hb; struct cxl_decoder *cxld; unsigned int ways, i, ig; - struct resource *res; int rc; rc = cxl_acpi_cfmws_verify(dev, cfmws); - if (rc) { - dev_err(dev, "CFMWS range %#llx-%#llx not registered\n", - cfmws->base_hpa, - cfmws->base_hpa + cfmws->window_size - 1); + if (rc) return rc; - } rc = eiw_to_ways(cfmws->interleave_ways, &ways); if (rc) @@ -348,29 +379,23 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws, for (i = 0; i < ways; i++) target_map[i] = cfmws->interleave_targets[i]; 
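The __cxl_parse_cfmws() conversion that follows replaces goto-based unwinding with the scope-based cleanup helpers from <linux/cleanup.h>: DEFINE_FREE() declares how a pointer type is released, __free() arms that release when the variable leaves scope, and no_free_ptr() disarms it once ownership is handed over. A minimal sketch of the same pattern, built around a hypothetical struct blob used purely for illustration:

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/string.h>

struct blob {
        char *name;
};

static void free_blob(struct blob *b)
{
        if (!b)
                return;
        kfree(b->name);
        kfree(b);
}

/* Declare how a struct blob pointer is released when it goes out of scope */
DEFINE_FREE(free_blob, struct blob *, if (_T) free_blob(_T))

static struct blob *make_blob(const char *name)
{
        /* Automatically released on every early return below... */
        struct blob *b __free(free_blob) = kzalloc(sizeof(*b), GFP_KERNEL);

        if (!b)
                return NULL;

        b->name = kstrdup(name, GFP_KERNEL);
        if (!b->name)
                return NULL;            /* free_blob() runs here */

        /* ...unless ownership is explicitly passed out of the scope */
        return no_free_ptr(b);
}

The same reasoning explains why alloc_cxl_resource() above returns no_free_ptr(res): the local cleanup is cancelled exactly when the caller takes responsibility for the allocation.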
- res = kzalloc(sizeof(*res), GFP_KERNEL); + struct resource *res __free(del_cxl_resource) = alloc_cxl_resource( + cfmws->base_hpa, cfmws->window_size, ctx->id++); if (!res) return -ENOMEM; - res->name = kasprintf(GFP_KERNEL, "CXL Window %d", ctx->id++); - if (!res->name) - goto err_name; - - res->start = cfmws->base_hpa; - res->end = cfmws->base_hpa + cfmws->window_size - 1; - res->flags = IORESOURCE_MEM; - /* add to the local resource tracking to establish a sort order */ - rc = insert_resource(cxl_res, res); + rc = add_or_reset_cxl_resource(ctx->cxl_res, no_free_ptr(res)); if (rc) - goto err_insert; + return rc; if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) cxl_calc_hb = cxl_hb_modulo; else cxl_calc_hb = cxl_hb_xor; - cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb); + struct cxl_root_decoder *cxlrd __free(put_cxlrd) = + cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb); if (IS_ERR(cxlrd)) return PTR_ERR(cxlrd); @@ -378,8 +403,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws, cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions); cxld->target_type = CXL_DECODER_HOSTONLYMEM; cxld->hpa_range = (struct range) { - .start = res->start, - .end = res->end, + .start = cfmws->base_hpa, + .end = cfmws->base_hpa + cfmws->window_size - 1, }; cxld->interleave_ways = ways; /* @@ -399,11 +424,10 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws, rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS, cxl_parse_cxims, &cxims_ctx); if (rc < 0) - goto err_xormap; + return rc; if (!cxlrd->platform_data) { dev_err(dev, "No CXIMS for HBIG %u\n", ig); - rc = -EINVAL; - goto err_xormap; + return -EINVAL; } } } @@ -411,18 +435,9 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws, cxlrd->qos_class = cfmws->qtg_id; rc = cxl_decoder_add(cxld, target_map); -err_xormap: if (rc) - put_device(&cxld->dev); - else - rc = cxl_decoder_autoremove(dev, cxld); - return rc; - -err_insert: - kfree(res->name); -err_name: - kfree(res); - return -ENOMEM; + return rc; + return cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd)); } static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg, @@ -683,12 +698,6 @@ static void cxl_acpi_lock_reset_class(void *dev) device_lock_reset_class(dev); } -static void del_cxl_resource(struct resource *res) -{ - kfree(res->name); - kfree(res); -} - static void cxl_set_public_resource(struct resource *priv, struct resource *pub) { priv->desc = (unsigned long) pub; diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index 87008505f8..6253944864 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -28,8 +28,15 @@ int cxl_region_init(void); void cxl_region_exit(void); int cxl_get_poison_by_endpoint(struct cxl_port *port); struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa); +u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, + u64 dpa); #else +static inline u64 +cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, u64 dpa) +{ + return ULLONG_MAX; +} static inline struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa) { diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index e01c16fdc7..3df10517a3 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -332,8 +332,8 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, else if (resource_contains(&cxlds->ram_res, res)) cxled->mode = CXL_DECODER_RAM; else { - dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id, 
- cxled->cxld.id, cxled->dpa_res); + dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n", + port->id, cxled->cxld.id, cxled->dpa_res); cxled->mode = CXL_DECODER_MIXED; } @@ -532,8 +532,7 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size) if (size > avail) { dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size, - cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem", - &avail); + cxl_decoder_mode_name(cxled->mode), &avail); rc = -ENOSPC; goto out; } @@ -901,8 +900,12 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, } rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl), &cxld->interleave_granularity); - if (rc) + if (rc) { + dev_warn(&port->dev, + "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n", + port->id, cxld->id, ctrl); return rc; + } dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n", port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end, diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 65185c9fa0..2626f3fff2 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -56,6 +56,9 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0), CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE), + CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0), + CXL_CMD(CLEAR_LOG, 0x10, 0, 0), + CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0), CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0), CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0), @@ -331,6 +334,15 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in) return false; break; } + case CXL_MBOX_OP_CLEAR_LOG: { + const uuid_t *uuid = (uuid_t *)payload_in; + + /* + * Restrict the ‘Clear log’ action to only apply to + * Vendor debug logs. + */ + return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID); + } default: break; } @@ -842,14 +854,38 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd, enum cxl_event_type event_type, const uuid_t *uuid, union cxl_event *evt) { - if (event_type == CXL_CPER_EVENT_GEN_MEDIA) - trace_cxl_general_media(cxlmd, type, &evt->gen_media); - else if (event_type == CXL_CPER_EVENT_DRAM) - trace_cxl_dram(cxlmd, type, &evt->dram); - else if (event_type == CXL_CPER_EVENT_MEM_MODULE) + if (event_type == CXL_CPER_EVENT_MEM_MODULE) { trace_cxl_memory_module(cxlmd, type, &evt->mem_module); - else + return; + } + if (event_type == CXL_CPER_EVENT_GENERIC) { trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic); + return; + } + + if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) { + u64 dpa, hpa = ULLONG_MAX; + struct cxl_region *cxlr; + + /* + * These trace points are annotated with HPA and region + * translations. Take topology mutation locks and lookup + * { HPA, REGION } from { DPA, MEMDEV } in the event record. 
+ */ + guard(rwsem_read)(&cxl_region_rwsem); + guard(rwsem_read)(&cxl_dpa_rwsem); + + dpa = le64_to_cpu(evt->common.phys_addr) & CXL_DPA_MASK; + cxlr = cxl_dpa_to_region(cxlmd, dpa); + if (cxlr) + hpa = cxl_trace_hpa(cxlr, cxlmd, dpa); + + if (event_type == CXL_CPER_EVENT_GEN_MEDIA) + trace_cxl_general_media(cxlmd, type, cxlr, hpa, + &evt->gen_media); + else if (event_type == CXL_CPER_EVENT_DRAM) + trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram); + } } EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL); diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 2773f05adb..8567dd11ea 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -525,7 +525,7 @@ static int cxl_cdat_get_length(struct device *dev, __le32 response[2]; int rc; - rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL, + rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS, &request, sizeof(request), &response, sizeof(response)); @@ -555,7 +555,7 @@ static int cxl_cdat_read_table(struct device *dev, __le32 request = CDAT_DOE_REQ(entry_handle); int rc; - rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL, + rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS, &request, sizeof(request), rsp, sizeof(*rsp) + remaining); @@ -640,7 +640,7 @@ void read_cdat_data(struct cxl_port *port) if (!pdev) return; - doe_mb = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL, + doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS); if (!doe_mb) { dev_dbg(dev, "No CDAT mailbox\n"); diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index a600feb8a4..538ebd5a64 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -2816,6 +2816,97 @@ struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa) return ctx.cxlr; } +static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos) +{ + struct cxl_region_params *p = &cxlr->params; + int gran = p->interleave_granularity; + int ways = p->interleave_ways; + u64 offset; + + /* Is the hpa within this region at all */ + if (hpa < p->res->start || hpa > p->res->end) { + dev_dbg(&cxlr->dev, + "Addr trans fail: hpa 0x%llx not in region\n", hpa); + return false; + } + + /* Is the hpa in an expected chunk for its pos(-ition) */ + offset = hpa - p->res->start; + offset = do_div(offset, gran * ways); + if ((offset >= pos * gran) && (offset < (pos + 1) * gran)) + return true; + + dev_dbg(&cxlr->dev, + "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa); + + return false; +} + +static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled) +{ + u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa; + struct cxl_region_params *p = &cxlr->params; + int pos = cxled->pos; + u16 eig = 0; + u8 eiw = 0; + + ways_to_eiw(p->interleave_ways, &eiw); + granularity_to_eig(p->interleave_granularity, &eig); + + /* + * The device position in the region interleave set was removed + * from the offset at HPA->DPA translation. To reconstruct the + * HPA, place the 'pos' in the offset. 
+ * + * The placement of 'pos' in the HPA is determined by interleave + * ways and granularity and is defined in the CXL Spec 3.0 Section + * 8.2.4.19.13 Implementation Note: Device Decode Logic + */ + + /* Remove the dpa base */ + dpa_offset = dpa - cxl_dpa_resource_start(cxled); + + mask_upper = GENMASK_ULL(51, eig + 8); + + if (eiw < 8) { + hpa_offset = (dpa_offset & mask_upper) << eiw; + hpa_offset |= pos << (eig + 8); + } else { + bits_upper = (dpa_offset & mask_upper) >> (eig + 8); + bits_upper = bits_upper * 3; + hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8); + } + + /* The lower bits remain unchanged */ + hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0); + + /* Apply the hpa_offset to the region base address */ + hpa = hpa_offset + p->res->start; + + if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos)) + return ULLONG_MAX; + + return hpa; +} + +u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, + u64 dpa) +{ + struct cxl_region_params *p = &cxlr->params; + struct cxl_endpoint_decoder *cxled = NULL; + + for (int i = 0; i < p->nr_targets; i++) { + cxled = p->targets[i]; + if (cxlmd == cxled_to_memdev(cxled)) + break; + } + if (!cxled || cxlmd != cxled_to_memdev(cxled)) + return ULLONG_MAX; + + return cxl_dpa_to_hpa(dpa, cxlr, cxled); +} + static struct lock_class_key cxl_pmem_region_key; static int cxl_pmem_region_alloc(struct cxl_region *cxlr) diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index 3c42f984ee..e1082e749c 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -314,7 +314,7 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type, .resource = CXL_RESOURCE_NONE, }; - regloc = pci_find_dvsec_capability(pdev, PCI_DVSEC_VENDOR_ID_CXL, + regloc = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL, CXL_DVSEC_REG_LOCATOR); if (!regloc) return -ENXIO; diff --git a/drivers/cxl/core/trace.c b/drivers/cxl/core/trace.c index d0403dc3c8..7f2a9dd0d0 100644 --- a/drivers/cxl/core/trace.c +++ b/drivers/cxl/core/trace.c @@ -6,94 +6,3 @@ #define CREATE_TRACE_POINTS #include "trace.h" - -static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos) -{ - struct cxl_region_params *p = &cxlr->params; - int gran = p->interleave_granularity; - int ways = p->interleave_ways; - u64 offset; - - /* Is the hpa within this region at all */ - if (hpa < p->res->start || hpa > p->res->end) { - dev_dbg(&cxlr->dev, - "Addr trans fail: hpa 0x%llx not in region\n", hpa); - return false; - } - - /* Is the hpa in an expected chunk for its pos(-ition) */ - offset = hpa - p->res->start; - offset = do_div(offset, gran * ways); - if ((offset >= pos * gran) && (offset < (pos + 1) * gran)) - return true; - - dev_dbg(&cxlr->dev, - "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa); - - return false; -} - -static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr, - struct cxl_endpoint_decoder *cxled) -{ - u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa; - struct cxl_region_params *p = &cxlr->params; - int pos = cxled->pos; - u16 eig = 0; - u8 eiw = 0; - - ways_to_eiw(p->interleave_ways, &eiw); - granularity_to_eig(p->interleave_granularity, &eig); - - /* - * The device position in the region interleave set was removed - * from the offset at HPA->DPA translation. To reconstruct the - * HPA, place the 'pos' in the offset. 
- * - * The placement of 'pos' in the HPA is determined by interleave - * ways and granularity and is defined in the CXL Spec 3.0 Section - * 8.2.4.19.13 Implementation Note: Device Decode Logic - */ - - /* Remove the dpa base */ - dpa_offset = dpa - cxl_dpa_resource_start(cxled); - - mask_upper = GENMASK_ULL(51, eig + 8); - - if (eiw < 8) { - hpa_offset = (dpa_offset & mask_upper) << eiw; - hpa_offset |= pos << (eig + 8); - } else { - bits_upper = (dpa_offset & mask_upper) >> (eig + 8); - bits_upper = bits_upper * 3; - hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8); - } - - /* The lower bits remain unchanged */ - hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0); - - /* Apply the hpa_offset to the region base address */ - hpa = hpa_offset + p->res->start; - - if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos)) - return ULLONG_MAX; - - return hpa; -} - -u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *cxlmd, - u64 dpa) -{ - struct cxl_region_params *p = &cxlr->params; - struct cxl_endpoint_decoder *cxled = NULL; - - for (int i = 0; i < p->nr_targets; i++) { - cxled = p->targets[i]; - if (cxlmd == cxled_to_memdev(cxled)) - break; - } - if (!cxled || cxlmd != cxled_to_memdev(cxled)) - return ULLONG_MAX; - - return cxl_dpa_to_hpa(dpa, cxlr, cxled); -} diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index 7c5cd069f1..ee5cd4eb2f 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -60,8 +60,8 @@ TRACE_EVENT(cxl_aer_uncorrectable_error, __array(u32, header_log, CXL_HEADERLOG_SIZE_U32) ), TP_fast_assign( - __assign_str(memdev, dev_name(&cxlmd->dev)); - __assign_str(host, dev_name(cxlmd->dev.parent)); + __assign_str(memdev); + __assign_str(host); __entry->serial = cxlmd->cxlds->serial; __entry->status = status; __entry->first_error = fe; @@ -106,8 +106,8 @@ TRACE_EVENT(cxl_aer_correctable_error, __field(u32, status) ), TP_fast_assign( - __assign_str(memdev, dev_name(&cxlmd->dev)); - __assign_str(host, dev_name(cxlmd->dev.parent)); + __assign_str(memdev); + __assign_str(host); __entry->serial = cxlmd->cxlds->serial; __entry->status = status; ), @@ -142,8 +142,8 @@ TRACE_EVENT(cxl_overflow, ), TP_fast_assign( - __assign_str(memdev, dev_name(&cxlmd->dev)); - __assign_str(host, dev_name(cxlmd->dev.parent)); + __assign_str(memdev); + __assign_str(host); __entry->serial = cxlmd->cxlds->serial; __entry->log = log; __entry->count = le16_to_cpu(payload->overflow_err_count); @@ -200,8 +200,8 @@ TRACE_EVENT(cxl_overflow, __field(u8, hdr_maint_op_class) #define CXL_EVT_TP_fast_assign(cxlmd, l, hdr) \ - __assign_str(memdev, dev_name(&(cxlmd)->dev)); \ - __assign_str(host, dev_name((cxlmd)->dev.parent)); \ + __assign_str(memdev); \ + __assign_str(host); \ __entry->log = (l); \ __entry->serial = (cxlmd)->cxlds->serial; \ __entry->hdr_length = (hdr).length; \ @@ -316,9 +316,9 @@ TRACE_EVENT(cxl_generic_event, TRACE_EVENT(cxl_general_media, TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log, - struct cxl_event_gen_media *rec), + struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec), - TP_ARGS(cxlmd, log, rec), + TP_ARGS(cxlmd, log, cxlr, hpa, rec), TP_STRUCT__entry( CXL_EVT_TP_entry @@ -330,10 +330,13 @@ TRACE_EVENT(cxl_general_media, __field(u8, channel) __field(u32, device) __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE) - __field(u16, validity_flags) /* Following are out of order to pack trace record */ + __field(u64, hpa) + __field_struct(uuid_t, region_uuid) + __field(u16, validity_flags) __field(u8, rank) 
__field(u8, dpa_flags) + __string(region_name, cxlr ? dev_name(&cxlr->dev) : "") ), TP_fast_assign( @@ -354,18 +357,28 @@ TRACE_EVENT(cxl_general_media, memcpy(__entry->comp_id, &rec->component_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE); __entry->validity_flags = get_unaligned_le16(&rec->validity_flags); + __entry->hpa = hpa; + if (cxlr) { + __assign_str(region_name); + uuid_copy(&__entry->region_uuid, &cxlr->params.uuid); + } else { + __assign_str(region_name); + uuid_copy(&__entry->region_uuid, &uuid_null); + } ), CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \ "descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \ - "device=%x comp_id=%s validity_flags='%s'", + "device=%x comp_id=%s validity_flags='%s' " \ + "hpa=%llx region=%s region_uuid=%pUb", __entry->dpa, show_dpa_flags(__entry->dpa_flags), show_event_desc_flags(__entry->descriptor), show_mem_event_type(__entry->type), show_trans_type(__entry->transaction_type), __entry->channel, __entry->rank, __entry->device, __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE), - show_valid_flags(__entry->validity_flags) + show_valid_flags(__entry->validity_flags), + __entry->hpa, __get_str(region_name), &__entry->region_uuid ) ); @@ -400,9 +413,9 @@ TRACE_EVENT(cxl_general_media, TRACE_EVENT(cxl_dram, TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log, - struct cxl_event_dram *rec), + struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec), - TP_ARGS(cxlmd, log, rec), + TP_ARGS(cxlmd, log, cxlr, hpa, rec), TP_STRUCT__entry( CXL_EVT_TP_entry @@ -417,10 +430,13 @@ TRACE_EVENT(cxl_dram, __field(u32, nibble_mask) __field(u32, row) __array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE) + __field(u64, hpa) + __field_struct(uuid_t, region_uuid) __field(u8, rank) /* Out of order to pack trace record */ __field(u8, bank_group) /* Out of order to pack trace record */ __field(u8, bank) /* Out of order to pack trace record */ __field(u8, dpa_flags) /* Out of order to pack trace record */ + __string(region_name, cxlr ? dev_name(&cxlr->dev) : "") ), TP_fast_assign( @@ -444,12 +460,21 @@ TRACE_EVENT(cxl_dram, __entry->column = get_unaligned_le16(rec->column); memcpy(__entry->cor_mask, &rec->correction_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE); + __entry->hpa = hpa; + if (cxlr) { + __assign_str(region_name); + uuid_copy(&__entry->region_uuid, &cxlr->params.uuid); + } else { + __assign_str(region_name); + uuid_copy(&__entry->region_uuid, &uuid_null); + } ), CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \ "transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \ "bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \ - "validity_flags='%s'", + "validity_flags='%s' " \ + "hpa=%llx region=%s region_uuid=%pUb", __entry->dpa, show_dpa_flags(__entry->dpa_flags), show_event_desc_flags(__entry->descriptor), show_mem_event_type(__entry->type), @@ -458,7 +483,8 @@ TRACE_EVENT(cxl_dram, __entry->bank_group, __entry->bank, __entry->row, __entry->column, __print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE), - show_dram_valid_flags(__entry->validity_flags) + show_dram_valid_flags(__entry->validity_flags), + __entry->hpa, __get_str(region_name), &__entry->region_uuid ) ); @@ -642,8 +668,6 @@ TRACE_EVENT(cxl_memory_module, #define cxl_poison_overflow(flags, time) \ (flags & CXL_POISON_FLAG_OVERFLOW ? 
le64_to_cpu(time) : 0) -u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa); - TRACE_EVENT(cxl_poison, TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr, @@ -668,8 +692,8 @@ TRACE_EVENT(cxl_poison, ), TP_fast_assign( - __assign_str(memdev, dev_name(&cxlmd->dev)); - __assign_str(host, dev_name(cxlmd->dev.parent)); + __assign_str(memdev); + __assign_str(host); __entry->serial = cxlmd->cxlds->serial; __entry->overflow_ts = cxl_poison_overflow(flags, overflow_ts); __entry->dpa = cxl_poison_record_dpa(record); @@ -678,12 +702,12 @@ TRACE_EVENT(cxl_poison, __entry->trace_type = trace_type; __entry->flags = flags; if (cxlr) { - __assign_str(region, dev_name(&cxlr->dev)); + __assign_str(region); memcpy(__entry->uuid, &cxlr->params.uuid, 16); __entry->hpa = cxl_trace_hpa(cxlr, cxlmd, __entry->dpa); } else { - __assign_str(region, ""); + __assign_str(region); memset(__entry->uuid, 0, 16); __entry->hpa = ULLONG_MAX; } diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 6f9270f2fa..a6613a6f89 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -12,6 +12,8 @@ #include #include +extern const struct nvdimm_security_ops *cxl_security_ops; + /** * DOC: cxl objects * @@ -781,6 +783,11 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map); struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port); int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map); int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld); +static inline int cxl_root_decoder_autoremove(struct device *host, + struct cxl_root_decoder *cxlrd) +{ + return cxl_decoder_autoremove(host, &cxlrd->cxlsd.cxld); +} int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint); /** diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 07e65a7605..af8169ccdb 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -395,9 +395,9 @@ enum cxl_devtype { /** * struct cxl_dpa_perf - DPA performance property entry - * @dpa_range - range for DPA address - * @coord - QoS performance data (i.e. latency, bandwidth) - * @qos_class - QoS Class cookies + * @dpa_range: range for DPA address + * @coord: QoS performance data (i.e. latency, bandwidth) + * @qos_class: QoS Class cookies */ struct cxl_dpa_perf { struct range dpa_range; @@ -464,13 +464,14 @@ struct cxl_dev_state { * @active_persistent_bytes: sum of hard + soft persistent * @next_volatile_bytes: volatile capacity change pending device reset * @next_persistent_bytes: persistent capacity change pending device reset + * @ram_perf: performance data entry matched to RAM partition + * @pmem_perf: performance data entry matched to PMEM partition * @event: event log driver state * @poison: poison driver state info * @security: security driver state info * @fw: firmware upload / activation state + * @mbox_wait: RCU wait for mbox send completely * @mbox_send: @dev specific transport for transmitting mailbox commands - * @ram_perf: performance data entry matched to RAM partition - * @pmem_perf: performance data entry matched to PMEM partition * * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for * details on capacity parameters. 
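The drivers/cxl/core/trace.h hunks above switch __assign_str() to its single-argument form: with the tracepoint infrastructure in this kernel the source string is taken from the matching __string() declaration in TP_STRUCT__entry, so repeating dev_name(...) in TP_fast_assign became redundant. A minimal trace-header sketch of that pairing; the example_event name and its fields are illustrative only:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM example

#if !defined(_EXAMPLE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _EXAMPLE_TRACE_H

#include <linux/device.h>
#include <linux/tracepoint.h>

TRACE_EVENT(example_event,
        TP_PROTO(const struct device *dev, u64 hpa),
        TP_ARGS(dev, hpa),

        TP_STRUCT__entry(
                __string(name, dev_name(dev))   /* source string is named here... */
                __field(u64, hpa)
        ),

        TP_fast_assign(
                __assign_str(name);             /* ...so it is not repeated here */
                __entry->hpa = hpa;
        ),

        TP_printk("dev=%s hpa=%#llx", __get_str(name), __entry->hpa)
);

#endif /* _EXAMPLE_TRACE_H */

/* Must stay outside the include guard */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE example_trace
#include <trace/define_trace.h>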
@@ -527,6 +528,9 @@ enum cxl_opcode { CXL_MBOX_OP_SET_TIMESTAMP = 0x0301, CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400, CXL_MBOX_OP_GET_LOG = 0x0401, + CXL_MBOX_OP_GET_LOG_CAPS = 0x0402, + CXL_MBOX_OP_CLEAR_LOG = 0x0403, + CXL_MBOX_OP_GET_SUP_LOG_SUBLIST = 0x0405, CXL_MBOX_OP_IDENTIFY = 0x4000, CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100, CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101, diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h index 93992a1c8e..4da07727ab 100644 --- a/drivers/cxl/cxlpci.h +++ b/drivers/cxl/cxlpci.h @@ -13,7 +13,6 @@ * "DVSEC" redundancies removed. When obvious, abbreviations may be used. */ #define PCI_DVSEC_HEADER1_LENGTH_MASK GENMASK(31, 20) -#define PCI_DVSEC_VENDOR_ID_CXL 0x1E98 /* CXL 2.0 8.1.3: PCIe DVSEC for CXL Device */ #define CXL_DVSEC_PCIE_DEVICE 0 diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 659f9d46b1..e53646e9f2 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -817,7 +817,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) cxlds->rcd = is_cxl_restricted(pdev); cxlds->serial = pci_get_dsn(pdev); cxlds->cxl_dvsec = pci_find_dvsec_capability( - pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE); + pdev, PCI_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE); if (!cxlds->cxl_dvsec) dev_warn(&pdev->dev, "Device DVSEC not present, skip CXL.mem init\n"); @@ -996,6 +996,75 @@ static struct pci_driver cxl_pci_driver = { }, }; -module_pci_driver(cxl_pci_driver); +#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0) +static void cxl_handle_cper_event(enum cxl_event_type ev_type, + struct cxl_cper_event_rec *rec) +{ + struct cper_cxl_event_devid *device_id = &rec->hdr.device_id; + struct pci_dev *pdev __free(pci_dev_put) = NULL; + enum cxl_event_log_type log_type; + struct cxl_dev_state *cxlds; + unsigned int devfn; + u32 hdr_flags; + + pr_debug("CPER event %d for device %u:%u:%u.%u\n", ev_type, + device_id->segment_num, device_id->bus_num, + device_id->device_num, device_id->func_num); + + devfn = PCI_DEVFN(device_id->device_num, device_id->func_num); + pdev = pci_get_domain_bus_and_slot(device_id->segment_num, + device_id->bus_num, devfn); + if (!pdev) + return; + + guard(device)(&pdev->dev); + if (pdev->driver != &cxl_pci_driver) + return; + + cxlds = pci_get_drvdata(pdev); + if (!cxlds) + return; + + /* Fabricate a log type */ + hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags); + log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags); + + cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type, + &uuid_null, &rec->event); +} + +static void cxl_cper_work_fn(struct work_struct *work) +{ + struct cxl_cper_work_data wd; + + while (cxl_cper_kfifo_get(&wd)) + cxl_handle_cper_event(wd.event_type, &wd.rec); +} +static DECLARE_WORK(cxl_cper_work, cxl_cper_work_fn); + +static int __init cxl_pci_driver_init(void) +{ + int rc; + + rc = pci_register_driver(&cxl_pci_driver); + if (rc) + return rc; + + rc = cxl_cper_register_work(&cxl_cper_work); + if (rc) + pci_unregister_driver(&cxl_pci_driver); + + return rc; +} + +static void __exit cxl_pci_driver_exit(void) +{ + cxl_cper_unregister_work(&cxl_cper_work); + cancel_work_sync(&cxl_cper_work); + pci_unregister_driver(&cxl_pci_driver); +} + +module_init(cxl_pci_driver_init); +module_exit(cxl_pci_driver_exit); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS(CXL); diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 7cb8994f88..2ecdaee630 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -11,8 +11,6 @@ #include "cxlmem.h" #include "cxl.h" -extern const 
struct nvdimm_security_ops *cxl_security_ops; - static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX); static void clear_exclusive(void *mds) diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c index f24b67c64d..3ef9550bd2 100644 --- a/drivers/dax/bus.c +++ b/drivers/dax/bus.c @@ -792,7 +792,7 @@ static const struct attribute_group *dax_mapping_attribute_groups[] = { NULL, }; -static struct device_type dax_mapping_type = { +static const struct device_type dax_mapping_type = { .release = dax_mapping_release, .groups = dax_mapping_attribute_groups, }; @@ -1178,7 +1178,6 @@ static ssize_t mapping_store(struct device *dev, struct device_attribute *attr, if (rc) return rc; - rc = -ENXIO; rc = down_write_killable(&dax_region_rwsem); if (rc) return rc; diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 93ebedc5ec..eb61598247 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -329,14 +329,14 @@ static unsigned long dax_get_unmapped_area(struct file *filp, if ((off + len_align) < off) goto out; - addr_align = current->mm->get_unmapped_area(filp, addr, len_align, - pgoff, flags); + addr_align = mm_get_unmapped_area(current->mm, filp, addr, len_align, + pgoff, flags); if (!IS_ERR_VALUE(addr_align)) { addr_align += (off - addr_align) & (align - 1); return addr_align; } out: - return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags); } static const struct address_space_operations dev_dax_aops = { @@ -377,7 +377,7 @@ static const struct file_operations dax_fops = { .release = dax_release, .get_unmapped_area = dax_get_unmapped_area, .mmap = dax_mmap, - .mmap_supported_flags = MAP_SYNC, + .fop_flags = FOP_MMAP_SYNC, }; static void dev_dax_cdev_del(void *cdev) diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c index 42ee360cf4..4fe9d040e3 100644 --- a/drivers/dax/kmem.c +++ b/drivers/dax/kmem.c @@ -55,36 +55,14 @@ static LIST_HEAD(kmem_memory_types); static struct memory_dev_type *kmem_find_alloc_memory_type(int adist) { - bool found = false; - struct memory_dev_type *mtype; - - mutex_lock(&kmem_memory_type_lock); - list_for_each_entry(mtype, &kmem_memory_types, list) { - if (mtype->adistance == adist) { - found = true; - break; - } - } - if (!found) { - mtype = alloc_memory_type(adist); - if (!IS_ERR(mtype)) - list_add(&mtype->list, &kmem_memory_types); - } - mutex_unlock(&kmem_memory_type_lock); - - return mtype; + guard(mutex)(&kmem_memory_type_lock); + return mt_find_alloc_memory_type(adist, &kmem_memory_types); } static void kmem_put_memory_types(void) { - struct memory_dev_type *mtype, *mtn; - - mutex_lock(&kmem_memory_type_lock); - list_for_each_entry_safe(mtype, mtn, &kmem_memory_types, list) { - list_del(&mtype->list); - put_memory_type(mtype); - } - mutex_unlock(&kmem_memory_type_lock); + guard(mutex)(&kmem_memory_type_lock); + mt_put_memory_types(&kmem_memory_types); } static int dev_dax_kmem_probe(struct dev_dax *dev_dax) diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c index c1cc23bcb9..5edc522f71 100644 --- a/drivers/devfreq/event/exynos-nocp.c +++ b/drivers/devfreq/event/exynos-nocp.c @@ -275,18 +275,16 @@ static int exynos_nocp_probe(struct platform_device *pdev) return 0; } -static int exynos_nocp_remove(struct platform_device *pdev) +static void exynos_nocp_remove(struct platform_device *pdev) { struct exynos_nocp *nocp = platform_get_drvdata(pdev); clk_disable_unprepare(nocp->clk); - - return 0; } static struct 
platform_driver exynos_nocp_driver = { .probe = exynos_nocp_probe, - .remove = exynos_nocp_remove, + .remove_new = exynos_nocp_remove, .driver = { .name = "exynos-nocp", .of_match_table = exynos_nocp_id_match, diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 56bac47020..7002df20a4 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -692,18 +692,16 @@ static int exynos_ppmu_probe(struct platform_device *pdev) return 0; } -static int exynos_ppmu_remove(struct platform_device *pdev) +static void exynos_ppmu_remove(struct platform_device *pdev) { struct exynos_ppmu *info = platform_get_drvdata(pdev); clk_disable_unprepare(info->ppmu.clk); - - return 0; } static struct platform_driver exynos_ppmu_driver = { .probe = exynos_ppmu_probe, - .remove = exynos_ppmu_remove, + .remove_new = exynos_ppmu_remove, .driver = { .name = "exynos-ppmu", .of_match_table = exynos_ppmu_id_match, diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c index 245898f1a8..0011858090 100644 --- a/drivers/devfreq/exynos-bus.c +++ b/drivers/devfreq/exynos-bus.c @@ -467,7 +467,6 @@ static void exynos_bus_shutdown(struct platform_device *pdev) devfreq_suspend_device(bus->devfreq); } -#ifdef CONFIG_PM_SLEEP static int exynos_bus_resume(struct device *dev) { struct exynos_bus *bus = dev_get_drvdata(dev); @@ -495,11 +494,9 @@ static int exynos_bus_suspend(struct device *dev) return 0; } -#endif -static const struct dev_pm_ops exynos_bus_pm = { - SET_SYSTEM_SLEEP_PM_OPS(exynos_bus_suspend, exynos_bus_resume) -}; +static DEFINE_SIMPLE_DEV_PM_OPS(exynos_bus_pm, + exynos_bus_suspend, exynos_bus_resume); static const struct of_device_id exynos_bus_of_match[] = { { .compatible = "samsung,exynos-bus", }, @@ -512,7 +509,7 @@ static struct platform_driver exynos_bus_platdrv = { .shutdown = exynos_bus_shutdown, .driver = { .name = "exynos-bus", - .pm = &exynos_bus_pm, + .pm = pm_sleep_ptr(&exynos_bus_pm), .of_match_table = exynos_bus_of_match, }, }; diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c index 11bc3d0349..7ad5225b03 100644 --- a/drivers/devfreq/mtk-cci-devfreq.c +++ b/drivers/devfreq/mtk-cci-devfreq.c @@ -392,7 +392,7 @@ out_free_resources: return ret; } -static int mtk_ccifreq_remove(struct platform_device *pdev) +static void mtk_ccifreq_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_ccifreq_drv *drv; @@ -405,8 +405,6 @@ static int mtk_ccifreq_remove(struct platform_device *pdev) regulator_disable(drv->proc_reg); if (drv->sram_reg) regulator_disable(drv->sram_reg); - - return 0; } static const struct mtk_ccifreq_platform_data mt8183_platform_data = { @@ -432,7 +430,7 @@ MODULE_DEVICE_TABLE(of, mtk_ccifreq_machines); static struct platform_driver mtk_ccifreq_platdrv = { .probe = mtk_ccifreq_probe, - .remove = mtk_ccifreq_remove, + .remove_new = mtk_ccifreq_remove, .driver = { .name = "mtk-ccifreq", .of_match_table = mtk_ccifreq_machines, diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c index fd2c5ffedf..d405cee92c 100644 --- a/drivers/devfreq/rk3399_dmc.c +++ b/drivers/devfreq/rk3399_dmc.c @@ -459,13 +459,11 @@ err_edev: return ret; } -static int rk3399_dmcfreq_remove(struct platform_device *pdev) +static void rk3399_dmcfreq_remove(struct platform_device *pdev) { struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev); devfreq_event_disable_edev(dmcfreq->edev); - - return 0; } static const struct of_device_id 
rk3399dmc_devfreq_of_match[] = { @@ -476,7 +474,7 @@ MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match); static struct platform_driver rk3399_dmcfreq_driver = { .probe = rk3399_dmcfreq_probe, - .remove = rk3399_dmcfreq_remove, + .remove_new = rk3399_dmcfreq_remove, .driver = { .name = "rk3399-dmc-freq", .pm = &rk3399_dmcfreq_pm, diff --git a/drivers/devfreq/sun8i-a33-mbus.c b/drivers/devfreq/sun8i-a33-mbus.c index 13d3221313..bcf654f4ff 100644 --- a/drivers/devfreq/sun8i-a33-mbus.c +++ b/drivers/devfreq/sun8i-a33-mbus.c @@ -458,7 +458,7 @@ err_disable_bus: return dev_err_probe(dev, ret, err); } -static int sun8i_a33_mbus_remove(struct platform_device *pdev) +static void sun8i_a33_mbus_remove(struct platform_device *pdev) { struct sun8i_a33_mbus *priv = platform_get_drvdata(pdev); unsigned long initial_freq = priv->profile.initial_freq; @@ -475,8 +475,6 @@ static int sun8i_a33_mbus_remove(struct platform_device *pdev) clk_rate_exclusive_put(priv->clk_mbus); clk_rate_exclusive_put(priv->clk_dram); clk_disable_unprepare(priv->clk_bus); - - return 0; } static const struct sun8i_a33_mbus_variant sun50i_a64_mbus = { @@ -497,7 +495,7 @@ static SIMPLE_DEV_PM_OPS(sun8i_a33_mbus_pm_ops, static struct platform_driver sun8i_a33_mbus_driver = { .probe = sun8i_a33_mbus_probe, - .remove = sun8i_a33_mbus_remove, + .remove_new = sun8i_a33_mbus_remove, .driver = { .name = "sun8i-a33-mbus", .of_match_table = sun8i_a33_mbus_of_match, diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 8fe5aa67b1..8892bc701a 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -35,12 +35,35 @@ static inline int is_dma_buf_file(struct file *); -struct dma_buf_list { - struct list_head head; - struct mutex lock; -}; +#if IS_ENABLED(CONFIG_DEBUG_FS) +static DEFINE_MUTEX(debugfs_list_mutex); +static LIST_HEAD(debugfs_list); -static struct dma_buf_list db_list; +static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf) +{ + mutex_lock(&debugfs_list_mutex); + list_add(&dmabuf->list_node, &debugfs_list); + mutex_unlock(&debugfs_list_mutex); +} + +static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf) +{ + if (!dmabuf) + return; + + mutex_lock(&debugfs_list_mutex); + list_del(&dmabuf->list_node); + mutex_unlock(&debugfs_list_mutex); +} +#else +static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf) +{ +} + +static void __dma_buf_debugfs_list_del(struct file *file) +{ +} +#endif static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) { @@ -89,17 +112,10 @@ static void dma_buf_release(struct dentry *dentry) static int dma_buf_file_release(struct inode *inode, struct file *file) { - struct dma_buf *dmabuf; - if (!is_dma_buf_file(file)) return -EINVAL; - dmabuf = file->private_data; - if (dmabuf) { - mutex_lock(&db_list.lock); - list_del(&dmabuf->list_node); - mutex_unlock(&db_list.lock); - } + __dma_buf_debugfs_list_del(file->private_data); return 0; } @@ -672,9 +688,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) file->f_path.dentry->d_fsdata = dmabuf; dmabuf->file = file; - mutex_lock(&db_list.lock); - list_add(&dmabuf->list_node, &db_list.head); - mutex_unlock(&db_list.lock); + __dma_buf_debugfs_list_add(dmabuf); return dmabuf; @@ -1611,7 +1625,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) size_t size = 0; int ret; - ret = mutex_lock_interruptible(&db_list.lock); + ret = mutex_lock_interruptible(&debugfs_list_mutex); if (ret) return ret; @@ -1620,7 +1634,7 @@ static int dma_buf_debug_show(struct 
seq_file *s, void *unused) seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n", "size", "flags", "mode", "count", "ino"); - list_for_each_entry(buf_obj, &db_list.head, list_node) { + list_for_each_entry(buf_obj, &debugfs_list, list_node) { ret = dma_resv_lock_interruptible(buf_obj->resv, NULL); if (ret) @@ -1657,11 +1671,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); - mutex_unlock(&db_list.lock); + mutex_unlock(&debugfs_list_mutex); return 0; error_unlock: - mutex_unlock(&db_list.lock); + mutex_unlock(&debugfs_list_mutex); return ret; } @@ -1718,8 +1732,6 @@ static int __init dma_buf_init(void) if (IS_ERR(dma_buf_mnt)) return PTR_ERR(dma_buf_mnt); - mutex_init(&db_list.lock); - INIT_LIST_HEAD(&db_list.head); dma_buf_init_debugfs(); return 0; } diff --git a/drivers/dma-buf/sync_trace.h b/drivers/dma-buf/sync_trace.h index 06e468a218..d71dcf954b 100644 --- a/drivers/dma-buf/sync_trace.h +++ b/drivers/dma-buf/sync_trace.h @@ -20,7 +20,7 @@ TRACE_EVENT(sync_timeline, ), TP_fast_assign( - __assign_str(name, timeline->name); + __assign_str(name); __entry->value = timeline->value; ), diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index dfd40d14e4..802ca916f0 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -31,10 +31,12 @@ obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/ obj-$(CONFIG_DW_DMAC_CORE) += dw/ obj-$(CONFIG_DW_EDMA) += dw-edma/ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o +fsl-edma-trace-$(CONFIG_TRACING) := fsl-edma-trace.o +CFLAGS_fsl-edma-trace.o := -I$(src) obj-$(CONFIG_FSL_DMA) += fsldma.o -fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o +fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o ${fsl-edma-trace-y} obj-$(CONFIG_FSL_EDMA) += fsl-edma.o -mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o +mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o ${fsl-edma-trace-y} obj-$(CONFIG_MCF_EDMA) += mcf-edma.o obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o obj-$(CONFIG_FSL_RAID) += fsl_raid.o diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index fbf048f432..73a5cfb4da 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -2855,8 +2855,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) } /* Initialize physical channels */ - pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)), - GFP_KERNEL); + pl08x->phy_chans = kcalloc(vd->channels, sizeof(*pl08x->phy_chans), + GFP_KERNEL); if (!pl08x->phy_chans) { ret = -ENOMEM; goto out_no_phychans; diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index d5a33e4a91..bdb752f118 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -1002,6 +1002,16 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version) return 0; } +static void axi_dmac_tasklet_kill(void *task) +{ + tasklet_kill(task); +} + +static void axi_dmac_free_dma_controller(void *of_node) +{ + of_dma_controller_free(of_node); +} + static int axi_dmac_probe(struct platform_device *pdev) { struct dma_device *dma_dev; @@ -1025,14 +1035,10 @@ static int axi_dmac_probe(struct platform_device *pdev) if (IS_ERR(dmac->base)) return PTR_ERR(dmac->base); - dmac->clk = devm_clk_get(&pdev->dev, NULL); + dmac->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(dmac->clk)) return PTR_ERR(dmac->clk); - ret = clk_prepare_enable(dmac->clk); - if (ret < 0) - return ret; - version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION); if (version >= ADI_AXI_PCORE_VER(4, 3, 
'a')) @@ -1041,7 +1047,7 @@ static int axi_dmac_probe(struct platform_device *pdev) ret = axi_dmac_parse_dt(&pdev->dev, dmac); if (ret < 0) - goto err_clk_disable; + return ret; INIT_LIST_HEAD(&dmac->chan.active_descs); @@ -1072,7 +1078,7 @@ static int axi_dmac_probe(struct platform_device *pdev) ret = axi_dmac_detect_caps(dmac, version); if (ret) - goto err_clk_disable; + return ret; dma_dev->copy_align = (dmac->chan.address_align_mask + 1); @@ -1088,57 +1094,42 @@ static int axi_dmac_probe(struct platform_device *pdev) !AXI_DMAC_DST_COHERENT_GET(ret)) { dev_err(dmac->dma_dev.dev, "Coherent DMA not supported in hardware"); - ret = -EINVAL; - goto err_clk_disable; + return -EINVAL; } } - ret = dma_async_device_register(dma_dev); + ret = dmaenginem_async_device_register(dma_dev); + if (ret) + return ret; + + /* + * Put the action in here so it get's done before unregistering the DMA + * device. + */ + ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_tasklet_kill, + &dmac->chan.vchan.task); if (ret) - goto err_clk_disable; + return ret; ret = of_dma_controller_register(pdev->dev.of_node, of_dma_xlate_by_chan_id, dma_dev); if (ret) - goto err_unregister_device; + return ret; - ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED, - dev_name(&pdev->dev), dmac); + ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_free_dma_controller, + pdev->dev.of_node); if (ret) - goto err_unregister_of; + return ret; - platform_set_drvdata(pdev, dmac); + ret = devm_request_irq(&pdev->dev, dmac->irq, axi_dmac_interrupt_handler, + IRQF_SHARED, dev_name(&pdev->dev), dmac); + if (ret) + return ret; regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config); - if (IS_ERR(regmap)) { - ret = PTR_ERR(regmap); - goto err_free_irq; - } - - return 0; - -err_free_irq: - free_irq(dmac->irq, dmac); -err_unregister_of: - of_dma_controller_free(pdev->dev.of_node); -err_unregister_device: - dma_async_device_unregister(&dmac->dma_dev); -err_clk_disable: - clk_disable_unprepare(dmac->clk); - - return ret; -} - -static void axi_dmac_remove(struct platform_device *pdev) -{ - struct axi_dmac *dmac = platform_get_drvdata(pdev); - free_irq(dmac->irq, dmac); - of_dma_controller_free(pdev->dev.of_node); - tasklet_kill(&dmac->chan.vchan.task); - dma_async_device_unregister(&dmac->dma_dev); - clk_disable_unprepare(dmac->clk); + return PTR_ERR_OR_ZERO(regmap); } static const struct of_device_id axi_dmac_of_match_table[] = { @@ -1153,7 +1144,6 @@ static struct platform_driver axi_dmac_driver = { .of_match_table = axi_dmac_of_match_table, }, .probe = axi_dmac_probe, - .remove_new = axi_dmac_remove, }; module_platform_driver(axi_dmac_driver); diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index 321446fddd..fffafa86d9 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -1443,6 +1443,24 @@ static int parse_device_properties(struct axi_dma_chip *chip) return 0; } +static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip) +{ + int irq_count = platform_irq_count(pdev); + int ret; + + for (int i = 0; i < irq_count; i++) { + chip->irq[i] = platform_get_irq(pdev, i); + if (chip->irq[i] < 0) + return chip->irq[i]; + ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt, + IRQF_SHARED, KBUILD_MODNAME, chip); + if (ret < 0) + return ret; + } + + return 0; +} + static int dw_probe(struct platform_device *pdev) { struct axi_dma_chip *chip; @@ 
-1469,10 +1487,6 @@ static int dw_probe(struct platform_device *pdev) chip->dev = &pdev->dev; chip->dw->hdata = hdata; - chip->irq = platform_get_irq(pdev, 0); - if (chip->irq < 0) - return chip->irq; - chip->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(chip->regs)) return PTR_ERR(chip->regs); @@ -1513,8 +1527,7 @@ static int dw_probe(struct platform_device *pdev) if (!dw->chan) return -ENOMEM; - ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt, - IRQF_SHARED, KBUILD_MODNAME, chip); + ret = axi_req_irqs(pdev, chip); if (ret) return ret; @@ -1627,7 +1640,9 @@ static void dw_remove(struct platform_device *pdev) pm_runtime_disable(chip->dev); axi_dma_suspend(chip); - devm_free_irq(chip->dev, chip->irq, chip); + for (i = 0; i < DMAC_MAX_CHANNELS; i++) + if (chip->irq[i] > 0) + devm_free_irq(chip->dev, chip->irq[i], chip); of_dma_controller_free(chip->dev->of_node); @@ -1651,6 +1666,9 @@ static const struct of_device_id dw_dma_of_id_table[] = { }, { .compatible = "starfive,jh7110-axi-dma", .data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2), + }, { + .compatible = "starfive,jh8100-axi-dma", + .data = (void *)AXI_DMA_FLAG_HAS_RESETS, }, {} }; diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h index ac571b413b..b842e6a8d9 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h @@ -65,7 +65,7 @@ struct dw_axi_dma { struct axi_dma_chip { struct device *dev; - int irq; + int irq[DMAC_MAX_CHANNELS]; void __iomem *regs; void __iomem *apb_regs; struct clk *core_clk; diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c index 5a8061a307..36384d0192 100644 --- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c +++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c @@ -362,7 +362,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev) for (i = 0; i < priv->num_pairs; i++) { err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, - i, &priv->rx_queue_attr[i]); + i, 0, &priv->rx_queue_attr[i]); if (err) { dev_err(dev, "dpdmai_get_rx_queue() failed\n"); goto exit; @@ -370,13 +370,13 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev) ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid; err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, - i, &priv->tx_fqid[i]); + i, 0, &priv->tx_queue_attr[i]); if (err) { dev_err(dev, "dpdmai_get_tx_queue() failed\n"); goto exit; } - ppriv->req_fqid = priv->tx_fqid[i]; - ppriv->prio = i; + ppriv->req_fqid = priv->tx_queue_attr[i].fqid; + ppriv->prio = DPAA2_QDMA_DEFAULT_PRIORITY; ppriv->priv = priv; ppriv++; } @@ -542,7 +542,7 @@ static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv) rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; rx_queue_cfg.dest_cfg.priority = ppriv->prio; err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, - rx_queue_cfg.dest_cfg.priority, + rx_queue_cfg.dest_cfg.priority, 0, &rx_queue_cfg); if (err) { dev_err(dev, "dpdmai_set_rx_queue() failed\n"); @@ -642,7 +642,7 @@ static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma) for (i = 0; i < dpaa2_qdma->n_chans; i++) { dpaa2_chan = &dpaa2_qdma->chans[i]; dpaa2_chan->qdma = dpaa2_qdma; - dpaa2_chan->fqid = priv->tx_fqid[i % num]; + dpaa2_chan->fqid = priv->tx_queue_attr[i % num].fqid; dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc; vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev); spin_lock_init(&dpaa2_chan->queue_lock); @@ -802,7 +802,7 @@ static void 
dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev) dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle); dpaa2_dpdmai_dpio_unbind(priv); dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle); - dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle); + dpdmai_destroy(priv->mc_io, 0, priv->dpqdma_id, ls_dev->mc_handle); } static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = { diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h index 03e2f4e0ba..2c80077cb7 100644 --- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h +++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h @@ -6,6 +6,7 @@ #define DPAA2_QDMA_STORE_SIZE 16 #define NUM_CH 8 +#define DPAA2_QDMA_DEFAULT_PRIORITY 0 struct dpaa2_qdma_sd_d { u32 rsv:32; @@ -122,8 +123,8 @@ struct dpaa2_qdma_priv { struct dpaa2_qdma_engine *dpaa2_qdma; struct dpaa2_qdma_priv_per_prio *ppriv; - struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM]; - u32 tx_fqid[DPDMAI_PRIO_NUM]; + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_MAX_QUEUE_NUM]; + struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_MAX_QUEUE_NUM]; }; struct dpaa2_qdma_priv_per_prio { diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c index 878662aaa1..36897b41ee 100644 --- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c +++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c @@ -1,47 +1,52 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright 2019 NXP +#include #include #include #include #include #include "dpdmai.h" +#define DEST_TYPE_MASK 0xF + struct dpdmai_rsp_get_attributes { __le32 id; u8 num_of_priorities; - u8 pad0[3]; + u8 num_of_queues; + u8 pad0[2]; __le16 major; __le16 minor; }; struct dpdmai_cmd_queue { __le32 dest_id; - u8 priority; - u8 queue; + u8 dest_priority; + union { + u8 queue; + u8 pri; + }; u8 dest_type; - u8 pad; + u8 queue_idx; __le64 user_ctx; union { __le32 options; __le32 fqid; }; -}; +} __packed; struct dpdmai_rsp_get_tx_queue { __le64 pad; __le32 fqid; }; -#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ - ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) +struct dpdmai_cmd_open { + __le32 dpdmai_id; +} __packed; -/* cmd, param, offset, width, type, arg_name */ -#define DPDMAI_CMD_CREATE(cmd, cfg) \ -do { \ - MC_CMD_OP(cmd, 0, 8, 8, u8, (cfg)->priorities[0]);\ - MC_CMD_OP(cmd, 0, 16, 8, u8, (cfg)->priorities[1]);\ -} while (0) +struct dpdmai_cmd_destroy { + __le32 dpdmai_id; +} __packed; static inline u64 mc_enc(int lsoffset, int width, u64 val) { @@ -68,16 +73,16 @@ static inline u64 mc_enc(int lsoffset, int width, u64 val) int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpdmai_id, u16 *token) { + struct dpdmai_cmd_open *cmd_params; struct fsl_mc_command cmd = { 0 }; - __le64 *cmd_dpdmai_id; int err; /* prepare command */ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN, cmd_flags, 0); - cmd_dpdmai_id = cmd.params; - *cmd_dpdmai_id = cpu_to_le32(dpdmai_id); + cmd_params = (struct dpdmai_cmd_open *)&cmd.params; + cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id); /* send command to mc*/ err = mc_send_command(mc_io, &cmd); @@ -115,66 +120,27 @@ int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) } EXPORT_SYMBOL_GPL(dpdmai_close); -/** - * dpdmai_create() - Create the DPDMAI object - * @mc_io: Pointer to MC portal's I/O object - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' - * @cfg: Configuration structure - * @token: Returned token; use in subsequent API calls - * - * Create the DPDMAI object, allocate required resources and - * perform required initialization. 
- * - * The object can be created either by declaring it in the - * DPL file, or by calling this function. - * - * This function returns a unique authentication token, - * associated with the specific object ID and the specific MC - * portal; this token must be used in all subsequent calls to - * this specific object. For objects that are created using the - * DPL file, call dpdmai_open() function to get an authentication - * token first. - * - * Return: '0' on Success; Error code otherwise. - */ -int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags, - const struct dpdmai_cfg *cfg, u16 *token) -{ - struct fsl_mc_command cmd = { 0 }; - int err; - - /* prepare command */ - cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE, - cmd_flags, 0); - DPDMAI_CMD_CREATE(cmd, cfg); - - /* send command to mc*/ - err = mc_send_command(mc_io, &cmd); - if (err) - return err; - - /* retrieve response parameters */ - *token = mc_cmd_hdr_read_token(&cmd); - - return 0; -} - /** * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources. * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpdmai_id: The object id; it must be a valid id within the container that created this object; * @token: Token of DPDMAI object * * Return: '0' on Success; error code otherwise. */ -int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u32 dpdmai_id, u16 token) { + struct dpdmai_cmd_destroy *cmd_params; struct fsl_mc_command cmd = { 0 }; /* prepare command */ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY, cmd_flags, token); + cmd_params = (struct dpdmai_cmd_destroy *)&cmd.params; + cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id); + /* send command to mc*/ return mc_send_command(mc_io, &cmd); } @@ -274,6 +240,7 @@ int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, attr->version.major = le16_to_cpu(rsp_params->major); attr->version.minor = le16_to_cpu(rsp_params->minor); attr->num_of_priorities = rsp_params->num_of_priorities; + attr->num_of_queues = rsp_params->num_of_queues; return 0; } @@ -284,13 +251,14 @@ EXPORT_SYMBOL_GPL(dpdmai_get_attributes); * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPDMAI object + * @queue_idx: DMA queue index * @priority: Select the queue relative to number of * priorities configured at DPDMAI creation * @cfg: Rx queue configuration * * Return: '0' on Success; Error code otherwise. 
*/ -int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue_idx, u8 priority, const struct dpdmai_rx_queue_cfg *cfg) { struct dpdmai_cmd_queue *cmd_params; @@ -302,11 +270,12 @@ int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, cmd_params = (struct dpdmai_cmd_queue *)cmd.params; cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); - cmd_params->priority = cfg->dest_cfg.priority; - cmd_params->queue = priority; + cmd_params->dest_priority = cfg->dest_cfg.priority; + cmd_params->pri = priority; cmd_params->dest_type = cfg->dest_cfg.dest_type; cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); cmd_params->options = cpu_to_le32(cfg->options); + cmd_params->queue_idx = queue_idx; /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -318,13 +287,14 @@ EXPORT_SYMBOL_GPL(dpdmai_set_rx_queue); * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPDMAI object + * @queue_idx: DMA Queue index * @priority: Select the queue relative to number of * priorities configured at DPDMAI creation * @attr: Returned Rx queue attributes * * Return: '0' on Success; Error code otherwise. */ -int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue_idx, u8 priority, struct dpdmai_rx_queue_attr *attr) { struct dpdmai_cmd_queue *cmd_params; @@ -337,6 +307,7 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, cmd_params = (struct dpdmai_cmd_queue *)cmd.params; cmd_params->queue = priority; + cmd_params->queue_idx = queue_idx; /* send command to mc*/ err = mc_send_command(mc_io, &cmd); @@ -345,8 +316,8 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, /* retrieve response parameters */ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id); - attr->dest_cfg.priority = cmd_params->priority; - attr->dest_cfg.dest_type = cmd_params->dest_type; + attr->dest_cfg.priority = cmd_params->dest_priority; + attr->dest_cfg.dest_type = FIELD_GET(DEST_TYPE_MASK, cmd_params->dest_type); attr->user_ctx = le64_to_cpu(cmd_params->user_ctx); attr->fqid = le32_to_cpu(cmd_params->fqid); @@ -359,14 +330,15 @@ EXPORT_SYMBOL_GPL(dpdmai_get_rx_queue); * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPDMAI object + * @queue_idx: DMA queue index * @priority: Select the queue relative to number of * priorities configured at DPDMAI creation - * @fqid: Returned Tx queue + * @attr: Returned DMA Tx queue attributes * * Return: '0' on Success; Error code otherwise. 
*/ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, - u16 token, u8 priority, u32 *fqid) + u16 token, u8 queue_idx, u8 priority, struct dpdmai_tx_queue_attr *attr) { struct dpdmai_rsp_get_tx_queue *rsp_params; struct dpdmai_cmd_queue *cmd_params; @@ -379,6 +351,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, cmd_params = (struct dpdmai_cmd_queue *)cmd.params; cmd_params->queue = priority; + cmd_params->queue_idx = queue_idx; /* send command to mc*/ err = mc_send_command(mc_io, &cmd); @@ -388,7 +361,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, /* retrieve response parameters */ rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params; - *fqid = le32_to_cpu(rsp_params->fqid); + attr->fqid = le32_to_cpu(rsp_params->fqid); return 0; } diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h index b13b9bf0c0..3fe7d83273 100644 --- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h +++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h @@ -5,14 +5,19 @@ #define __FSL_DPDMAI_H /* DPDMAI Version */ -#define DPDMAI_VER_MAJOR 2 -#define DPDMAI_VER_MINOR 2 +#define DPDMAI_VER_MAJOR 3 +#define DPDMAI_VER_MINOR 3 -#define DPDMAI_CMD_BASE_VERSION 0 +#define DPDMAI_CMD_BASE_VERSION 1 #define DPDMAI_CMD_ID_OFFSET 4 -#define DPDMAI_CMDID_FORMAT(x) (((x) << DPDMAI_CMD_ID_OFFSET) | \ - DPDMAI_CMD_BASE_VERSION) +/* + * Maximum number of Tx/Rx queues per DPDMAI object + */ +#define DPDMAI_MAX_QUEUE_NUM 8 + +#define DPDMAI_CMDID_FORMAT_V(x, v) (((x) << DPDMAI_CMD_ID_OFFSET) | (v)) +#define DPDMAI_CMDID_FORMAT(x) DPDMAI_CMDID_FORMAT_V(x, DPDMAI_CMD_BASE_VERSION) /* Command IDs */ #define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800) @@ -26,18 +31,9 @@ #define DPDMAI_CMDID_RESET DPDMAI_CMDID_FORMAT(0x005) #define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMDID_FORMAT(0x006) -#define DPDMAI_CMDID_SET_IRQ DPDMAI_CMDID_FORMAT(0x010) -#define DPDMAI_CMDID_GET_IRQ DPDMAI_CMDID_FORMAT(0x011) -#define DPDMAI_CMDID_SET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x012) -#define DPDMAI_CMDID_GET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x013) -#define DPDMAI_CMDID_SET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x014) -#define DPDMAI_CMDID_GET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x015) -#define DPDMAI_CMDID_GET_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x016) -#define DPDMAI_CMDID_CLEAR_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x017) - -#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A0) -#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A1) -#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT(0x1A2) +#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A0, 2) +#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A1, 2) +#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A2, 2) #define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */ #define MC_CMD_HDR_TOKEN_S 16 /* Token field size */ @@ -49,30 +45,32 @@ * Contains initialization APIs and runtime control APIs for DPDMAI */ -/** +/* * Maximum number of Tx/Rx priorities per DPDMAI object */ #define DPDMAI_PRIO_NUM 2 /* DPDMAI queue modification options */ -/** +/* * Select to modify the user's context associated with the queue */ #define DPDMAI_QUEUE_OPT_USER_CTX 0x1 -/** +/* * Select to modify the queue's destination */ #define DPDMAI_QUEUE_OPT_DEST 0x2 /** * struct dpdmai_cfg - Structure representing DPDMAI configuration + * @num_queues: Number of the DMA queues * @priorities: Priorities for the DMA hardware processing; valid priorities are * configured with values 1-8; the entry following last valid entry * should be configured with 0 */ struct 
dpdmai_cfg { + u8 num_queues; u8 priorities[DPDMAI_PRIO_NUM]; }; @@ -80,20 +78,19 @@ struct dpdmai_cfg { * struct dpdmai_attr - Structure representing DPDMAI attributes * @id: DPDMAI object ID * @version: DPDMAI version + * @version.major: DPDMAI major version + * @version.minor: DPDMAI minor version * @num_of_priorities: number of priorities + * @num_of_queues: number of the DMA queues */ struct dpdmai_attr { int id; - /** - * struct version - DPDMAI version - * @major: DPDMAI major version - * @minor: DPDMAI minor version - */ struct { u16 major; u16 minor; } version; u8 num_of_priorities; + u8 num_of_queues; }; /** @@ -158,22 +155,24 @@ struct dpdmai_rx_queue_attr { u32 fqid; }; +struct dpdmai_tx_queue_attr { + u32 fqid; +}; + int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpdmai_id, u16 *token); int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); -int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); -int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags, - const struct dpdmai_cfg *cfg, u16 *token); +int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u32 dpdmai_id, u16 token); int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, struct dpdmai_attr *attr); int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, - u8 priority, const struct dpdmai_rx_queue_cfg *cfg); + u8 queue_idx, u8 priority, const struct dpdmai_rx_queue_cfg *cfg); int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, - u8 priority, struct dpdmai_rx_queue_attr *attr); + u8 queue_idx, u8 priority, struct dpdmai_rx_queue_attr *attr); int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, - u16 token, u8 priority, u32 *fqid); + u16 token, u8 queue_idx, u8 priority, struct dpdmai_tx_queue_attr *attr); #endif /* __FSL_DPDMAI_H */ diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index b18faa7cfe..0af934b56a 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -3,6 +3,8 @@ // Copyright (c) 2013-2014 Freescale Semiconductor, Inc // Copyright (c) 2017 Sysam, Angelo Dureghello +#include +#include #include #include #include @@ -74,18 +76,10 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan) flags = fsl_edma_drvflags(fsl_chan); val = edma_readl_chreg(fsl_chan, ch_sbr); - /* Remote/local swapped wrongly on iMX8 QM Audio edma */ - if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) { - if (!fsl_chan->is_rxchan) - val |= EDMA_V3_CH_SBR_RD; - else - val |= EDMA_V3_CH_SBR_WR; - } else { - if (fsl_chan->is_rxchan) - val |= EDMA_V3_CH_SBR_RD; - else - val |= EDMA_V3_CH_SBR_WR; - } + if (fsl_chan->is_rxchan) + val |= EDMA_V3_CH_SBR_RD; + else + val |= EDMA_V3_CH_SBR_WR; if (fsl_chan->is_remote) val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR); @@ -546,6 +540,8 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan, csr |= EDMA_TCD_CSR_START; fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr); + + trace_edma_fill_tcd(fsl_chan, tcd); } static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan, @@ -762,6 +758,8 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan, fsl_desc->iscyclic = false; fsl_chan->is_sw = true; + if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE) + fsl_chan->is_remote = 
true; /* To match with copy_align and max_seg_size so 1 tcd is enough */ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst, @@ -810,6 +808,9 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan) { struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); + if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK) + clk_prepare_enable(fsl_chan->clk); + fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev, fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ? sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd), @@ -838,6 +839,9 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan) fsl_chan->tcd_pool = NULL; fsl_chan->is_sw = false; fsl_chan->srcid = 0; + fsl_chan->is_remote = false; + if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK) + clk_disable_unprepare(fsl_chan->clk); } void fsl_edma_cleanup_vchan(struct dma_device *dmadev) diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index 7bf0aba471..268db38767 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -151,7 +151,6 @@ struct fsl_edma_chan { enum dma_status status; enum fsl_edma_pm_state pm_state; bool idle; - u32 slave_id; struct fsl_edma_engine *edma; struct fsl_edma_desc *edesc; struct dma_slave_config cfg; @@ -195,8 +194,7 @@ struct fsl_edma_desc { #define FSL_EDMA_DRV_HAS_PD BIT(5) #define FSL_EDMA_DRV_HAS_CHCLK BIT(6) #define FSL_EDMA_DRV_HAS_CHMUX BIT(7) -/* imx8 QM audio edma remote local swapped */ -#define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8) +#define FSL_EDMA_DRV_MEM_REMOTE BIT(8) /* control and status register is in tcd address space, edma3 reg layout */ #define FSL_EDMA_DRV_SPLIT_REG BIT(9) #define FSL_EDMA_DRV_BUS_8BYTE BIT(10) @@ -238,7 +236,6 @@ struct fsl_edma_engine { void __iomem *muxbase[DMAMUX_NR]; struct clk *muxclk[DMAMUX_NR]; struct clk *dmaclk; - struct clk *chclk; struct mutex fsl_edma_mutex; const struct fsl_edma_drvdata *drvdata; u32 n_chans; @@ -250,13 +247,17 @@ struct fsl_edma_engine { struct fsl_edma_chan chans[] __counted_by(n_chans); }; +static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan) +{ + return fsl_chan->edma->drvdata->flags; +} + #define edma_read_tcdreg_c(chan, _tcd, __name) \ -(sizeof((_tcd)->__name) == sizeof(u64) ? \ - edma_readq(chan->edma, &(_tcd)->__name) : \ - ((sizeof((_tcd)->__name) == sizeof(u32)) ? \ - edma_readl(chan->edma, &(_tcd)->__name) : \ - edma_readw(chan->edma, &(_tcd)->__name) \ - )) +_Generic(((_tcd)->__name), \ + __iomem __le64 : edma_readq(chan->edma, &(_tcd)->__name), \ + __iomem __le32 : edma_readl(chan->edma, &(_tcd)->__name), \ + __iomem __le16 : edma_readw(chan->edma, &(_tcd)->__name) \ + ) #define edma_read_tcdreg(chan, __name) \ ((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ? 
\ @@ -264,23 +265,13 @@ struct fsl_edma_engine { edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name) \ ) -#define edma_write_tcdreg_c(chan, _tcd, _val, __name) \ -do { \ - switch (sizeof(_tcd->__name)) { \ - case sizeof(u64): \ - edma_writeq(chan->edma, (u64 __force)_val, &_tcd->__name); \ - break; \ - case sizeof(u32): \ - edma_writel(chan->edma, (u32 __force)_val, &_tcd->__name); \ - break; \ - case sizeof(u16): \ - edma_writew(chan->edma, (u16 __force)_val, &_tcd->__name); \ - break; \ - case sizeof(u8): \ - edma_writeb(chan->edma, (u8 __force)_val, &_tcd->__name); \ - break; \ - } \ -} while (0) +#define edma_write_tcdreg_c(chan, _tcd, _val, __name) \ +_Generic((_tcd->__name), \ + __iomem __le64 : edma_writeq(chan->edma, (u64 __force)(_val), &_tcd->__name), \ + __iomem __le32 : edma_writel(chan->edma, (u32 __force)(_val), &_tcd->__name), \ + __iomem __le16 : edma_writew(chan->edma, (u16 __force)(_val), &_tcd->__name), \ + __iomem u8 : edma_writeb(chan->edma, _val, &_tcd->__name) \ + ) #define edma_write_tcdreg(chan, val, __name) \ do { \ @@ -321,9 +312,11 @@ do { \ (((struct fsl_edma_hw_tcd *)_tcd)->_field)) #define fsl_edma_le_to_cpu(x) \ -(sizeof(x) == sizeof(u64) ? le64_to_cpu((__force __le64)(x)) : \ - (sizeof(x) == sizeof(u32) ? le32_to_cpu((__force __le32)(x)) : \ - le16_to_cpu((__force __le16)(x)))) +_Generic((x), \ + __le64 : le64_to_cpu((x)), \ + __le32 : le32_to_cpu((x)), \ + __le16 : le16_to_cpu((x)) \ +) #define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field) \ (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? \ @@ -331,19 +324,11 @@ do { \ fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field)) #define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field) \ -do { \ - switch (sizeof((_tcd)->_field)) { \ - case sizeof(u64): \ - *(__force __le64 *)(&((_tcd)->_field)) = cpu_to_le64(_val); \ - break; \ - case sizeof(u32): \ - *(__force __le32 *)(&((_tcd)->_field)) = cpu_to_le32(_val); \ - break; \ - case sizeof(u16): \ - *(__force __le16 *)(&((_tcd)->_field)) = cpu_to_le16(_val); \ - break; \ - } \ -} while (0) +_Generic(((_tcd)->_field), \ + __le64 : (_tcd)->_field = cpu_to_le64(_val), \ + __le32 : (_tcd)->_field = cpu_to_le32(_val), \ + __le16 : (_tcd)->_field = cpu_to_le16(_val) \ +) #define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field) \ do { \ @@ -353,6 +338,9 @@ do { \ fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field); \ } while (0) +/* Need after struct defination */ +#include "fsl-edma-trace.h" + /* * R/W functions for big- or little-endian registers: * The eDMA controller's endian is independent of the CPU core's endian. 
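The edma_read_tcdreg_c(), edma_write_tcdreg_c(), fsl_edma_le_to_cpu() and fsl_edma_set_tcd_to_le_c() rewrites above replace sizeof()-based switch/ternary chains with C11 _Generic selection, so the accessor is chosen from the field's declared type at compile time and an unsupported field type fails the build instead of silently falling through. A self-contained userspace sketch of that dispatch style, using plain uintN_t fields in place of the driver's __iomem __le types (all names here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* One accessor per register width, standing in for edma_readw/readl/readq. */
static uint16_t read16(const uint16_t *p) { return *p; }
static uint32_t read32(const uint32_t *p) { return *p; }
static uint64_t read64(const uint64_t *p) { return *p; }

/*
 * _Generic picks the accessor from the field's type at compile time; a field
 * of any other type is a compile error rather than a silent fall-through.
 */
#define reg_read(field)                         \
        _Generic((field),                       \
                 uint16_t : read16,             \
                 uint32_t : read32,             \
                 uint64_t : read64)(&(field))

struct tcd {
        uint64_t saddr;
        uint32_t nbytes;
        uint16_t csr;
};

int main(void)
{
        struct tcd t = { .saddr = 0x1000, .nbytes = 64, .csr = 0x8 };

        printf("saddr=%llu nbytes=%u csr=%u\n",
               (unsigned long long)reg_read(t.saddr),
               (unsigned)reg_read(t.nbytes),
               (unsigned)reg_read(t.csr));
        return 0;
}

The write-side macros in the patch work the same way, selecting a width-specific store helper from the destination field's type.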
@@ -371,23 +359,38 @@ static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr) h = ioread32(addr + 4); } + trace_edma_readl(edma, addr, l); + trace_edma_readl(edma, addr + 4, h); + return (h << 32) | l; } static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr) { + u32 val; + if (edma->big_endian) - return ioread32be(addr); + val = ioread32be(addr); else - return ioread32(addr); + val = ioread32(addr); + + trace_edma_readl(edma, addr, val); + + return val; } static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr) { + u16 val; + if (edma->big_endian) - return ioread16be(addr); + val = ioread16be(addr); else - return ioread16(addr); + val = ioread16(addr); + + trace_edma_readw(edma, addr, val); + + return val; } static inline void edma_writeb(struct fsl_edma_engine *edma, @@ -398,6 +401,8 @@ static inline void edma_writeb(struct fsl_edma_engine *edma, iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3)); else iowrite8(val, addr); + + trace_edma_writeb(edma, addr, val); } static inline void edma_writew(struct fsl_edma_engine *edma, @@ -408,6 +413,8 @@ static inline void edma_writew(struct fsl_edma_engine *edma, iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2)); else iowrite16(val, addr); + + trace_edma_writew(edma, addr, val); } static inline void edma_writel(struct fsl_edma_engine *edma, @@ -417,6 +424,8 @@ static inline void edma_writel(struct fsl_edma_engine *edma, iowrite32be(val, addr); else iowrite32(val, addr); + + trace_edma_writel(edma, addr, val); } static inline void edma_writeq(struct fsl_edma_engine *edma, @@ -429,6 +438,9 @@ static inline void edma_writeq(struct fsl_edma_engine *edma, iowrite32(val & 0xFFFFFFFF, addr); iowrite32(val >> 32, addr + 4); } + + trace_edma_writel(edma, addr, val & 0xFFFFFFFF); + trace_edma_writel(edma, addr + 4, val >> 32); } static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan) @@ -436,11 +448,6 @@ static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan) return container_of(chan, struct fsl_edma_chan, vchan.chan); } -static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan) -{ - return fsl_chan->edma->drvdata->flags; -} - static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd) { return container_of(vd, struct fsl_edma_desc, vdesc); diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index 402f0058a1..43d84cfefb 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -105,7 +105,8 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, if (dma_spec->args_count != 2) return NULL; - mutex_lock(&fsl_edma->fsl_edma_mutex); + guard(mutex)(&fsl_edma->fsl_edma_mutex); + list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) { if (chan->client_count) continue; @@ -114,15 +115,20 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, if (chan) { chan->device->privatecnt++; fsl_chan = to_fsl_edma_chan(chan); - fsl_chan->slave_id = dma_spec->args[1]; - fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, + fsl_chan->srcid = dma_spec->args[1]; + + if (!fsl_chan->srcid) { + dev_err(&fsl_chan->pdev->dev, "Invalidate srcid %d\n", + fsl_chan->srcid); + return NULL; + } + + fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid, true); - mutex_unlock(&fsl_edma->fsl_edma_mutex); return chan; } } } - mutex_unlock(&fsl_edma->fsl_edma_mutex); return NULL; } @@ -336,16 +342,19 @@ static struct fsl_edma_drvdata imx7ulp_data = { 
}; static struct fsl_edma_drvdata imx8qm_data = { - .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3, + .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE, .chreg_space_sz = 0x10000, .chreg_off = 0x10000, .setup_irq = fsl_edma3_irq_init, }; -static struct fsl_edma_drvdata imx8qm_audio_data = { - .flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3, +static struct fsl_edma_drvdata imx8ulp_data = { + .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_CHCLK | FSL_EDMA_DRV_HAS_DMACLK | + FSL_EDMA_DRV_EDMA3, .chreg_space_sz = 0x10000, .chreg_off = 0x10000, + .mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux), + .mux_skip = 0x10000, .setup_irq = fsl_edma3_irq_init, }; @@ -380,7 +389,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = { { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data}, { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data}, { .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data}, - { .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data}, + { .compatible = "fsl,imx8ulp-edma", .data = &imx8ulp_data}, { .compatible = "fsl,imx93-edma3", .data = &imx93_data3}, { .compatible = "fsl,imx93-edma4", .data = &imx93_data4}, { .compatible = "fsl,imx95-edma5", .data = &imx95_data5}, @@ -434,6 +443,7 @@ static int fsl_edma_probe(struct platform_device *pdev) struct fsl_edma_engine *fsl_edma; const struct fsl_edma_drvdata *drvdata = NULL; u32 chan_mask[2] = {0, 0}; + char clk_name[36]; struct edma_regs *regs; int chans; int ret, i; @@ -476,14 +486,6 @@ static int fsl_edma_probe(struct platform_device *pdev) } } - if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) { - fsl_edma->chclk = devm_clk_get_enabled(&pdev->dev, "mp"); - if (IS_ERR(fsl_edma->chclk)) { - dev_err(&pdev->dev, "Missing MP block clock.\n"); - return PTR_ERR(fsl_edma->chclk); - } - } - ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2); if (ret > 0) { @@ -540,7 +542,7 @@ static int fsl_edma_probe(struct platform_device *pdev) fsl_chan->edma = fsl_edma; fsl_chan->pm_state = RUNNING; - fsl_chan->slave_id = 0; + fsl_chan->srcid = 0; fsl_chan->idle = true; fsl_chan->dma_dir = DMA_NONE; fsl_chan->vchan.desc_free = fsl_edma_free_desc; @@ -551,11 +553,21 @@ static int fsl_edma_probe(struct platform_device *pdev) + i * drvdata->chreg_space_sz + drvdata->chreg_off + len; fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip; + if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) { + snprintf(clk_name, sizeof(clk_name), "ch%02d", i); + fsl_chan->clk = devm_clk_get_enabled(&pdev->dev, + (const char *)clk_name); + + if (IS_ERR(fsl_chan->clk)) + return PTR_ERR(fsl_chan->clk); + } fsl_chan->pdev = pdev; vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr); fsl_edma_chan_mux(fsl_chan, 0, false); + if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) + clk_disable_unprepare(fsl_chan->clk); } ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma); @@ -682,8 +694,8 @@ static int fsl_edma_resume_early(struct device *dev) continue; fsl_chan->pm_state = RUNNING; edma_write_tcdreg(fsl_chan, 0, csr); - if (fsl_chan->slave_id != 0) - fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true); + if (fsl_chan->srcid != 0) + fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid, true); } if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)) diff --git a/drivers/dma/fsl-edma-trace.c b/drivers/dma/fsl-edma-trace.c new file mode 100644 index 0000000000..28300ad80b --- /dev/null +++ 
b/drivers/dma/fsl-edma-trace.c @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define CREATE_TRACE_POINTS +#include "fsl-edma-common.h" diff --git a/drivers/dma/fsl-edma-trace.h b/drivers/dma/fsl-edma-trace.h new file mode 100644 index 0000000000..d3541301a2 --- /dev/null +++ b/drivers/dma/fsl-edma-trace.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2023 NXP. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM fsl_edma + +#if !defined(__LINUX_FSL_EDMA_TRACE) || defined(TRACE_HEADER_MULTI_READ) +#define __LINUX_FSL_EDMA_TRACE + +#include +#include + +DECLARE_EVENT_CLASS(edma_log_io, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value), + TP_STRUCT__entry( + __field(struct fsl_edma_engine *, edma) + __field(void __iomem *, addr) + __field(u32, value) + ), + TP_fast_assign( + __entry->edma = edma; + __entry->addr = addr; + __entry->value = value; + ), + TP_printk("offset %08x: value %08x", + (u32)(__entry->addr - __entry->edma->membase), __entry->value) +); + +DEFINE_EVENT(edma_log_io, edma_readl, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value) +); + +DEFINE_EVENT(edma_log_io, edma_writel, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value) +); + +DEFINE_EVENT(edma_log_io, edma_readw, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value) +); + +DEFINE_EVENT(edma_log_io, edma_writew, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value) +); + +DEFINE_EVENT(edma_log_io, edma_readb, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value) +); + +DEFINE_EVENT(edma_log_io, edma_writeb, + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), + TP_ARGS(edma, addr, value) +); + +DECLARE_EVENT_CLASS(edma_log_tcd, + TP_PROTO(struct fsl_edma_chan *chan, void *tcd), + TP_ARGS(chan, tcd), + TP_STRUCT__entry( + __field(u64, saddr) + __field(u16, soff) + __field(u16, attr) + __field(u32, nbytes) + __field(u64, slast) + __field(u64, daddr) + __field(u16, doff) + __field(u16, citer) + __field(u64, dlast_sga) + __field(u16, csr) + __field(u16, biter) + + ), + TP_fast_assign( + __entry->saddr = fsl_edma_get_tcd_to_cpu(chan, tcd, saddr), + __entry->soff = fsl_edma_get_tcd_to_cpu(chan, tcd, soff), + __entry->attr = fsl_edma_get_tcd_to_cpu(chan, tcd, attr), + __entry->nbytes = fsl_edma_get_tcd_to_cpu(chan, tcd, nbytes), + __entry->slast = fsl_edma_get_tcd_to_cpu(chan, tcd, slast), + __entry->daddr = fsl_edma_get_tcd_to_cpu(chan, tcd, daddr), + __entry->doff = fsl_edma_get_tcd_to_cpu(chan, tcd, doff), + __entry->citer = fsl_edma_get_tcd_to_cpu(chan, tcd, citer), + __entry->dlast_sga = fsl_edma_get_tcd_to_cpu(chan, tcd, dlast_sga), + __entry->csr = fsl_edma_get_tcd_to_cpu(chan, tcd, csr), + __entry->biter = fsl_edma_get_tcd_to_cpu(chan, tcd, biter); + ), + TP_printk("\n==== TCD =====\n" + " saddr: 0x%016llx\n" + " soff: 0x%04x\n" + " attr: 0x%04x\n" + " nbytes: 0x%08x\n" + " slast: 0x%016llx\n" + " daddr: 0x%016llx\n" + " doff: 0x%04x\n" + " citer: 0x%04x\n" + " dlast: 0x%016llx\n" + " csr: 0x%04x\n" + " biter: 0x%04x\n", + __entry->saddr, + __entry->soff, + __entry->attr, + __entry->nbytes, + __entry->slast, + __entry->daddr, + __entry->doff, + __entry->citer, + __entry->dlast_sga, + __entry->csr, + __entry->biter) +); + +DEFINE_EVENT(edma_log_tcd, edma_fill_tcd, + 
TP_PROTO(struct fsl_edma_chan *chan, void *tcd), + TP_ARGS(chan, tcd) +); + +#endif + +/* this part must be outside header guard */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE fsl-edma-trace + +#include diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index fd9bbee4cc..57f1bf2ab2 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -592,6 +592,14 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) if (idxd->state != IDXD_DEV_ENABLED) return -ENXIO; + mutex_lock(&wq->wq_lock); + + if (!idxd_wq_driver_name_match(wq, dev)) { + idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; + rc = -ENODEV; + goto wq_err; + } + /* * User type WQ is enabled only when SVA is enabled for two reasons: * - If no IOMMU or IOMMU Passthrough without SVA, userspace @@ -607,14 +615,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) dev_dbg(&idxd->pdev->dev, "User type WQ cannot be enabled without SVA.\n"); - return -EOPNOTSUPP; - } - - mutex_lock(&wq->wq_lock); - - if (!idxd_wq_driver_name_match(wq, dev)) { - idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; - rc = -ENODEV; + rc = -EOPNOTSUPP; goto wq_err; } diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 9b42f5e96b..003e1580b9 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -137,7 +138,11 @@ * 0: Source on AIPS * 12 Destination Bit(DP) 1: Destination on SPBA * 0: Destination on AIPS - * 13-15 --------- MUST BE 0 + * 13 Source FIFO 1: Source is dual FIFO + * 0: Source is single FIFO + * 14 Destination FIFO 1: Destination is dual FIFO + * 0: Destination is single FIFO + * 15 --------- MUST BE 0 * 16-23 Higher WML HWML * 24-27 N Total number of samples after * which Pad adding/Swallowing @@ -168,6 +173,8 @@ #define SDMA_WATERMARK_LEVEL_SPDIF BIT(10) #define SDMA_WATERMARK_LEVEL_SP BIT(11) #define SDMA_WATERMARK_LEVEL_DP BIT(12) +#define SDMA_WATERMARK_LEVEL_SD BIT(13) +#define SDMA_WATERMARK_LEVEL_DD BIT(14) #define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16) #define SDMA_WATERMARK_LEVEL_LWE BIT(28) #define SDMA_WATERMARK_LEVEL_HWE BIT(29) @@ -175,6 +182,7 @@ #define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) #define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \ @@ -232,20 +240,23 @@ struct sdma_script_start_addrs { s32 utra_addr; s32 ram_code_start_addr; /* End of v1 array */ - s32 mcu_2_ssish_addr; + union { s32 v1_end; s32 mcu_2_ssish_addr; }; s32 ssish_2_mcu_addr; s32 hdmi_dma_addr; /* End of v2 array */ - s32 zcanfd_2_mcu_addr; + union { s32 v2_end; s32 zcanfd_2_mcu_addr; }; s32 zqspi_2_mcu_addr; s32 mcu_2_ecspi_addr; s32 mcu_2_sai_addr; s32 sai_2_mcu_addr; s32 uart_2_mcu_rom_addr; s32 uartsh_2_mcu_rom_addr; + s32 i2c_2_mcu_addr; + s32 mcu_2_i2c_addr; /* End of v3 array */ - s32 mcu_2_zqspi_addr; + union { s32 v3_end; s32 mcu_2_zqspi_addr; }; /* End of v4 array */ + s32 v4_end[0]; }; /* @@ -531,6 +542,7 @@ struct sdma_engine { /* clock ratio for AHB:SDMA core. 
1:1 is 1, 2:1 is 0*/ bool clk_ratio; bool fw_loaded; + struct gen_pool *iram_pool; }; static int sdma_config_write(struct dma_chan *chan, @@ -1072,6 +1084,11 @@ static int sdma_get_pc(struct sdma_channel *sdmac, per_2_emi = sdma->script_addrs->sai_2_mcu_addr; emi_2_per = sdma->script_addrs->mcu_2_sai_addr; break; + case IMX_DMATYPE_I2C: + per_2_emi = sdma->script_addrs->i2c_2_mcu_addr; + emi_2_per = sdma->script_addrs->mcu_2_i2c_addr; + sdmac->is_ram_script = true; + break; case IMX_DMATYPE_HDMI: emi_2_per = sdma->script_addrs->hdmi_dma_addr; sdmac->is_ram_script = true; @@ -1255,6 +1272,16 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac) sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP; sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT; + + /* + * Limitation: The p2p script support dual fifos in maximum, + * So when fifo number is larger than 1, force enable dual + * fifos. + */ + if (sdmac->n_fifos_src > 1) + sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SD; + if (sdmac->n_fifos_dst > 1) + sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DD; } static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac) @@ -1358,8 +1385,14 @@ static int sdma_request_channel0(struct sdma_engine *sdma) { int ret = -EBUSY; - sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys, - GFP_NOWAIT); + if (sdma->iram_pool) + sdma->bd0 = gen_pool_dma_alloc(sdma->iram_pool, + sizeof(struct sdma_buffer_descriptor), + &sdma->bd0_phys); + else + sdma->bd0 = dma_alloc_coherent(sdma->dev, + sizeof(struct sdma_buffer_descriptor), + &sdma->bd0_phys, GFP_NOWAIT); if (!sdma->bd0) { ret = -ENOMEM; goto out; @@ -1379,10 +1412,14 @@ out: static int sdma_alloc_bd(struct sdma_desc *desc) { u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); + struct sdma_engine *sdma = desc->sdmac->sdma; int ret = 0; - desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size, - &desc->bd_phys, GFP_NOWAIT); + if (sdma->iram_pool) + desc->bd = gen_pool_dma_alloc(sdma->iram_pool, bd_size, &desc->bd_phys); + else + desc->bd = dma_alloc_coherent(sdma->dev, bd_size, &desc->bd_phys, GFP_NOWAIT); + if (!desc->bd) { ret = -ENOMEM; goto out; @@ -1394,9 +1431,12 @@ out: static void sdma_free_bd(struct sdma_desc *desc) { u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); + struct sdma_engine *sdma = desc->sdmac->sdma; - dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, - desc->bd_phys); + if (sdma->iram_pool) + gen_pool_free(sdma->iram_pool, (unsigned long)desc->bd, bd_size); + else + dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, desc->bd_phys); } static void sdma_desc_free(struct virt_dma_desc *vd) @@ -1643,6 +1683,9 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( if (count & 3 || sg->dma_address & 3) goto err_bd_out; break; + case DMA_SLAVE_BUSWIDTH_3_BYTES: + bd->mode.command = 3; + break; case DMA_SLAVE_BUSWIDTH_2_BYTES: bd->mode.command = 2; if (count & 1 || sg->dma_address & 1) @@ -1880,10 +1923,17 @@ static void sdma_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&sdmac->vc.lock, flags); } -#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 -#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38 -#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 45 -#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 46 +#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 \ +(offsetof(struct sdma_script_start_addrs, v1_end) / sizeof(s32)) + +#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 \ +(offsetof(struct sdma_script_start_addrs, v2_end) / sizeof(s32)) + +#define 
SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 \ +(offsetof(struct sdma_script_start_addrs, v3_end) / sizeof(s32)) + +#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 \ +(offsetof(struct sdma_script_start_addrs, v4_end) / sizeof(s32)) static void sdma_add_scripts(struct sdma_engine *sdma, const struct sdma_script_start_addrs *addr) @@ -2068,6 +2118,7 @@ static int sdma_init(struct sdma_engine *sdma) { int i, ret; dma_addr_t ccb_phys; + int ccbsize; ret = clk_enable(sdma->clk_ipg); if (ret) @@ -2083,10 +2134,14 @@ static int sdma_init(struct sdma_engine *sdma) /* Be sure SDMA has not started yet */ writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); - sdma->channel_control = dma_alloc_coherent(sdma->dev, - MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) + - sizeof(struct sdma_context_data), - &ccb_phys, GFP_KERNEL); + ccbsize = MAX_DMA_CHANNELS * (sizeof(struct sdma_channel_control) + + sizeof(struct sdma_context_data)); + + if (sdma->iram_pool) + sdma->channel_control = gen_pool_dma_alloc(sdma->iram_pool, ccbsize, &ccb_phys); + else + sdma->channel_control = dma_alloc_coherent(sdma->dev, ccbsize, &ccb_phys, + GFP_KERNEL); if (!sdma->channel_control) { ret = -ENOMEM; @@ -2272,6 +2327,12 @@ static int sdma_probe(struct platform_device *pdev) vchan_init(&sdmac->vc, &sdma->dma_device); } + if (np) { + sdma->iram_pool = of_gen_pool_get(np, "iram", 0); + if (sdma->iram_pool) + dev_info(&pdev->dev, "alloc bd from iram.\n"); + } + ret = sdma_init(sdma); if (ret) goto err_init; diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c index dba6317838..78c606f6d0 100644 --- a/drivers/dma/mcf-edma-main.c +++ b/drivers/dma/mcf-edma-main.c @@ -195,7 +195,7 @@ static int mcf_edma_probe(struct platform_device *pdev) struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i]; mcf_chan->edma = mcf_edma; - mcf_chan->slave_id = i; + mcf_chan->srcid = i; mcf_chan->idle = true; mcf_chan->dma_dir = DMA_NONE; mcf_chan->vchan.desc_free = fsl_edma_free_desc; @@ -277,7 +277,7 @@ bool mcf_edma_filter_fn(struct dma_chan *chan, void *param) if (chan->device->dev->driver == &mcf_edma_driver.driver) { struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan); - return (mcf_chan->slave_id == (uintptr_t)param); + return (mcf_chan->srcid == (uintptr_t)param); } return false; diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index c359decc07..6b2793b076 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -155,11 +155,6 @@ static inline struct device *chan2dev(struct dma_chan *chan) return &chan->dev->device; } -static inline struct device *chan2parent(struct dma_chan *chan) -{ - return chan->dev->device.parent; -} - static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan) { diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index ad8e3da1b2..60c4de8dac 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -3262,7 +3262,6 @@ MODULE_DEVICE_TABLE(amba, pl330_ids); static struct amba_driver pl330_driver = { .drv = { - .owner = THIS_MODULE, .name = "dma-pl330", .pm = &pl330_pm, }, diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 202ac95227..721b4ac085 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -50,7 +50,6 @@ #include #include #include -#include #include #include #include @@ -947,22 +946,12 @@ static const struct acpi_device_id hidma_acpi_ids[] = { MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids); #endif -static const struct of_device_id hidma_match[] = { - {.compatible = "qcom,hidma-1.0",}, - {.compatible = "qcom,hidma-1.1", .data = (void 
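/*
 * Editor's note -- illustrative sketch, not part of the patch. The
 * offsetof()-based SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V* macros in the imx-sdma
 * hunk above replace hard-coded element counts with values derived from
 * the vN_end union members, so the counts can no longer drift out of sync
 * with the struct layout. A self-contained demo of the same trick on a
 * trimmed, hypothetical subset of the struct:
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef int32_t s32;

struct demo_script_addrs {
	s32 utra_addr;                                  /* v1 entries ... */
	s32 ram_code_start_addr;
	union { s32 v1_end; s32 mcu_2_ssish_addr; };    /* first v2 entry */
	s32 ssish_2_mcu_addr;
	union { s32 v2_end; s32 zcanfd_2_mcu_addr; };   /* first v3 entry */
	s32 zqspi_2_mcu_addr;
};

#define DEMO_ARRAY_SIZE_V1 \
	(offsetof(struct demo_script_addrs, v1_end) / sizeof(s32))
#define DEMO_ARRAY_SIZE_V2 \
	(offsetof(struct demo_script_addrs, v2_end) / sizeof(s32))
#define DEMO_ARRAY_SIZE_V3 \
	(sizeof(struct demo_script_addrs) / sizeof(s32))

int main(void)
{
	/* Prints 2, 4 and 6 for the layout above. */
	printf("v1=%zu v2=%zu v3=%zu\n",
	       DEMO_ARRAY_SIZE_V1, DEMO_ARRAY_SIZE_V2, DEMO_ARRAY_SIZE_V3);
	return 0;
}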
*)(HIDMA_MSI_CAP),}, - {.compatible = "qcom,hidma-1.2", - .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),}, - {}, -}; -MODULE_DEVICE_TABLE(of, hidma_match); - static struct platform_driver hidma_driver = { .probe = hidma_probe, .remove_new = hidma_remove, .shutdown = hidma_shutdown, .driver = { .name = "hidma", - .of_match_table = hidma_match, .acpi_match_table = ACPI_PTR(hidma_acpi_ids), }, }; diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index 1d675f3125..bb883e138e 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c @@ -7,12 +7,7 @@ #include #include -#include #include -#include -#include -#include -#include #include #include #include @@ -327,115 +322,13 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = { MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids); #endif -static const struct of_device_id hidma_mgmt_match[] = { - {.compatible = "qcom,hidma-mgmt-1.0",}, - {}, -}; -MODULE_DEVICE_TABLE(of, hidma_mgmt_match); - static struct platform_driver hidma_mgmt_driver = { .probe = hidma_mgmt_probe, .driver = { .name = "hidma-mgmt", - .of_match_table = hidma_mgmt_match, .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids), }, }; -#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) -static int object_counter; - -static int __init hidma_mgmt_of_populate_channels(struct device_node *np) -{ - struct platform_device *pdev_parent = of_find_device_by_node(np); - struct platform_device_info pdevinfo; - struct device_node *child; - struct resource *res; - int ret = 0; - - /* allocate a resource array */ - res = kcalloc(3, sizeof(*res), GFP_KERNEL); - if (!res) - return -ENOMEM; - - for_each_available_child_of_node(np, child) { - struct platform_device *new_pdev; - - ret = of_address_to_resource(child, 0, &res[0]); - if (!ret) - goto out; - - ret = of_address_to_resource(child, 1, &res[1]); - if (!ret) - goto out; - - ret = of_irq_to_resource(child, 0, &res[2]); - if (ret <= 0) - goto out; - - memset(&pdevinfo, 0, sizeof(pdevinfo)); - pdevinfo.fwnode = &child->fwnode; - pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL; - pdevinfo.name = child->name; - pdevinfo.id = object_counter++; - pdevinfo.res = res; - pdevinfo.num_res = 3; - pdevinfo.data = NULL; - pdevinfo.size_data = 0; - pdevinfo.dma_mask = DMA_BIT_MASK(64); - new_pdev = platform_device_register_full(&pdevinfo); - if (IS_ERR(new_pdev)) { - ret = PTR_ERR(new_pdev); - goto out; - } - new_pdev->dev.of_node = child; - of_dma_configure(&new_pdev->dev, child, true); - /* - * It is assumed that calling of_msi_configure is safe on - * platforms with or without MSI support. - */ - of_msi_configure(&new_pdev->dev, child); - } - - kfree(res); - - return ret; - -out: - of_node_put(child); - kfree(res); - - return ret; -} -#endif - -static int __init hidma_mgmt_init(void) -{ -#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) - struct device_node *child; - - for_each_matching_node(child, hidma_mgmt_match) { - /* device tree based firmware here */ - hidma_mgmt_of_populate_channels(child); - } -#endif - /* - * We do not check for return value here, as it is assumed that - * platform_driver_register must not fail. The reason for this is that - * the (potential) hidma_mgmt_of_populate_channels calls above are not - * cleaned up if it does fail, and to do this work is quite - * complicated. 
In particular, various calls of of_address_to_resource, - * of_irq_to_resource, platform_device_register_full, of_dma_configure, - * and of_msi_configure which then call other functions and so on, must - * be cleaned up - this is not a trivial exercise. - * - * Currently, this module is not intended to be unloaded, and there is - * no module_exit function defined which does the needed cleanup. For - * this reason, we have to assume success here. - */ - platform_driver_register(&hidma_mgmt_driver); - - return 0; -} -module_init(hidma_mgmt_init); +module_platform_driver(hidma_mgmt_driver); MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 6400d06588..df507d9666 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -4472,7 +4472,9 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); break; case DMA_TYPE_BCDMA: - ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2); + ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2) + + BCDMA_CAP3_HBCHAN_CNT(cap3) + + BCDMA_CAP3_UBCHAN_CNT(cap3); ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2); ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2); ud->rflow_cnt = ud->rchan_cnt; diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index e9f5250fbe..59d9eabc8b 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -81,6 +81,8 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan */ static inline bool vchan_issue_pending(struct virt_dma_chan *vc) { + lockdep_assert_held(&vc->lock); + list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); return !list_empty(&vc->desc_issued); } @@ -96,6 +98,8 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd) struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); dma_cookie_t cookie; + lockdep_assert_held(&vc->lock); + cookie = vd->tx.cookie; dma_cookie_complete(&vd->tx); dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", @@ -146,6 +150,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd) { struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + lockdep_assert_held(&vc->lock); + list_add_tail(&vd->node, &vc->desc_terminated); if (vc->cyclic == vd) @@ -160,6 +166,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd) */ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) { + lockdep_assert_held(&vc->lock); + return list_first_entry_or_null(&vc->desc_issued, struct virt_dma_desc, node); } @@ -177,6 +185,8 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, struct list_head *head) { + lockdep_assert_held(&vc->lock); + list_splice_tail_init(&vc->desc_allocated, head); list_splice_tail_init(&vc->desc_submitted, head); list_splice_tail_init(&vc->desc_issued, head); diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c index ae6e06057a..718842fdaf 100644 --- a/drivers/dma/xilinx/xdma.c +++ b/drivers/dma/xilinx/xdma.c @@ -1307,6 +1307,7 @@ static const struct platform_device_id xdma_id_table[] = { { "xdma", 0}, { }, }; +MODULE_DEVICE_TABLE(platform, xdma_id_table); static struct platform_driver xdma_driver = { .driver = { diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c index eb0637d903..36bd4825d3 100644 --- a/drivers/dma/xilinx/xilinx_dpdma.c +++ b/drivers/dma/xilinx/xilinx_dpdma.c @@ -1043,9 +1043,8 @@ static int xilinx_dpdma_chan_stop(struct 
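/*
 * Editor's note -- illustrative sketch, not part of the patch. The
 * virt-dma.h hunks above add lockdep_assert_held(&vc->lock) to helpers
 * such as vchan_issue_pending() and vchan_next_desc(), making the
 * "caller already holds the channel lock" contract enforceable under
 * lockdep. A hypothetical driver callback written against that contract
 * (demo_* names are made up):
 */
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include "virt-dma.h"

static void demo_start_transfer(struct virt_dma_chan *vc)
{
	/* hypothetical: program the controller from vchan_next_desc(vc) */
}

static void demo_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	/* Lock held, so the new assertions in the helpers stay quiet. */
	if (vchan_issue_pending(vc))
		demo_start_transfer(vc);
	spin_unlock_irqrestore(&vc->lock, flags);
}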
xilinx_dpdma_chan *chan) static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan) { struct xilinx_dpdma_tx_desc *active; - unsigned long flags; - spin_lock_irqsave(&chan->lock, flags); + spin_lock(&chan->lock); xilinx_dpdma_debugfs_desc_done_irq(chan); @@ -1057,7 +1056,7 @@ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan) "chan%u: DONE IRQ with no active descriptor!\n", chan->id); - spin_unlock_irqrestore(&chan->lock, flags); + spin_unlock(&chan->lock); } /** @@ -1072,10 +1071,9 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan) { struct xilinx_dpdma_tx_desc *pending; struct xilinx_dpdma_sw_desc *sw_desc; - unsigned long flags; u32 desc_id; - spin_lock_irqsave(&chan->lock, flags); + spin_lock(&chan->lock); pending = chan->desc.pending; if (!chan->running || !pending) @@ -1108,7 +1106,7 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan) spin_unlock(&chan->vchan.lock); out: - spin_unlock_irqrestore(&chan->lock, flags); + spin_unlock(&chan->lock); } /** diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 9c09893695..4edfb83ffb 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -54,11 +54,13 @@ obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac_mod.o layerscape_edac_mod-y := fsl_ddr_edac.o layerscape_edac.o obj-$(CONFIG_EDAC_LAYERSCAPE) += layerscape_edac_mod.o -skx_edac-y := skx_common.o skx_base.o -obj-$(CONFIG_EDAC_SKX) += skx_edac.o +skx_edac_common-y := skx_common.o -i10nm_edac-y := skx_common.o i10nm_base.o -obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o +skx_edac-y := skx_base.o +obj-$(CONFIG_EDAC_SKX) += skx_edac.o skx_edac_common.o + +i10nm_edac-y := i10nm_base.o +obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o obj-$(CONFIG_EDAC_CELL) += cell_edac.o obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index ae17ce4d97..fe89f5c483 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -737,8 +737,7 @@ static int altr_edac_device_probe(struct platform_device *pdev) } dci = edac_device_alloc_ctl_info(sizeof(*drvdata), ecc_name, - 1, ecc_name, 1, 0, NULL, 0, - dev_instance++); + 1, ecc_name, 1, 0, dev_instance++); if (!dci) { edac_printk(KERN_ERR, EDAC_DEVICE, @@ -1514,7 +1513,7 @@ static int altr_portb_setup(struct altr_edac_device_dev *device) /* Create the PortB EDAC device */ edac_idx = edac_device_alloc_index(); dci = edac_device_alloc_ctl_info(sizeof(*altdev), ecc_name, 1, - ecc_name, 1, 0, NULL, 0, edac_idx); + ecc_name, 1, 0, edac_idx); if (!dci) { edac_printk(KERN_ERR, EDAC_DEVICE, "%s: Unable to allocate PortB EDAC device\n", @@ -1921,8 +1920,7 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac, edac_idx = edac_device_alloc_index(); dci = edac_device_alloc_ctl_info(sizeof(*altdev), ecc_name, - 1, ecc_name, 1, 0, NULL, 0, - edac_idx); + 1, ecc_name, 1, 0, edac_idx); if (!dci) { edac_printk(KERN_ERR, EDAC_DEVICE, diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 1665f7932b..b879b12971 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -345,7 +345,6 @@ struct amd64_pvt { u32 dchr1; /* DRAM Configuration High DCT1 reg */ u32 nbcap; /* North Bridge Capabilities */ u32 nbcfg; /* F10 North Bridge Configuration */ - u32 ext_nbcfg; /* Extended F10 North Bridge Configuration */ u32 dhar; /* DRAM Hoist reg */ u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */ u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ diff --git 
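/*
 * Editor's note -- illustrative sketch, not part of the patch. The
 * xilinx_dpdma hunks above drop spin_lock_irqsave() in favour of plain
 * spin_lock() in the DONE/VSYNC helpers, presumably because they run from
 * the hard-IRQ handler where local interrupts are already disabled;
 * task-context users of the same lock keep the irqsave form. The general
 * split, with hypothetical demo_* names:
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static irqreturn_t demo_irq_handler(int irq, void *data)
{
	spin_lock(&demo_lock);          /* hard-IRQ context: IRQs already off */
	/* ... retire completed descriptors ... */
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}

static void demo_submit_from_task_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);   /* may race with the handler */
	/* ... queue new work ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}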
a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c index ca718f63fc..a6d3013d58 100644 --- a/drivers/edac/amd8111_edac.c +++ b/drivers/edac/amd8111_edac.c @@ -366,8 +366,7 @@ static int amd8111_dev_probe(struct pci_dev *dev, dev_info->edac_idx = edac_device_alloc_index(); dev_info->edac_dev = edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1, - NULL, 0, 0, - NULL, 0, dev_info->edac_idx); + NULL, 0, 0, dev_info->edac_idx); if (!dev_info->edac_dev) { ret = -ENOMEM; goto err_dev_put; diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c index 25517c99b3..589bc81f12 100644 --- a/drivers/edac/armada_xp_edac.c +++ b/drivers/edac/armada_xp_edac.c @@ -523,7 +523,7 @@ static int aurora_l2_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "data ECC is not enabled\n"); dci = edac_device_alloc_ctl_info(sizeof(*drvdata), - "cpu", 1, "L", 1, 2, NULL, 0, 0); + "cpu", 1, "L", 1, 2, 0); if (!dci) return -ENOMEM; diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c index 5075dc7526..eb702bc3aa 100644 --- a/drivers/edac/cpc925_edac.c +++ b/drivers/edac/cpc925_edac.c @@ -797,7 +797,7 @@ static void cpc925_add_edac_devices(void __iomem *vbase) dev_info->edac_idx = edac_device_alloc_index(); dev_info->edac_dev = edac_device_alloc_ctl_info(0, dev_info->ctl_name, - 1, NULL, 0, 0, NULL, 0, dev_info->edac_idx); + 1, NULL, 0, 0, dev_info->edac_idx); if (!dev_info->edac_dev) { cpc925_printk(KERN_ERR, "No memory for edac device\n"); goto err1; diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index 0689e15107..621dc2a5d0 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c @@ -56,14 +56,12 @@ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) struct edac_device_ctl_info * edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instances, char *blk_name, unsigned nr_blocks, unsigned off_val, - struct edac_dev_sysfs_block_attribute *attrib_spec, - unsigned nr_attrib, int device_index) + int device_index) { - struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib; struct edac_device_block *dev_blk, *blk_p, *blk; struct edac_device_instance *dev_inst, *inst; struct edac_device_ctl_info *dev_ctl; - unsigned instance, block, attr; + unsigned instance, block; void *pvt; int err; @@ -85,15 +83,6 @@ edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instance dev_ctl->blocks = dev_blk; - if (nr_attrib) { - dev_attrib = kcalloc(nr_attrib, sizeof(struct edac_dev_sysfs_block_attribute), - GFP_KERNEL); - if (!dev_attrib) - goto free; - - dev_ctl->attribs = dev_attrib; - } - if (pvt_sz) { pvt = kzalloc(pvt_sz, GFP_KERNEL); if (!pvt) @@ -132,44 +121,6 @@ edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instance edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n", instance, inst, block, blk, blk->name); - - /* if there are NO attributes OR no attribute pointer - * then continue on to next block iteration - */ - if ((nr_attrib == 0) || (attrib_spec == NULL)) - continue; - - /* setup the attribute array for this block */ - blk->nr_attribs = nr_attrib; - attrib_p = &dev_attrib[block*nr_instances*nr_attrib]; - blk->block_attributes = attrib_p; - - edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n", - blk->block_attributes); - - /* Initialize every user specified attribute in this - * block with the data the caller passed in - * Each block gets its own copy of pointers, - * and its unique 'value' - */ - for (attr = 0; attr < nr_attrib; attr++) { - 
attrib = &attrib_p[attr]; - - /* populate the unique per attrib - * with the code pointers and info - */ - attrib->attr = attrib_spec[attr].attr; - attrib->show = attrib_spec[attr].show; - attrib->store = attrib_spec[attr].store; - - attrib->block = blk; /* up link */ - - edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n", - attrib, attrib->attr.name, - &attrib_spec[attr], - attrib_spec[attr].attr.name - ); - } } } diff --git a/drivers/edac/edac_device.h b/drivers/edac/edac_device.h index 7db22a4c83..034711d71e 100644 --- a/drivers/edac/edac_device.h +++ b/drivers/edac/edac_device.h @@ -22,7 +22,6 @@ #ifndef _EDAC_DEVICE_H_ #define _EDAC_DEVICE_H_ -#include #include #include #include @@ -95,22 +94,13 @@ struct edac_dev_sysfs_attribute { * * used in leaf 'block' nodes for adding controls/attributes * - * each block in each instance of the containing control structure - * can have an array of the following. The show and store functions - * will be filled in with the show/store function in the - * low level driver. - * - * The 'value' field will be the actual value field used for - * counting + * each block in each instance of the containing control structure can + * have an array of the following. The show function will be filled in + * with the show function in the low level driver. */ struct edac_dev_sysfs_block_attribute { struct attribute attr; ssize_t (*show)(struct kobject *, struct attribute *, char *); - ssize_t (*store)(struct kobject *, struct attribute *, - const char *, size_t); - struct edac_device_block *block; - - unsigned int value; }; /* device block control structure */ @@ -200,8 +190,6 @@ struct edac_device_ctl_info { unsigned long start_time; /* edac_device load start time (jiffies) */ - struct completion removal_complete; - /* sysfs top name under 'edac' directory * and instance name: * cpu/cpu0/... 
@@ -217,7 +205,6 @@ struct edac_device_ctl_info { u32 nr_instances; struct edac_device_instance *instances; struct edac_device_block *blocks; - struct edac_dev_sysfs_block_attribute *attribs; /* Event counters for the this whole EDAC Device */ struct edac_device_counter counters; @@ -245,8 +232,6 @@ extern struct edac_device_ctl_info *edac_device_alloc_ctl_info( char *edac_device_name, unsigned nr_instances, char *edac_block_name, unsigned nr_blocks, unsigned offset_value, - struct edac_dev_sysfs_block_attribute *block_attributes, - unsigned nr_attribs, int device_index); /* The offset value can be: @@ -356,7 +341,6 @@ static inline void __edac_device_free_ctl_info(struct edac_device_ctl_info *ci) { if (ci) { kfree(ci->pvt_info); - kfree(ci->attribs); kfree(ci->blocks); kfree(ci->instances); kfree(ci); diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c index 237a542e04..fcebc4ffea 100644 --- a/drivers/edac/edac_device_sysfs.c +++ b/drivers/edac/edac_device_sysfs.c @@ -457,35 +457,19 @@ static ssize_t edac_dev_block_show(struct kobject *kobj, return -EIO; } -/* Function to 'store' fields into the edac_dev 'block' structure */ -static ssize_t edac_dev_block_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, size_t count) -{ - struct edac_dev_sysfs_block_attribute *block_attr; - - block_attr = to_block_attr(attr); - - if (block_attr->store) - return block_attr->store(kobj, attr, buffer, count); - return -EIO; -} - /* edac_dev file operations for a 'block' */ static const struct sysfs_ops device_block_ops = { .show = edac_dev_block_show, - .store = edac_dev_block_store }; -#define BLOCK_ATTR(_name,_mode,_show,_store) \ +#define BLOCK_ATTR(_name,_mode,_show) \ static struct edac_dev_sysfs_block_attribute attr_block_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .show = _show, \ - .store = _store, \ }; -BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL); -BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL); +BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show); +BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show); /* list of edac_dev 'block' attributes */ static struct attribute *device_block_attrs[] = { diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 5116873c33..4200aec048 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -146,7 +146,7 @@ static ssize_t csrow_ue_count_show(struct device *dev, { struct csrow_info *csrow = to_csrow(dev); - return sprintf(data, "%u\n", csrow->ue_count); + return sysfs_emit(data, "%u\n", csrow->ue_count); } static ssize_t csrow_ce_count_show(struct device *dev, @@ -154,7 +154,7 @@ static ssize_t csrow_ce_count_show(struct device *dev, { struct csrow_info *csrow = to_csrow(dev); - return sprintf(data, "%u\n", csrow->ce_count); + return sysfs_emit(data, "%u\n", csrow->ce_count); } static ssize_t csrow_size_show(struct device *dev, @@ -166,7 +166,7 @@ static ssize_t csrow_size_show(struct device *dev, for (i = 0; i < csrow->nr_channels; i++) nr_pages += csrow->channels[i]->dimm->nr_pages; - return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); + return sysfs_emit(data, "%u\n", PAGES_TO_MiB(nr_pages)); } static ssize_t csrow_mem_type_show(struct device *dev, @@ -174,7 +174,7 @@ static ssize_t csrow_mem_type_show(struct device *dev, { struct csrow_info *csrow = to_csrow(dev); - return sprintf(data, "%s\n", edac_mem_types[csrow->channels[0]->dimm->mtype]); + return sysfs_emit(data, "%s\n", 
edac_mem_types[csrow->channels[0]->dimm->mtype]); } static ssize_t csrow_dev_type_show(struct device *dev, @@ -182,7 +182,7 @@ static ssize_t csrow_dev_type_show(struct device *dev, { struct csrow_info *csrow = to_csrow(dev); - return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]); + return sysfs_emit(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]); } static ssize_t csrow_edac_mode_show(struct device *dev, @@ -191,7 +191,7 @@ static ssize_t csrow_edac_mode_show(struct device *dev, { struct csrow_info *csrow = to_csrow(dev); - return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]); + return sysfs_emit(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]); } /* show/store functions for DIMM Label attributes */ @@ -207,8 +207,7 @@ static ssize_t channel_dimm_label_show(struct device *dev, if (!rank->dimm->label[0]) return 0; - return snprintf(data, sizeof(rank->dimm->label) + 1, "%s\n", - rank->dimm->label); + return sysfs_emit(data, "%s\n", rank->dimm->label); } static ssize_t channel_dimm_label_store(struct device *dev, @@ -243,7 +242,7 @@ static ssize_t channel_ce_count_show(struct device *dev, unsigned int chan = to_channel(mattr); struct rank_info *rank = csrow->channels[chan]; - return sprintf(data, "%u\n", rank->ce_count); + return sysfs_emit(data, "%u\n", rank->ce_count); } /* cwrow/attribute files */ @@ -515,7 +514,7 @@ static ssize_t dimmdev_label_show(struct device *dev, if (!dimm->label[0]) return 0; - return snprintf(data, sizeof(dimm->label) + 1, "%s\n", dimm->label); + return sysfs_emit(data, "%s\n", dimm->label); } static ssize_t dimmdev_label_store(struct device *dev, @@ -546,7 +545,7 @@ static ssize_t dimmdev_size_show(struct device *dev, { struct dimm_info *dimm = to_dimm(dev); - return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages)); + return sysfs_emit(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages)); } static ssize_t dimmdev_mem_type_show(struct device *dev, @@ -554,7 +553,7 @@ static ssize_t dimmdev_mem_type_show(struct device *dev, { struct dimm_info *dimm = to_dimm(dev); - return sprintf(data, "%s\n", edac_mem_types[dimm->mtype]); + return sysfs_emit(data, "%s\n", edac_mem_types[dimm->mtype]); } static ssize_t dimmdev_dev_type_show(struct device *dev, @@ -562,7 +561,7 @@ static ssize_t dimmdev_dev_type_show(struct device *dev, { struct dimm_info *dimm = to_dimm(dev); - return sprintf(data, "%s\n", dev_types[dimm->dtype]); + return sysfs_emit(data, "%s\n", dev_types[dimm->dtype]); } static ssize_t dimmdev_edac_mode_show(struct device *dev, @@ -571,7 +570,7 @@ static ssize_t dimmdev_edac_mode_show(struct device *dev, { struct dimm_info *dimm = to_dimm(dev); - return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]); + return sysfs_emit(data, "%s\n", edac_caps[dimm->edac_mode]); } static ssize_t dimmdev_ce_count_show(struct device *dev, @@ -580,7 +579,7 @@ static ssize_t dimmdev_ce_count_show(struct device *dev, { struct dimm_info *dimm = to_dimm(dev); - return sprintf(data, "%u\n", dimm->ce_count); + return sysfs_emit(data, "%u\n", dimm->ce_count); } static ssize_t dimmdev_ue_count_show(struct device *dev, @@ -589,7 +588,7 @@ static ssize_t dimmdev_ue_count_show(struct device *dev, { struct dimm_info *dimm = to_dimm(dev); - return sprintf(data, "%u\n", dimm->ue_count); + return sysfs_emit(data, "%u\n", dimm->ue_count); } /* dimm/rank attribute files */ @@ -758,7 +757,7 @@ static ssize_t mci_sdram_scrub_rate_show(struct device *dev, return bandwidth; } - return sprintf(data, "%d\n", bandwidth); + return 
sysfs_emit(data, "%d\n", bandwidth); } /* default attribute files for the MCI object */ @@ -768,7 +767,7 @@ static ssize_t mci_ue_count_show(struct device *dev, { struct mem_ctl_info *mci = to_mci(dev); - return sprintf(data, "%u\n", mci->ue_mc); + return sysfs_emit(data, "%u\n", mci->ue_mc); } static ssize_t mci_ce_count_show(struct device *dev, @@ -777,7 +776,7 @@ static ssize_t mci_ce_count_show(struct device *dev, { struct mem_ctl_info *mci = to_mci(dev); - return sprintf(data, "%u\n", mci->ce_mc); + return sysfs_emit(data, "%u\n", mci->ce_mc); } static ssize_t mci_ce_noinfo_show(struct device *dev, @@ -786,7 +785,7 @@ static ssize_t mci_ce_noinfo_show(struct device *dev, { struct mem_ctl_info *mci = to_mci(dev); - return sprintf(data, "%u\n", mci->ce_noinfo_count); + return sysfs_emit(data, "%u\n", mci->ce_noinfo_count); } static ssize_t mci_ue_noinfo_show(struct device *dev, @@ -795,7 +794,7 @@ static ssize_t mci_ue_noinfo_show(struct device *dev, { struct mem_ctl_info *mci = to_mci(dev); - return sprintf(data, "%u\n", mci->ue_noinfo_count); + return sysfs_emit(data, "%u\n", mci->ue_noinfo_count); } static ssize_t mci_seconds_show(struct device *dev, @@ -804,7 +803,7 @@ static ssize_t mci_seconds_show(struct device *dev, { struct mem_ctl_info *mci = to_mci(dev); - return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ); + return sysfs_emit(data, "%ld\n", (jiffies - mci->start_time) / HZ); } static ssize_t mci_ctl_name_show(struct device *dev, @@ -813,7 +812,7 @@ static ssize_t mci_ctl_name_show(struct device *dev, { struct mem_ctl_info *mci = to_mci(dev); - return sprintf(data, "%s\n", mci->ctl_name); + return sysfs_emit(data, "%s\n", mci->ctl_name); } static ssize_t mci_size_mb_show(struct device *dev, @@ -833,7 +832,7 @@ static ssize_t mci_size_mb_show(struct device *dev, } } - return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages)); + return sysfs_emit(data, "%u\n", PAGES_TO_MiB(total_pages)); } static ssize_t mci_max_location_show(struct device *dev, diff --git a/drivers/edac/edac_pci.h b/drivers/edac/edac_pci.h index 5175f5724c..3f47cd9b2b 100644 --- a/drivers/edac/edac_pci.h +++ b/drivers/edac/edac_pci.h @@ -22,7 +22,6 @@ #ifndef _EDAC_PCI_H_ #define _EDAC_PCI_H_ -#include #include #include #include @@ -48,8 +47,6 @@ struct edac_pci_ctl_info { int pci_idx; - struct bus_type *edac_subsys; /* pointer to subsystem */ - /* the internal state of this controller instance */ int op_state; /* work struct for this instance */ @@ -72,8 +69,6 @@ struct edac_pci_ctl_info { unsigned long start_time; /* edac_pci load start time (jiffies) */ - struct completion complete; - /* sysfs top name under 'edac' directory * and instance name: * cpu/cpu0/... 
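/*
 * Editor's note -- illustrative sketch, not part of the patch. The
 * edac_mc_sysfs.c hunks above convert sprintf()/snprintf() in sysfs
 * show() callbacks to sysfs_emit(), which assumes the PAGE_SIZE sysfs
 * buffer and cannot overrun it. The converted shape of such a read-only
 * attribute (demo_* names are hypothetical):
 */
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_ce_count_show(struct device *dev,
				  struct device_attribute *attr, char *data)
{
	unsigned int count = 0;         /* hypothetical counter lookup */

	return sysfs_emit(data, "%u\n", count);
}
static DEVICE_ATTR_RO(demo_ce_count);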
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c index 5646c049a9..282ca6535f 100644 --- a/drivers/edac/highbank_l2_edac.c +++ b/drivers/edac/highbank_l2_edac.c @@ -54,7 +54,7 @@ static int highbank_l2_err_probe(struct platform_device *pdev) int res = 0; dci = edac_device_alloc_ctl_info(sizeof(*drvdata), "cpu", - 1, "L", 1, 2, NULL, 0, 0); + 1, "L", 1, 2, 0); if (!dci) return -ENOMEM; diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index c1bc53f4e1..e8945d4adb 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -496,7 +496,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op) return -ENOMEM; edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata), - "cpu", 1, "L", 1, 2, NULL, 0, + "cpu", 1, "L", 1, 2, edac_dev_idx); if (!edac_dev) { devres_release_group(&op->dev, mpc85xx_l2_err_probe); diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c index 4015eb9af6..919095d105 100644 --- a/drivers/edac/octeon_edac-l2c.c +++ b/drivers/edac/octeon_edac-l2c.c @@ -138,7 +138,7 @@ static int octeon_l2c_probe(struct platform_device *pdev) /* 'Tags' are block 0, 'Data' is block 1*/ l2c = edac_device_alloc_ctl_info(0, "l2c", num_tads, "l2c", 2, 0, - NULL, 0, edac_device_alloc_index()); + edac_device_alloc_index()); if (!l2c) return -ENOMEM; diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c index ea8a8e337b..b8404cc7b6 100644 --- a/drivers/edac/octeon_edac-pc.c +++ b/drivers/edac/octeon_edac-pc.c @@ -92,7 +92,7 @@ static int co_cache_error_probe(struct platform_device *pdev) platform_set_drvdata(pdev, p); p->ed = edac_device_alloc_ctl_info(0, "cpu", num_possible_cpus(), - "cache", 2, 0, NULL, 0, + "cache", 2, 0, edac_device_alloc_index()); if (!p->ed) goto err; diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c index 5539917c01..d3cd4cc54a 100644 --- a/drivers/edac/qcom_edac.c +++ b/drivers/edac/qcom_edac.c @@ -349,7 +349,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev) /* Allocate edac control info */ edev_ctl = edac_device_alloc_ctl_info(0, "qcom-llcc", 1, "bank", llcc_driv_data->num_banks, 1, - NULL, 0, edac_device_alloc_index()); if (!edev_ctl) diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c index b844e2626f..a2b193dc66 100644 --- a/drivers/edac/sifive_edac.c +++ b/drivers/edac/sifive_edac.c @@ -52,8 +52,7 @@ static int ecc_register(struct platform_device *pdev) platform_set_drvdata(pdev, p); p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc", - 1, 1, NULL, 0, - edac_device_alloc_index()); + 1, 1, edac_device_alloc_index()); if (!p->dci) return -ENOMEM; diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 27996b7924..8d18099fd5 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -48,7 +48,7 @@ static u64 skx_tolm, skx_tohm; static LIST_HEAD(dev_edac_list); static bool skx_mem_cfg_2lm; -int __init skx_adxl_get(void) +int skx_adxl_get(void) { const char * const *names; int i, j; @@ -110,12 +110,14 @@ err: return -ENODEV; } +EXPORT_SYMBOL_GPL(skx_adxl_get); -void __exit skx_adxl_put(void) +void skx_adxl_put(void) { kfree(adxl_values); kfree(adxl_msg); } +EXPORT_SYMBOL_GPL(skx_adxl_put); static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_mem) { @@ -187,12 +189,14 @@ void skx_set_mem_cfg(bool mem_cfg_2lm) { skx_mem_cfg_2lm = mem_cfg_2lm; } +EXPORT_SYMBOL_GPL(skx_set_mem_cfg); void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f 
show_retry_log) { driver_decode = decode; skx_show_retry_rd_err_log = show_retry_log; } +EXPORT_SYMBOL_GPL(skx_set_decode); int skx_get_src_id(struct skx_dev *d, int off, u8 *id) { @@ -206,6 +210,7 @@ int skx_get_src_id(struct skx_dev *d, int off, u8 *id) *id = GET_BITFIELD(reg, 12, 14); return 0; } +EXPORT_SYMBOL_GPL(skx_get_src_id); int skx_get_node_id(struct skx_dev *d, u8 *id) { @@ -219,6 +224,7 @@ int skx_get_node_id(struct skx_dev *d, u8 *id) *id = GET_BITFIELD(reg, 0, 2); return 0; } +EXPORT_SYMBOL_GPL(skx_get_node_id); static int get_width(u32 mtr) { @@ -284,6 +290,7 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list) *list = &dev_edac_list; return ndev; } +EXPORT_SYMBOL_GPL(skx_get_all_bus_mappings); int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm) { @@ -323,6 +330,7 @@ fail: pci_dev_put(pdev); return -ENODEV; } +EXPORT_SYMBOL_GPL(skx_get_hi_lo); static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval, int maxval, const char *name) @@ -394,6 +402,7 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm, return 1; } +EXPORT_SYMBOL_GPL(skx_get_dimm_info); int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, int chan, int dimmno, const char *mod_str) @@ -442,6 +451,7 @@ unknown_size: return (size == 0 || size == ~0ull) ? 0 : 1; } +EXPORT_SYMBOL_GPL(skx_get_nvdimm_info); int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev, const char *ctl_name, const char *mod_str, @@ -512,6 +522,7 @@ fail0: imc->mci = NULL; return rc; } +EXPORT_SYMBOL_GPL(skx_register_mci); static void skx_unregister_mci(struct skx_imc *imc) { @@ -688,6 +699,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, mce->kflags |= MCE_HANDLED_EDAC; return NOTIFY_DONE; } +EXPORT_SYMBOL_GPL(skx_mce_check_error); void skx_remove(void) { @@ -725,3 +737,8 @@ void skx_remove(void) kfree(d); } } +EXPORT_SYMBOL_GPL(skx_remove); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Tony Luck"); +MODULE_DESCRIPTION("MC Driver for Intel server processors"); diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h index b6d3607dff..11faf1db4f 100644 --- a/drivers/edac/skx_common.h +++ b/drivers/edac/skx_common.h @@ -231,8 +231,8 @@ typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci, typedef bool (*skx_decode_f)(struct decoded_addr *res); typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len, bool scrub_err); -int __init skx_adxl_get(void); -void __exit skx_adxl_put(void); +int skx_adxl_get(void); +void skx_adxl_put(void); void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log); void skx_set_mem_cfg(bool mem_cfg_2lm); diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c index 90d46e5c4f..fab9891e56 100644 --- a/drivers/edac/thunderx_edac.c +++ b/drivers/edac/thunderx_edac.c @@ -1365,8 +1365,7 @@ static int thunderx_ocx_probe(struct pci_dev *pdev, idx = edac_device_alloc_index(); snprintf(name, sizeof(name), "OCX%d", idx); edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_ocx), - name, 1, "CCPI", 1, - 0, NULL, 0, idx); + name, 1, "CCPI", 1, 0, idx); if (!edac_dev) { dev_err(&pdev->dev, "Cannot allocate EDAC device\n"); return -ENOMEM; @@ -2004,8 +2003,7 @@ static int thunderx_l2c_probe(struct pci_dev *pdev, snprintf(name, sizeof(name), fmt, idx); edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_l2c), - name, 1, "L2C", 1, 0, - NULL, 0, idx); + name, 1, "L2C", 1, 0, idx); if (!edac_dev) { 
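/*
 * Editor's note -- illustrative sketch, not part of the patch. The
 * skx_common.c hunks above turn the file into a helper module shared by
 * skx_edac and i10nm_edac: each helper gains EXPORT_SYMBOL_GPL(), the file
 * gains MODULE_LICENSE()/MODULE_DESCRIPTION(), and skx_adxl_get()/
 * skx_adxl_put() lose their __init/__exit annotations, since code placed
 * in the init/exit sections may be discarded and must not be exported to
 * other modules. The minimal shape of such a shared helper module
 * (demo_* names are hypothetical):
 */
#include <linux/module.h>

int demo_shared_helper(void)            /* note: no __init */
{
	return 0;
}
EXPORT_SYMBOL_GPL(demo_shared_helper);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Demo of a helper module shared by two EDAC drivers");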
dev_err(&pdev->dev, "Cannot allocate EDAC device\n"); return -ENOMEM; diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c index 1b50f81600..fd87f1b2c1 100644 --- a/drivers/edac/xgene_edac.c +++ b/drivers/edac/xgene_edac.c @@ -913,8 +913,8 @@ static int xgene_edac_pmd_add(struct xgene_edac *edac, struct device_node *np, snprintf(edac_name, sizeof(edac_name), "l2c%d", pmd); edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx), - edac_name, 1, "l2c", 1, 2, NULL, - 0, edac_device_alloc_index()); + edac_name, 1, "l2c", 1, 2, + edac_device_alloc_index()); if (!edac_dev) { rc = -ENOMEM; goto err_group; @@ -1208,8 +1208,7 @@ static int xgene_edac_l3_add(struct xgene_edac *edac, struct device_node *np, edac_idx = edac_device_alloc_index(); edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx), - "l3c", 1, "l3c", 1, 0, NULL, 0, - edac_idx); + "l3c", 1, "l3c", 1, 0, edac_idx); if (!edac_dev) { rc = -ENOMEM; goto err_release_group; @@ -1748,8 +1747,7 @@ static int xgene_edac_soc_add(struct xgene_edac *edac, struct device_node *np, edac_idx = edac_device_alloc_index(); edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx), - "SOC", 1, "SOC", 1, 2, NULL, 0, - edac_idx); + "SOC", 1, "SOC", 1, 2, edac_idx); if (!edac_dev) { rc = -ENOMEM; goto err_release_group; diff --git a/drivers/edac/zynqmp_edac.c b/drivers/edac/zynqmp_edac.c index 2d9a5cfd89..c9dc78d8c8 100644 --- a/drivers/edac/zynqmp_edac.c +++ b/drivers/edac/zynqmp_edac.c @@ -381,7 +381,7 @@ static int edac_probe(struct platform_device *pdev) } dci = edac_device_alloc_ctl_info(sizeof(*priv), ZYNQMP_OCM_EDAC_STRING, - 1, ZYNQMP_OCM_EDAC_STRING, 1, 0, NULL, 0, + 1, ZYNQMP_OCM_EDAC_STRING, 1, 0, edac_device_alloc_index()); if (!dci) return -ENOMEM; diff --git a/drivers/eisa/Kconfig b/drivers/eisa/Kconfig index c8bbf90209..a66b3be502 100644 --- a/drivers/eisa/Kconfig +++ b/drivers/eisa/Kconfig @@ -44,17 +44,16 @@ config EISA_PCI_EISA When in doubt, say Y. -# Using EISA_VIRTUAL_ROOT on something other than an Alpha or -# an X86 may lead to crashes... +# Using EISA_VIRTUAL_ROOT on something other than an X86 may lead +# to crashes... config EISA_VIRTUAL_ROOT bool "EISA virtual root device" - depends on EISA && (ALPHA || X86) + depends on EISA && X86 default y help Activate this option if your system only have EISA bus - (no PCI slots). The Alpha Jensen is an example of such - a system. + (no PCI slots). When in doubt, say Y. diff --git a/drivers/eisa/virtual_root.c b/drivers/eisa/virtual_root.c index 37e6dd219c..cd9515d9d8 100644 --- a/drivers/eisa/virtual_root.c +++ b/drivers/eisa/virtual_root.c @@ -13,7 +13,7 @@ #include #include -#if defined(CONFIG_ALPHA_JENSEN) || defined(CONFIG_EISA_VLB_PRIMING) +#if defined(CONFIG_EISA_VLB_PRIMING) #define EISA_FORCE_PROBE_DEFAULT 1 #else #define EISA_FORCE_PROBE_DEFAULT 0 diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c index 0317b614b6..125016da7f 100644 --- a/drivers/extcon/extcon-adc-jack.c +++ b/drivers/extcon/extcon-adc-jack.c @@ -26,6 +26,7 @@ /** * struct adc_jack_data - internal data for adc_jack device driver + * @dev: The device structure associated with the adc_jack. * @edev: extcon device. * @cable_names: list of supported cables. * @adc_conditions: list of adc value conditions. @@ -35,6 +36,7 @@ * handling at handling_delay jiffies. * @handler: extcon event handler called by interrupt handler. * @chan: iio channel being queried. + * @wakeup_source: Indicates if the device can wake up the system. 
*/ struct adc_jack_data { struct device *dev; @@ -158,14 +160,12 @@ static int adc_jack_probe(struct platform_device *pdev) return 0; } -static int adc_jack_remove(struct platform_device *pdev) +static void adc_jack_remove(struct platform_device *pdev) { struct adc_jack_data *data = platform_get_drvdata(pdev); free_irq(data->irq, data); cancel_work_sync(&data->handler.work); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -196,7 +196,7 @@ static SIMPLE_DEV_PM_OPS(adc_jack_pm_ops, static struct platform_driver adc_jack_driver = { .probe = adc_jack_probe, - .remove = adc_jack_remove, + .remove_new = adc_jack_remove, .driver = { .name = "adc-jack", .pm = &adc_jack_pm_ops, diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c index 2c55f06ba6..733c470c31 100644 --- a/drivers/extcon/extcon-intel-cht-wc.c +++ b/drivers/extcon/extcon-intel-cht-wc.c @@ -617,13 +617,11 @@ disable_sw_control: return ret; } -static int cht_wc_extcon_remove(struct platform_device *pdev) +static void cht_wc_extcon_remove(struct platform_device *pdev) { struct cht_wc_extcon_data *ext = platform_get_drvdata(pdev); cht_wc_extcon_sw_control(ext, false); - - return 0; } static const struct platform_device_id cht_wc_extcon_table[] = { @@ -634,7 +632,7 @@ MODULE_DEVICE_TABLE(platform, cht_wc_extcon_table); static struct platform_driver cht_wc_extcon_driver = { .probe = cht_wc_extcon_probe, - .remove = cht_wc_extcon_remove, + .remove_new = cht_wc_extcon_remove, .id_table = cht_wc_extcon_table, .driver = { .name = "cht_wcove_pwrsrc", diff --git a/drivers/extcon/extcon-intel-mrfld.c b/drivers/extcon/extcon-intel-mrfld.c index cd1a5f2300..a1f737f13d 100644 --- a/drivers/extcon/extcon-intel-mrfld.c +++ b/drivers/extcon/extcon-intel-mrfld.c @@ -214,27 +214,21 @@ static int mrfld_extcon_probe(struct platform_device *pdev) data->edev = devm_extcon_dev_allocate(dev, mrfld_extcon_cable); if (IS_ERR(data->edev)) - return -ENOMEM; + return PTR_ERR(data->edev); ret = devm_extcon_dev_register(dev, data->edev); - if (ret < 0) { - dev_err(dev, "can't register extcon device: %d\n", ret); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, "can't register extcon device\n"); ret = devm_request_threaded_irq(dev, irq, NULL, mrfld_extcon_interrupt, IRQF_ONESHOT | IRQF_SHARED, pdev->name, data); - if (ret) { - dev_err(dev, "can't register IRQ handler: %d\n", ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "can't register IRQ handler\n"); ret = regmap_read(regmap, BCOVE_ID, &id); - if (ret) { - dev_err(dev, "can't read PMIC ID: %d\n", ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "can't read PMIC ID\n"); data->id = id; @@ -263,13 +257,11 @@ static int mrfld_extcon_probe(struct platform_device *pdev) return 0; } -static int mrfld_extcon_remove(struct platform_device *pdev) +static void mrfld_extcon_remove(struct platform_device *pdev) { struct mrfld_extcon_data *data = platform_get_drvdata(pdev); mrfld_extcon_sw_control(data, false); - - return 0; } static const struct platform_device_id mrfld_extcon_id_table[] = { @@ -283,7 +275,7 @@ static struct platform_driver mrfld_extcon_driver = { .name = "mrfld_bcove_pwrsrc", }, .probe = mrfld_extcon_probe, - .remove = mrfld_extcon_remove, + .remove_new = mrfld_extcon_remove, .id_table = mrfld_extcon_id_table, }; module_platform_driver(mrfld_extcon_driver); diff --git a/drivers/extcon/extcon-max3355.c b/drivers/extcon/extcon-max3355.c index d7795607f6..e62ce7a8d1 100644 --- a/drivers/extcon/extcon-max3355.c +++ 
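/*
 * Editor's note -- illustrative sketch, not part of the patch. The
 * extcon-intel-mrfld.c hunk above folds each dev_err()+return pair into
 * dev_err_probe(), which returns the error it is given and logs it,
 * except for -EPROBE_DEFER where it records the deferral reason instead
 * of spamming the log. The resulting probe-error idiom (demo_* names are
 * hypothetical):
 */
#include <linux/device.h>
#include <linux/platform_device.h>

static int demo_register_parts(struct device *dev)
{
	return 0;       /* hypothetical stand-in for the real registrations */
}

static int demo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	ret = demo_register_parts(dev);
	if (ret)
		return dev_err_probe(dev, ret, "can't register demo device\n");

	return 0;
}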
b/drivers/extcon/extcon-max3355.c @@ -112,13 +112,11 @@ static int max3355_probe(struct platform_device *pdev) return 0; } -static int max3355_remove(struct platform_device *pdev) +static void max3355_remove(struct platform_device *pdev) { struct max3355_data *data = platform_get_drvdata(pdev); gpiod_set_value_cansleep(data->shdn_gpiod, 0); - - return 0; } static const struct of_device_id max3355_match_table[] = { @@ -129,7 +127,7 @@ MODULE_DEVICE_TABLE(of, max3355_match_table); static struct platform_driver max3355_driver = { .probe = max3355_probe, - .remove = max3355_remove, + .remove_new = max3355_remove, .driver = { .name = "extcon-max3355", .of_match_table = max3355_match_table, diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c index acb11a54f8..9849e3b832 100644 --- a/drivers/extcon/extcon-max77843.c +++ b/drivers/extcon/extcon-max77843.c @@ -928,7 +928,7 @@ err_muic_irq: return ret; } -static int max77843_muic_remove(struct platform_device *pdev) +static void max77843_muic_remove(struct platform_device *pdev) { struct max77843_muic_info *info = platform_get_drvdata(pdev); struct max77693_dev *max77843 = info->max77843; @@ -936,8 +936,6 @@ static int max77843_muic_remove(struct platform_device *pdev) cancel_work_sync(&info->irq_work); regmap_del_irq_chip(max77843->irq, max77843->irq_data_muic); i2c_unregister_device(max77843->i2c_muic); - - return 0; } static const struct platform_device_id max77843_muic_id[] = { @@ -958,7 +956,7 @@ static struct platform_driver max77843_muic_driver = { .of_match_table = of_max77843_muic_dt_match, }, .probe = max77843_muic_probe, - .remove = max77843_muic_remove, + .remove_new = max77843_muic_remove, .id_table = max77843_muic_id, }; diff --git a/drivers/extcon/extcon-rtk-type-c.c b/drivers/extcon/extcon-rtk-type-c.c index a592bab775..19a01e6637 100644 --- a/drivers/extcon/extcon-rtk-type-c.c +++ b/drivers/extcon/extcon-rtk-type-c.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c index 40d967a11e..9b61eb99b7 100644 --- a/drivers/extcon/extcon-usb-gpio.c +++ b/drivers/extcon/extcon-usb-gpio.c @@ -193,14 +193,12 @@ static int usb_extcon_probe(struct platform_device *pdev) return 0; } -static int usb_extcon_remove(struct platform_device *pdev) +static void usb_extcon_remove(struct platform_device *pdev) { struct usb_extcon_info *info = platform_get_drvdata(pdev); cancel_delayed_work_sync(&info->wq_detcable); device_init_wakeup(&pdev->dev, false); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -281,7 +279,7 @@ MODULE_DEVICE_TABLE(platform, usb_extcon_platform_ids); static struct platform_driver usb_extcon_driver = { .probe = usb_extcon_probe, - .remove = usb_extcon_remove, + .remove_new = usb_extcon_remove, .driver = { .name = "extcon-usb-gpio", .pm = &usb_extcon_pm_ops, diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c index fde1db62be..805a472306 100644 --- a/drivers/extcon/extcon-usbc-cros-ec.c +++ b/drivers/extcon/extcon-usbc-cros-ec.c @@ -480,14 +480,12 @@ unregister_notifier: return ret; } -static int extcon_cros_ec_remove(struct platform_device *pdev) +static void extcon_cros_ec_remove(struct platform_device *pdev) { struct cros_ec_extcon_info *info = platform_get_drvdata(pdev); blocking_notifier_chain_unregister(&info->ec->event_notifier, &info->notifier); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -531,7 +529,7 @@ static struct platform_driver extcon_cros_ec_driver = { 
.of_match_table = of_match_ptr(extcon_cros_ec_of_match), .pm = DEV_PM_OPS, }, - .remove = extcon_cros_ec_remove, + .remove_new = extcon_cros_ec_remove, .probe = extcon_cros_ec_probe, }; diff --git a/drivers/firewire/.kunitconfig b/drivers/firewire/.kunitconfig index 76444a2d5e..60d9e7c354 100644 --- a/drivers/firewire/.kunitconfig +++ b/drivers/firewire/.kunitconfig @@ -3,3 +3,4 @@ CONFIG_PCI=y CONFIG_FIREWIRE=y CONFIG_FIREWIRE_KUNIT_UAPI_TEST=y CONFIG_FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST=y +CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST=y diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index 552a39df8c..5268b3f0a2 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig @@ -11,7 +11,7 @@ config FIREWIRE This is the new-generation IEEE 1394 (FireWire) driver stack a.k.a. Juju, a new implementation designed for robustness and simplicity. - See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration + See http://ieee1394.docs.kernel.org/en/latest/migration.html for information about migration from the older Linux 1394 stack to the new driver stack. @@ -50,6 +50,22 @@ config FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST For more information on KUnit and unit tests in general, refer to the KUnit documentation in Documentation/dev-tools/kunit/. +config FIREWIRE_KUNIT_PACKET_SERDES_TEST + tristate "KUnit tests for packet serialization/deserialization" if !KUNIT_ALL_TESTS + depends on FIREWIRE && KUNIT + default KUNIT_ALL_TESTS + help + This builds the KUnit tests for packet serialization and + deserialization. + + KUnit tests run during boot and output the results to the debug + log in TAP format (https://testanything.org/). Only useful for + kernel devs running KUnit test harness and are not for inclusion + into a production build. + + For more information on KUnit and unit tests in general, refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + config FIREWIRE_OHCI tristate "OHCI-1394 controllers" depends on PCI && FIREWIRE && MMU diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile index b24b2879ac..75c47d0469 100644 --- a/drivers/firewire/Makefile +++ b/drivers/firewire/Makefile @@ -3,7 +3,7 @@ # Makefile for the Linux IEEE 1394 implementation # -firewire-core-y += core-card.o core-cdev.o core-device.o \ +firewire-core-y += core-trace.o core-card.o core-cdev.o core-device.o \ core-iso.o core-topology.o core-transaction.o firewire-ohci-y += ohci.o firewire-sbp2-y += sbp2.o @@ -16,5 +16,5 @@ obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o -firewire-uapi-test-objs += uapi-test.o -obj-$(CONFIG_FIREWIRE_KUNIT_UAPI_TEST) += firewire-uapi-test.o +obj-$(CONFIG_FIREWIRE_KUNIT_UAPI_TEST) += uapi-test.o +obj-$(CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST) += packet-serdes-test.o diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 401a77e3b5..f8b99dd6cd 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -23,6 +23,7 @@ #include #include "core.h" +#include #define define_fw_printk_level(func, kern_level) \ void func(const struct fw_card *card, const char *fmt, ...) \ @@ -221,11 +222,15 @@ static int reset_bus(struct fw_card *card, bool short_reset) int reg = short_reset ? 5 : 1; int bit = short_reset ? 
PHY_BUS_SHORT_RESET : PHY_BUS_RESET; + trace_bus_reset_initiate(card->index, card->generation, short_reset); + return card->driver->update_phy_reg(card, reg, 0, bit); } void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset) { + trace_bus_reset_schedule(card->index, card->generation, short_reset); + /* We don't try hard to sort out requests of long vs. short resets. */ card->br_short = short_reset; @@ -244,6 +249,8 @@ static void br_work(struct work_struct *work) /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */ if (card->reset_jiffies != 0 && time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) { + trace_bus_reset_postpone(card->index, card->generation, card->br_short); + if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ)) fw_card_put(card); return; diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 6274b86eb9..9a7dc90330 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -35,6 +35,7 @@ #include "core.h" +#include /* * ABI version history is documented in linux/firewire-cdev.h. @@ -1558,6 +1559,9 @@ static void outbound_phy_packet_callback(struct fw_packet *packet, struct client *e_client = e->client; u32 rcode; + trace_async_phy_outbound_complete((uintptr_t)packet, card->index, status, packet->generation, + packet->timestamp); + switch (status) { // expected: case ACK_COMPLETE: @@ -1655,6 +1659,9 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) memcpy(pp->data, a->data, sizeof(a->data)); } + trace_async_phy_outbound_initiate((uintptr_t)&e->p, card->index, e->p.generation, + e->p.header[1], e->p.header[2]); + card->driver->send_request(card, &e->p); return 0; diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index f40c815343..8107eebd42 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -20,6 +20,7 @@ #include #include "core.h" +#include #define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f) #define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01) @@ -507,6 +508,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, struct fw_node *local_node; unsigned long flags; + trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count); + spin_lock_irqsave(&card->lock, flags); /* diff --git a/drivers/firewire/core-trace.c b/drivers/firewire/core-trace.c new file mode 100644 index 0000000000..96cbd9d384 --- /dev/null +++ b/drivers/firewire/core-trace.c @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright (c) 2024 Takashi Sakamoto + +#define CREATE_TRACE_POINTS +#include diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 130b95aca6..76ab6a2097 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -29,29 +29,11 @@ #include #include "core.h" +#include +#include "packet-header-definitions.h" -#define HEADER_PRI(pri) ((pri) << 0) -#define HEADER_TCODE(tcode) ((tcode) << 4) -#define HEADER_RETRY(retry) ((retry) << 8) -#define HEADER_TLABEL(tlabel) ((tlabel) << 10) -#define HEADER_DESTINATION(destination) ((destination) << 16) -#define HEADER_SOURCE(source) ((source) << 16) -#define HEADER_RCODE(rcode) ((rcode) << 12) -#define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0) -#define HEADER_DATA_LENGTH(length) ((length) << 16) -#define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0) - -#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) -#define 
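/*
 * Editor's note -- illustrative sketch, not part of the patch. The new
 * drivers/firewire/core-trace.c above is the single translation unit that
 * defines the firewire tracepoints: it sets CREATE_TRACE_POINTS before
 * including the trace header, while core-card.c, core-cdev.c,
 * core-topology.c and core-transaction.c include the same header without
 * it and only get the trace_*() declarations (the fsl-edma trace header at
 * the top of this section relies on the same machinery via
 * TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE). The general pattern, with a
 * hypothetical trace header name:
 */

/* demo-trace.c: exactly one file per trace header does this. */
#define CREATE_TRACE_POINTS
#include <trace/events/demo.h>  /* hypothetical; defines trace_demo_*() */

/*
 * Every other .c file that wants to emit trace_demo_*() events simply
 * includes <trace/events/demo.h> without defining CREATE_TRACE_POINTS.
 */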
HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f) -#define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f) -#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) -#define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff) -#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) -#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) -#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) - -#define HEADER_DESTINATION_IS_BROADCAST(q) \ - (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f)) +#define HEADER_DESTINATION_IS_BROADCAST(header) \ + ((async_header_get_destination(header) & 0x3f) == 0x3f) #define PHY_PACKET_CONFIG 0x0 #define PHY_PACKET_LINK_ON 0x1 @@ -192,6 +174,9 @@ static void transmit_complete_callback(struct fw_packet *packet, struct fw_transaction *t = container_of(packet, struct fw_transaction, packet); + trace_async_request_outbound_complete((uintptr_t)t, card->index, packet->generation, + packet->speed, status, packet->timestamp); + switch (status) { case ACK_COMPLETE: close_transaction(t, card, RCODE_COMPLETE, packet->timestamp); @@ -231,10 +216,11 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, int ext_tcode; if (tcode == TCODE_STREAM_DATA) { - packet->header[0] = - HEADER_DATA_LENGTH(length) | - destination_id | - HEADER_TCODE(TCODE_STREAM_DATA); + // The value of destination_id argument should include tag, channel, and sy fields + // as isochronous packet header has. + packet->header[0] = destination_id; + isoc_header_set_data_length(packet->header, length); + isoc_header_set_tcode(packet->header, TCODE_STREAM_DATA); packet->header_length = 4; packet->payload = payload; packet->payload_length = length; @@ -248,28 +234,24 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, } else ext_tcode = 0; - packet->header[0] = - HEADER_RETRY(RETRY_X) | - HEADER_TLABEL(tlabel) | - HEADER_TCODE(tcode) | - HEADER_DESTINATION(destination_id); - packet->header[1] = - HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id); - packet->header[2] = - offset; + async_header_set_retry(packet->header, RETRY_X); + async_header_set_tlabel(packet->header, tlabel); + async_header_set_tcode(packet->header, tcode); + async_header_set_destination(packet->header, destination_id); + async_header_set_source(packet->header, source_id); + async_header_set_offset(packet->header, offset); switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: - packet->header[3] = *(u32 *)payload; + async_header_set_quadlet_data(packet->header, *(u32 *)payload); packet->header_length = 16; packet->payload_length = 0; break; case TCODE_LOCK_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: - packet->header[3] = - HEADER_DATA_LENGTH(length) | - HEADER_EXTENDED_TCODE(ext_tcode); + async_header_set_data_length(packet->header, length); + async_header_set_extended_tcode(packet->header, ext_tcode); packet->header_length = 16; packet->payload = payload; packet->payload_length = length; @@ -281,9 +263,8 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, break; case TCODE_READ_BLOCK_REQUEST: - packet->header[3] = - HEADER_DATA_LENGTH(length) | - HEADER_EXTENDED_TCODE(ext_tcode); + async_header_set_data_length(packet->header, length); + async_header_set_extended_tcode(packet->header, ext_tcode); packet->header_length = 16; packet->payload_length = 0; break; @@ -417,6 +398,10 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode spin_unlock_irqrestore(&card->lock, flags); + trace_async_request_outbound_initiate((uintptr_t)t, 
card->index, generation, speed, + t->packet.header, payload, + tcode_is_read_request(tcode) ? 0 : length / 4); + card->driver->send_request(card, &t->packet); } EXPORT_SYMBOL_GPL(__fw_send_request); @@ -479,6 +464,8 @@ static DECLARE_COMPLETION(phy_config_done); static void transmit_phy_packet_callback(struct fw_packet *packet, struct fw_card *card, int status) { + trace_async_phy_outbound_complete((uintptr_t)packet, card->index, packet->generation, status, + packet->timestamp); complete(&phy_config_done); } @@ -517,6 +504,10 @@ void fw_send_phy_config(struct fw_card *card, phy_config_packet.generation = generation; reinit_completion(&phy_config_done); + trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, card->index, + phy_config_packet.generation, phy_config_packet.header[1], + phy_config_packet.header[2]); + card->driver->send_request(card, &phy_config_packet); wait_for_completion_timeout(&phy_config_done, timeout); @@ -655,7 +646,7 @@ EXPORT_SYMBOL(fw_core_remove_address_handler); struct fw_request { struct kref kref; struct fw_packet response; - u32 request_header[4]; + u32 request_header[ASYNC_HEADER_QUADLET_COUNT]; int ack; u32 timestamp; u32 length; @@ -684,6 +675,9 @@ static void free_response_callback(struct fw_packet *packet, { struct fw_request *request = container_of(packet, struct fw_request, response); + trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation, + packet->speed, status, packet->timestamp); + // Decrease the reference count since not at in-flight. fw_request_put(request); @@ -695,7 +689,7 @@ int fw_get_response_length(struct fw_request *r) { int tcode, ext_tcode, data_length; - tcode = HEADER_GET_TCODE(r->request_header[0]); + tcode = async_header_get_tcode(r->request_header); switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: @@ -706,12 +700,12 @@ int fw_get_response_length(struct fw_request *r) return 4; case TCODE_READ_BLOCK_REQUEST: - data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); + data_length = async_header_get_data_length(r->request_header); return data_length; case TCODE_LOCK_REQUEST: - ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]); - data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]); + ext_tcode = async_header_get_extended_tcode(r->request_header); + data_length = async_header_get_data_length(r->request_header); switch (ext_tcode) { case EXTCODE_FETCH_ADD: case EXTCODE_LITTLE_ADD: @@ -731,46 +725,42 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header, { int tcode, tlabel, extended_tcode, source, destination; - tcode = HEADER_GET_TCODE(request_header[0]); - tlabel = HEADER_GET_TLABEL(request_header[0]); - source = HEADER_GET_DESTINATION(request_header[0]); - destination = HEADER_GET_SOURCE(request_header[1]); - extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]); - - response->header[0] = - HEADER_RETRY(RETRY_1) | - HEADER_TLABEL(tlabel) | - HEADER_DESTINATION(destination); - response->header[1] = - HEADER_SOURCE(source) | - HEADER_RCODE(rcode); - response->header[2] = 0; + tcode = async_header_get_tcode(request_header); + tlabel = async_header_get_tlabel(request_header); + source = async_header_get_destination(request_header); // Exchange. + destination = async_header_get_source(request_header); // Exchange. 
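/*
 * A minimal standalone sketch (plain userspace C, not kernel code) of the
 * getter/setter pattern the async_header_*() helpers used above follow: each
 * field has a mask and a shift, the getter masks and shifts down, and the
 * setter clears the field before OR-ing the shifted value in. The DESTINATION
 * constants are copied from packet-header-definitions.h introduced later in
 * this patch; main() is only a hypothetical round-trip check.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ASYNC_HEADER_QUADLET_COUNT		4
#define ASYNC_HEADER_Q0_DESTINATION_SHIFT	16
#define ASYNC_HEADER_Q0_DESTINATION_MASK	0xffff0000

static unsigned int async_header_get_destination(const uint32_t header[ASYNC_HEADER_QUADLET_COUNT])
{
	return (header[0] & ASYNC_HEADER_Q0_DESTINATION_MASK) >> ASYNC_HEADER_Q0_DESTINATION_SHIFT;
}

static void async_header_set_destination(uint32_t header[ASYNC_HEADER_QUADLET_COUNT],
					 unsigned int destination)
{
	/* Read-modify-write: clear the field, then OR in the shifted value. */
	header[0] &= ~ASYNC_HEADER_Q0_DESTINATION_MASK;
	header[0] |= ((uint32_t)destination << ASYNC_HEADER_Q0_DESTINATION_SHIFT) &
		     ASYNC_HEADER_Q0_DESTINATION_MASK;
}

int main(void)
{
	uint32_t header[ASYNC_HEADER_QUADLET_COUNT] = { 0xffffffff, 0, 0, 0 };

	/* 0xffc0 is the local-bus node ID form used by the serdes tests below. */
	async_header_set_destination(header, 0xffc0);
	assert(async_header_get_destination(header) == 0xffc0);
	/* The other quadlet-0 fields (tlabel, retry, tcode, priority) are untouched. */
	assert((header[0] & 0x0000ffff) == 0x0000ffff);
	printf("quadlet 0: 0x%08x\n", header[0]);
	return 0;
}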
+ extended_tcode = async_header_get_extended_tcode(request_header); + + async_header_set_retry(response->header, RETRY_1); + async_header_set_tlabel(response->header, tlabel); + async_header_set_destination(response->header, destination); + async_header_set_source(response->header, source); + async_header_set_rcode(response->header, rcode); + response->header[2] = 0; // The field is reserved. switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: - response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE); + async_header_set_tcode(response->header, TCODE_WRITE_RESPONSE); response->header_length = 12; response->payload_length = 0; break; case TCODE_READ_QUADLET_REQUEST: - response->header[0] |= - HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE); + async_header_set_tcode(response->header, TCODE_READ_QUADLET_RESPONSE); if (payload != NULL) - response->header[3] = *(u32 *)payload; + async_header_set_quadlet_data(response->header, *(u32 *)payload); else - response->header[3] = 0; + async_header_set_quadlet_data(response->header, 0); response->header_length = 16; response->payload_length = 0; break; case TCODE_READ_BLOCK_REQUEST: case TCODE_LOCK_REQUEST: - response->header[0] |= HEADER_TCODE(tcode + 2); - response->header[3] = - HEADER_DATA_LENGTH(length) | - HEADER_EXTENDED_TCODE(extended_tcode); + async_header_set_tcode(response->header, tcode + 2); + async_header_set_data_length(response->header, length); + async_header_set_extended_tcode(response->header, extended_tcode); response->header_length = 16; response->payload = payload; response->payload_length = length; @@ -807,7 +797,7 @@ static struct fw_request *allocate_request(struct fw_card *card, u32 *data, length; int request_tcode; - request_tcode = HEADER_GET_TCODE(p->header[0]); + request_tcode = async_header_get_tcode(p->header); switch (request_tcode) { case TCODE_WRITE_QUADLET_REQUEST: data = &p->header[3]; @@ -817,7 +807,7 @@ static struct fw_request *allocate_request(struct fw_card *card, case TCODE_WRITE_BLOCK_REQUEST: case TCODE_LOCK_REQUEST: data = p->payload; - length = HEADER_GET_DATA_LENGTH(p->header[3]); + length = async_header_get_data_length(p->header); break; case TCODE_READ_QUADLET_REQUEST: @@ -827,7 +817,7 @@ static struct fw_request *allocate_request(struct fw_card *card, case TCODE_READ_BLOCK_REQUEST: data = NULL; - length = HEADER_GET_DATA_LENGTH(p->header[3]); + length = async_header_get_data_length(p->header); break; default: @@ -870,24 +860,31 @@ static struct fw_request *allocate_request(struct fw_card *card, void fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) { + u32 *data = NULL; + unsigned int data_length = 0; + /* unified transaction or broadcast transaction: don't respond */ if (request->ack != ACK_PENDING || - HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) { + HEADER_DESTINATION_IS_BROADCAST(request->request_header)) { fw_request_put(request); return; } - if (rcode == RCODE_COMPLETE) - fw_fill_response(&request->response, request->request_header, - rcode, request->data, - fw_get_response_length(request)); - else - fw_fill_response(&request->response, request->request_header, - rcode, NULL, 0); + if (rcode == RCODE_COMPLETE) { + data = request->data; + data_length = fw_get_response_length(request); + } + + fw_fill_response(&request->response, request->request_header, rcode, data, data_length); // Increase the reference count so that the object is kept during in-flight. 
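/*
 * A small standalone sketch of the tcode mapping that fw_fill_response() above
 * performs once destination and source have been exchanged: both write request
 * tcodes collapse onto the single write-response tcode, while read and lock
 * requests respond with "request tcode + 2". The TCODE_* values match the
 * constants referenced elsewhere in this patch; response_tcode() itself is a
 * hypothetical helper for illustration only.
 */
#include <assert.h>

#define TCODE_WRITE_QUADLET_REQUEST	0x0
#define TCODE_WRITE_BLOCK_REQUEST	0x1
#define TCODE_WRITE_RESPONSE		0x2
#define TCODE_READ_QUADLET_REQUEST	0x4
#define TCODE_READ_BLOCK_REQUEST	0x5
#define TCODE_READ_QUADLET_RESPONSE	0x6
#define TCODE_READ_BLOCK_RESPONSE	0x7
#define TCODE_LOCK_REQUEST		0x9
#define TCODE_LOCK_RESPONSE		0xb

static int response_tcode(int request_tcode)
{
	switch (request_tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		return TCODE_WRITE_RESPONSE;
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		/* Read and lock responses are simply the request tcode plus two. */
		return request_tcode + 2;
	default:
		return -1;
	}
}

int main(void)
{
	assert(response_tcode(TCODE_WRITE_BLOCK_REQUEST) == TCODE_WRITE_RESPONSE);
	assert(response_tcode(TCODE_READ_QUADLET_REQUEST) == TCODE_READ_QUADLET_RESPONSE);
	assert(response_tcode(TCODE_READ_BLOCK_REQUEST) == TCODE_READ_BLOCK_RESPONSE);
	assert(response_tcode(TCODE_LOCK_REQUEST) == TCODE_LOCK_RESPONSE);
	return 0;
}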
fw_request_get(request); + trace_async_response_outbound_initiate((uintptr_t)request, card->index, + request->response.generation, request->response.speed, + request->response.header, data, + data ? data_length / 4 : 0); + card->driver->send_response(card, &request->response); } EXPORT_SYMBOL(fw_send_response); @@ -926,11 +923,11 @@ static void handle_exclusive_region_request(struct fw_card *card, struct fw_address_handler *handler; int tcode, destination, source; - destination = HEADER_GET_DESTINATION(p->header[0]); - source = HEADER_GET_SOURCE(p->header[1]); - tcode = HEADER_GET_TCODE(p->header[0]); + destination = async_header_get_destination(p->header); + source = async_header_get_source(p->header); + tcode = async_header_get_tcode(p->header); if (tcode == TCODE_LOCK_REQUEST) - tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]); + tcode = 0x10 + async_header_get_extended_tcode(p->header); rcu_read_lock(); handler = lookup_enclosing_address_handler(&address_handler_list, @@ -963,9 +960,9 @@ static void handle_fcp_region_request(struct fw_card *card, return; } - tcode = HEADER_GET_TCODE(p->header[0]); - destination = HEADER_GET_DESTINATION(p->header[0]); - source = HEADER_GET_SOURCE(p->header[1]); + tcode = async_header_get_tcode(p->header); + destination = async_header_get_destination(p->header); + source = async_header_get_source(p->header); if (tcode != TCODE_WRITE_QUADLET_REQUEST && tcode != TCODE_WRITE_BLOCK_REQUEST) { @@ -993,11 +990,15 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) { struct fw_request *request; unsigned long long offset; + unsigned int tcode; if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) return; - if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) { + tcode = async_header_get_tcode(p->header); + if (tcode_is_link_internal(tcode)) { + trace_async_phy_inbound((uintptr_t)p, card->index, p->generation, p->ack, p->timestamp, + p->header[1], p->header[2]); fw_cdev_handle_phy_packet(card, p); return; } @@ -1008,8 +1009,11 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) return; } - offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | - p->header[2]; + trace_async_request_inbound((uintptr_t)request, card->index, p->generation, p->speed, + p->ack, p->timestamp, p->header, request->data, + tcode_is_read_request(tcode) ? 
0 : request->length / 4); + + offset = async_header_get_offset(p->header); if (!is_in_fcp_region(offset, request->length)) handle_exclusive_region_request(card, p, request, offset); @@ -1027,37 +1031,15 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) size_t data_length; int tcode, tlabel, source, rcode; - tcode = HEADER_GET_TCODE(p->header[0]); - tlabel = HEADER_GET_TLABEL(p->header[0]); - source = HEADER_GET_SOURCE(p->header[1]); - rcode = HEADER_GET_RCODE(p->header[1]); + tcode = async_header_get_tcode(p->header); + tlabel = async_header_get_tlabel(p->header); + source = async_header_get_source(p->header); + rcode = async_header_get_rcode(p->header); - spin_lock_irqsave(&card->lock, flags); - list_for_each_entry(iter, &card->transaction_list, link) { - if (iter->node_id == source && iter->tlabel == tlabel) { - if (!try_cancel_split_timeout(iter)) { - spin_unlock_irqrestore(&card->lock, flags); - goto timed_out; - } - list_del_init(&iter->link); - card->tlabel_mask &= ~(1ULL << iter->tlabel); - t = iter; - break; - } - } - spin_unlock_irqrestore(&card->lock, flags); - - if (!t) { - timed_out: - fw_notice(card, "unsolicited response (source %x, tlabel %x)\n", - source, tlabel); - return; - } - - /* - * FIXME: sanity check packet, is length correct, does tcodes - * and addresses match. - */ + // FIXME: sanity check packet, is length correct, does tcodes + // and addresses match to the transaction request queried later. + // + // For the tracepoints event, let us decode the header here against the concern. switch (tcode) { case TCODE_READ_QUADLET_RESPONSE: @@ -1073,7 +1055,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) case TCODE_READ_BLOCK_RESPONSE: case TCODE_LOCK_RESPONSE: data = p->payload; - data_length = HEADER_GET_DATA_LENGTH(p->header[3]); + data_length = async_header_get_data_length(p->header); break; default: @@ -1083,6 +1065,31 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) break; } + spin_lock_irqsave(&card->lock, flags); + list_for_each_entry(iter, &card->transaction_list, link) { + if (iter->node_id == source && iter->tlabel == tlabel) { + if (!try_cancel_split_timeout(iter)) { + spin_unlock_irqrestore(&card->lock, flags); + goto timed_out; + } + list_del_init(&iter->link); + card->tlabel_mask &= ~(1ULL << iter->tlabel); + t = iter; + break; + } + } + spin_unlock_irqrestore(&card->lock, flags); + + trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack, + p->timestamp, p->header, data, data_length / 4); + + if (!t) { + timed_out: + fw_notice(card, "unsolicited response (source %x, tlabel %x)\n", + source, tlabel); + return; + } + /* * The response handler may be executed while the request handler * is still pending. Cancel the request handler. 
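/*
 * A standalone sketch of the 48-bit CSR offset handling that
 * async_header_get_offset()/async_header_set_offset() replace above: the high
 * 16 bits live in the low half of header quadlet 1, the low 32 bits fill
 * quadlet 2. The constants mirror packet-header-definitions.h; the example
 * quadlets and offset come from the write-quadlet-request vector in
 * packet-serdes-test.c later in this patch.
 */
#include <assert.h>
#include <stdint.h>

#define ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT	0
#define ASYNC_HEADER_Q1_OFFSET_HIGH_MASK	0x0000ffff

static uint64_t async_header_get_offset(const uint32_t header[4])
{
	uint32_t hi = (header[1] & ASYNC_HEADER_Q1_OFFSET_HIGH_MASK) >> ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT;

	return ((uint64_t)hi << 32) | header[2];
}

static void async_header_set_offset(uint32_t header[4], uint64_t offset)
{
	header[1] &= ~ASYNC_HEADER_Q1_OFFSET_HIGH_MASK;
	header[1] |= ((uint32_t)(offset >> 32) << ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT) &
		     ASYNC_HEADER_Q1_OFFSET_HIGH_MASK;
	header[2] = (uint32_t)offset;
}

int main(void)
{
	/* Quadlets 1 and 2 of the write-quadlet-request test vector. */
	uint32_t header[4] = { 0, 0xffc1ffff, 0xf0000234, 0 };
	uint32_t scratch[4] = { 0, 0xffc10000, 0, 0 };

	assert(async_header_get_offset(header) == 0xfffff0000234ULL);

	async_header_set_offset(scratch, 0xfffff0000234ULL);
	assert(scratch[1] == 0xffc1ffff && scratch[2] == 0xf0000234);
	return 0;
}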
@@ -1135,7 +1142,7 @@ static void handle_topology_map(struct fw_card *card, struct fw_request *request { int start; - if (!TCODE_IS_READ_REQUEST(tcode)) { + if (!tcode_is_read_request(tcode)) { fw_send_response(card, request, RCODE_TYPE_ERROR); return; } diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index 95c10f3d22..7c36d2628e 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -225,13 +225,20 @@ static inline bool is_next_generation(int new_generation, int old_generation) #define TCODE_LINK_INTERNAL 0xe -#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) -#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) -#define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == TCODE_LINK_INTERNAL) -#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0) -#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0) -#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4) -#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0) +static inline bool tcode_is_read_request(unsigned int tcode) +{ + return (tcode & ~1u) == 4u; +} + +static inline bool tcode_is_block_packet(unsigned int tcode) +{ + return (tcode & 1u) != 0u; +} + +static inline bool tcode_is_link_internal(unsigned int tcode) +{ + return (tcode == TCODE_LINK_INTERNAL); +} #define LOCAL_BUS 0xffc0 diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index b9ae0340b8..f6de0b3a9a 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -40,6 +40,7 @@ #include "core.h" #include "ohci.h" +#include "packet-header-definitions.h" #define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args) #define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args) @@ -393,7 +394,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" #define OHCI_PARAM_DEBUG_AT_AR 1 #define OHCI_PARAM_DEBUG_SELFIDS 2 #define OHCI_PARAM_DEBUG_IRQS 4 -#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ static int param_debug; module_param_named(debug, param_debug, int, 0644); @@ -401,7 +401,6 @@ MODULE_PARM_DESC(debug, "Verbose logging (default = 0" ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR) ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS) ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS) - ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS) ", or a combination, or all = -1)"); static bool param_remote_dma; @@ -410,12 +409,7 @@ MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)"); static void log_irqs(struct fw_ohci *ohci, u32 evt) { - if (likely(!(param_debug & - (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS)))) - return; - - if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) && - !(evt & OHCI1394_busReset)) + if (likely(!(param_debug & OHCI_PARAM_DEBUG_IRQS))) return; ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, @@ -516,14 +510,14 @@ static const char *tcodes[] = { static void log_ar_at_event(struct fw_ohci *ohci, char dir, int speed, u32 *header, int evt) { - int tcode = header[0] >> 4 & 0xf; + int tcode = async_header_get_tcode(header); char specific[12]; if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR))) return; if (unlikely(evt >= ARRAY_SIZE(evts))) - evt = 0x1f; + evt = 0x1f; if (evt == OHCI1394_evt_bus_reset) { ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n", @@ -532,20 +526,27 @@ static void log_ar_at_event(struct fw_ohci *ohci, } switch (tcode) { - case 0x0: case 0x6: case 0x8: + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_READ_QUADLET_RESPONSE: + case TCODE_CYCLE_START: 
snprintf(specific, sizeof(specific), " = %08x", be32_to_cpu((__force __be32)header[3])); break; - case 0x1: case 0x5: case 0x7: case 0x9: case 0xb: + case TCODE_WRITE_BLOCK_REQUEST: + case TCODE_READ_BLOCK_REQUEST: + case TCODE_READ_BLOCK_RESPONSE: + case TCODE_LOCK_REQUEST: + case TCODE_LOCK_RESPONSE: snprintf(specific, sizeof(specific), " %x,%x", - header[3] >> 16, header[3] & 0xffff); + async_header_get_data_length(header), + async_header_get_extended_tcode(header)); break; default: specific[0] = '\0'; } switch (tcode) { - case 0xa: + case TCODE_STREAM_DATA: ohci_notice(ohci, "A%c %s, %s\n", dir, evts[evt], tcodes[tcode]); break; @@ -553,19 +554,23 @@ static void log_ar_at_event(struct fw_ohci *ohci, ohci_notice(ohci, "A%c %s, PHY %08x %08x\n", dir, evts[evt], header[1], header[2]); break; - case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + case TCODE_READ_QUADLET_REQUEST: + case TCODE_READ_BLOCK_REQUEST: + case TCODE_LOCK_REQUEST: ohci_notice(ohci, - "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n", - dir, speed, header[0] >> 10 & 0x3f, - header[1] >> 16, header[0] >> 16, evts[evt], - tcodes[tcode], header[1] & 0xffff, header[2], specific); + "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %012llx%s\n", + dir, speed, async_header_get_tlabel(header), + async_header_get_source(header), async_header_get_destination(header), + evts[evt], tcodes[tcode], async_header_get_offset(header), specific); break; default: ohci_notice(ohci, "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n", - dir, speed, header[0] >> 10 & 0x3f, - header[1] >> 16, header[0] >> 16, evts[evt], - tcodes[tcode], specific); + dir, speed, async_header_get_tlabel(header), + async_header_get_source(header), async_header_get_destination(header), + evts[evt], tcodes[tcode], specific); } } @@ -853,7 +858,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) p.header[1] = cond_le32_to_cpu(buffer[1]); p.header[2] = cond_le32_to_cpu(buffer[2]); - tcode = (p.header[0] >> 4) & 0x0f; + tcode = async_header_get_tcode(p.header); switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: case TCODE_READ_QUADLET_RESPONSE: @@ -874,7 +879,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) case TCODE_LOCK_RESPONSE: p.header[3] = cond_le32_to_cpu(buffer[3]); p.header_length = 16; - p.payload_length = p.header[3] >> 16; + p.payload_length = async_header_get_data_length(p.header); if (p.payload_length > MAX_ASYNC_PAYLOAD) { ar_context_abort(ctx, "invalid packet length"); return NULL; @@ -911,8 +916,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) * Several controllers, notably from NEC and VIA, forget to * write ack_complete status at PHY packet reception. */ - if (evt == OHCI1394_evt_no_status && - (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4)) + if (evt == OHCI1394_evt_no_status && tcode == OHCI1394_phy_tcode) p.ack = ACK_COMPLETE; /* @@ -1353,7 +1357,7 @@ static int at_context_queue_packet(struct context *ctx, * accordingly. 
*/ - tcode = (packet->header[0] >> 4) & 0x0f; + tcode = async_header_get_tcode(packet->header); header = (__le32 *) &d[1]; switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: @@ -1371,7 +1375,7 @@ static int at_context_queue_packet(struct context *ctx, (packet->header[0] & 0xffff0000)); header[2] = cpu_to_le32(packet->header[2]); - if (TCODE_IS_BLOCK_PACKET(tcode)) + if (tcode_is_block_packet(tcode)) header[3] = cpu_to_le32(packet->header[3]); else header[3] = (__force __le32) packet->header[3]; @@ -1550,12 +1554,6 @@ static int handle_at_packet(struct context *context, return 1; } -#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff) -#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f) -#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff) -#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) -#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) - static u32 get_cycle_time(struct fw_ohci *ohci); static void handle_local_rom(struct fw_ohci *ohci, @@ -1564,9 +1562,9 @@ static void handle_local_rom(struct fw_ohci *ohci, struct fw_packet response; int tcode, length, i; - tcode = HEADER_GET_TCODE(packet->header[0]); - if (TCODE_IS_BLOCK_PACKET(tcode)) - length = HEADER_GET_DATA_LENGTH(packet->header[3]); + tcode = async_header_get_tcode(packet->header); + if (tcode_is_block_packet(tcode)) + length = async_header_get_data_length(packet->header); else length = 4; @@ -1574,7 +1572,7 @@ static void handle_local_rom(struct fw_ohci *ohci, if (i + length > CONFIG_ROM_SIZE) { fw_fill_response(&response, packet->header, RCODE_ADDRESS_ERROR, NULL, 0); - } else if (!TCODE_IS_READ_REQUEST(tcode)) { + } else if (!tcode_is_read_request(tcode)) { fw_fill_response(&response, packet->header, RCODE_TYPE_ERROR, NULL, 0); } else { @@ -1595,10 +1593,10 @@ static void handle_local_lock(struct fw_ohci *ohci, __be32 *payload, lock_old; u32 lock_arg, lock_data; - tcode = HEADER_GET_TCODE(packet->header[0]); - length = HEADER_GET_DATA_LENGTH(packet->header[3]); + tcode = async_header_get_tcode(packet->header); + length = async_header_get_data_length(packet->header); payload = packet->payload; - ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); + ext_tcode = async_header_get_extended_tcode(packet->header); if (tcode == TCODE_LOCK_REQUEST && ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) { @@ -1646,10 +1644,7 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet) packet->callback(packet, &ctx->ohci->card, packet->ack); } - offset = - ((unsigned long long) - HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) | - packet->header[2]; + offset = async_header_get_offset(packet->header); csr = offset - CSR_REGISTER_BASE; /* Handle config rom reads. 
*/ @@ -1683,7 +1678,7 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet) spin_lock_irqsave(&ctx->ohci->lock, flags); - if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && + if (async_header_get_destination(packet->header) == ctx->ohci->node_id && ctx->ohci->generation == packet->generation) { spin_unlock_irqrestore(&ctx->ohci->lock, flags); @@ -2064,8 +2059,7 @@ static void bus_reset_work(struct work_struct *work) ohci->generation = generation; reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); - if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) - reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); + reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); if (ohci->quirks & QUIRK_RESET_PACKET) ohci->request_generation = generation; @@ -2137,6 +2131,7 @@ static irqreturn_t irq_handler(int irq, void *data) reg_write(ohci, OHCI1394_IntEventClear, event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); log_irqs(ohci, event); + // The flag is masked again at bus_reset_work() scheduled by selfID event. if (event & OHCI1394_busReset) reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset); @@ -2476,9 +2471,8 @@ static int ohci_enable(struct fw_card *card, OHCI1394_cycleInconsistent | OHCI1394_unrecoverableError | OHCI1394_cycleTooLong | - OHCI1394_masterIntEnable; - if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) - irqs |= OHCI1394_busReset; + OHCI1394_masterIntEnable | + OHCI1394_busReset; reg_write(ohci, OHCI1394_IntMaskSet, irqs); reg_write(ohci, OHCI1394_HCControlSet, @@ -3631,7 +3625,7 @@ static int pci_probe(struct pci_dev *dev, struct fw_ohci *ohci; u32 bus_options, max_receive, link_speed, version; u64 guid; - int i, err; + int i, flags, irq, err; size_t size; if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) { @@ -3756,18 +3750,29 @@ static int pci_probe(struct pci_dev *dev, guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | reg_read(ohci, OHCI1394_GUIDLo); + flags = PCI_IRQ_INTX; if (!(ohci->quirks & QUIRK_NO_MSI)) - pci_enable_msi(dev); - err = devm_request_irq(&dev->dev, dev->irq, irq_handler, - pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name, ohci); + flags |= PCI_IRQ_MSI; + err = pci_alloc_irq_vectors(dev, 1, 1, flags); + if (err < 0) + return err; + irq = pci_irq_vector(dev, 0); + if (irq < 0) { + err = irq; + goto fail_msi; + } + + err = request_threaded_irq(irq, irq_handler, NULL, + pci_dev_msi_enabled(dev) ? 
0 : IRQF_SHARED, ohci_driver_name, + ohci); if (err < 0) { - ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq); + ohci_err(ohci, "failed to allocate interrupt %d\n", irq); goto fail_msi; } err = fw_card_add(&ohci->card, max_receive, link_speed, guid); if (err) - goto fail_msi; + goto fail_irq; version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; ohci_notice(ohci, @@ -3780,9 +3785,10 @@ static int pci_probe(struct pci_dev *dev, return 0; + fail_irq: + free_irq(irq, ohci); fail_msi: - devm_free_irq(&dev->dev, dev->irq, ohci); - pci_disable_msi(dev); + pci_free_irq_vectors(dev); return err; } @@ -3790,6 +3796,7 @@ static int pci_probe(struct pci_dev *dev, static void pci_remove(struct pci_dev *dev) { struct fw_ohci *ohci = pci_get_drvdata(dev); + int irq; /* * If the removal is happening from the suspend state, LPS won't be @@ -3809,8 +3816,10 @@ static void pci_remove(struct pci_dev *dev) software_reset(ohci); - devm_free_irq(&dev->dev, dev->irq, ohci); - pci_disable_msi(dev); + irq = pci_irq_vector(dev, 0); + if (irq >= 0) + free_irq(irq, ohci); + pci_free_irq_vectors(dev); dev_notice(&dev->dev, "removing fw-ohci device\n"); } diff --git a/drivers/firewire/packet-header-definitions.h b/drivers/firewire/packet-header-definitions.h new file mode 100644 index 0000000000..ab9d0fa790 --- /dev/null +++ b/drivers/firewire/packet-header-definitions.h @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// +// packet-header-definitions.h - The definitions of header fields for IEEE 1394 packet. +// +// Copyright (c) 2024 Takashi Sakamoto + +#ifndef _FIREWIRE_PACKET_HEADER_DEFINITIONS_H +#define _FIREWIRE_PACKET_HEADER_DEFINITIONS_H + +#define ASYNC_HEADER_QUADLET_COUNT 4 + +#define ASYNC_HEADER_Q0_DESTINATION_SHIFT 16 +#define ASYNC_HEADER_Q0_DESTINATION_MASK 0xffff0000 +#define ASYNC_HEADER_Q0_TLABEL_SHIFT 10 +#define ASYNC_HEADER_Q0_TLABEL_MASK 0x0000fc00 +#define ASYNC_HEADER_Q0_RETRY_SHIFT 8 +#define ASYNC_HEADER_Q0_RETRY_MASK 0x00000300 +#define ASYNC_HEADER_Q0_TCODE_SHIFT 4 +#define ASYNC_HEADER_Q0_TCODE_MASK 0x000000f0 +#define ASYNC_HEADER_Q0_PRIORITY_SHIFT 0 +#define ASYNC_HEADER_Q0_PRIORITY_MASK 0x0000000f +#define ASYNC_HEADER_Q1_SOURCE_SHIFT 16 +#define ASYNC_HEADER_Q1_SOURCE_MASK 0xffff0000 +#define ASYNC_HEADER_Q1_RCODE_SHIFT 12 +#define ASYNC_HEADER_Q1_RCODE_MASK 0x0000f000 +#define ASYNC_HEADER_Q1_RCODE_SHIFT 12 +#define ASYNC_HEADER_Q1_RCODE_MASK 0x0000f000 +#define ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT 0 +#define ASYNC_HEADER_Q1_OFFSET_HIGH_MASK 0x0000ffff +#define ASYNC_HEADER_Q3_DATA_LENGTH_SHIFT 16 +#define ASYNC_HEADER_Q3_DATA_LENGTH_MASK 0xffff0000 +#define ASYNC_HEADER_Q3_EXTENDED_TCODE_SHIFT 0 +#define ASYNC_HEADER_Q3_EXTENDED_TCODE_MASK 0x0000ffff + +static inline unsigned int async_header_get_destination(const u32 header[ASYNC_HEADER_QUADLET_COUNT]) +{ + return (header[0] & ASYNC_HEADER_Q0_DESTINATION_MASK) >> ASYNC_HEADER_Q0_DESTINATION_SHIFT; +} + +static inline unsigned int async_header_get_tlabel(const u32 header[ASYNC_HEADER_QUADLET_COUNT]) +{ + return (header[0] & ASYNC_HEADER_Q0_TLABEL_MASK) >> ASYNC_HEADER_Q0_TLABEL_SHIFT; +} + +static inline unsigned int async_header_get_retry(const u32 header[ASYNC_HEADER_QUADLET_COUNT]) +{ + return (header[0] & ASYNC_HEADER_Q0_RETRY_MASK) >> ASYNC_HEADER_Q0_RETRY_SHIFT; +} + +static inline unsigned int async_header_get_tcode(const u32 header[ASYNC_HEADER_QUADLET_COUNT]) +{ + return (header[0] & ASYNC_HEADER_Q0_TCODE_MASK) >> ASYNC_HEADER_Q0_TCODE_SHIFT; +} + +static inline unsigned int 
async_header_get_priority(const u32 header[ASYNC_HEADER_QUADLET_COUNT]) +{ + return (header[0] & ASYNC_HEADER_Q0_PRIORITY_MASK) >> ASYNC_HEADER_Q0_PRIORITY_SHIFT; +} + +static inline unsigned int async_header_get_source(const u32 header[ASYNC_HEADER_QUADLET_COUNT]) +{ + return (header[1] & ASYNC_HEADER_Q1_SOURCE_MASK) >> ASYNC_HEADER_Q1_SOURCE_SHIFT; +} + +static inline unsigned int async_header_get_rcode(const u32 header[ASYNC_HEADER_QUADLET_COUNT]) +{ + return (header[1] & ASYNC_HEADER_Q1_RCODE_MASK) >> ASYNC_HEADER_Q1_RCODE_SHIFT; +} + +static inline u64 async_header_get_offset(const u32 header[ASYNC_HEADER_QUADLET_COUNT]) +{ + u32 hi = (header[1] & ASYNC_HEADER_Q1_OFFSET_HIGH_MASK) >> ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT; + return (((u64)hi) << 32) | ((u64)header[2]); +} + +static inline u32 async_header_get_quadlet_data(const u32 header[ASYNC_HEADER_QUADLET_COUNT]) +{ + return header[3]; +} + +static inline unsigned int async_header_get_data_length(const u32 header[ASYNC_HEADER_QUADLET_COUNT]) +{ + return (header[3] & ASYNC_HEADER_Q3_DATA_LENGTH_MASK) >> ASYNC_HEADER_Q3_DATA_LENGTH_SHIFT; +} + +static inline unsigned int async_header_get_extended_tcode(const u32 header[ASYNC_HEADER_QUADLET_COUNT]) +{ + return (header[3] & ASYNC_HEADER_Q3_EXTENDED_TCODE_MASK) >> ASYNC_HEADER_Q3_EXTENDED_TCODE_SHIFT; +} + +static inline void async_header_set_destination(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int destination) +{ + header[0] &= ~ASYNC_HEADER_Q0_DESTINATION_MASK; + header[0] |= (((u32)destination) << ASYNC_HEADER_Q0_DESTINATION_SHIFT) & ASYNC_HEADER_Q0_DESTINATION_MASK; +} + +static inline void async_header_set_tlabel(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int tlabel) +{ + header[0] &= ~ASYNC_HEADER_Q0_TLABEL_MASK; + header[0] |= (((u32)tlabel) << ASYNC_HEADER_Q0_TLABEL_SHIFT) & ASYNC_HEADER_Q0_TLABEL_MASK; +} + +static inline void async_header_set_retry(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int retry) +{ + header[0] &= ~ASYNC_HEADER_Q0_RETRY_MASK; + header[0] |= (((u32)retry) << ASYNC_HEADER_Q0_RETRY_SHIFT) & ASYNC_HEADER_Q0_RETRY_MASK; +} + +static inline void async_header_set_tcode(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int tcode) +{ + header[0] &= ~ASYNC_HEADER_Q0_TCODE_MASK; + header[0] |= (((u32)tcode) << ASYNC_HEADER_Q0_TCODE_SHIFT) & ASYNC_HEADER_Q0_TCODE_MASK; +} + +static inline void async_header_set_priority(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int priority) +{ + header[0] &= ~ASYNC_HEADER_Q0_PRIORITY_MASK; + header[0] |= (((u32)priority) << ASYNC_HEADER_Q0_PRIORITY_SHIFT) & ASYNC_HEADER_Q0_PRIORITY_MASK; +} + + +static inline void async_header_set_source(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int source) +{ + header[1] &= ~ASYNC_HEADER_Q1_SOURCE_MASK; + header[1] |= (((u32)source) << ASYNC_HEADER_Q1_SOURCE_SHIFT) & ASYNC_HEADER_Q1_SOURCE_MASK; +} + +static inline void async_header_set_rcode(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int rcode) +{ + header[1] &= ~ASYNC_HEADER_Q1_RCODE_MASK; + header[1] |= (((u32)rcode) << ASYNC_HEADER_Q1_RCODE_SHIFT) & ASYNC_HEADER_Q1_RCODE_MASK; +} + +static inline void async_header_set_offset(u32 header[ASYNC_HEADER_QUADLET_COUNT], u64 offset) +{ + u32 hi = (u32)(offset >> 32); + header[1] &= ~ASYNC_HEADER_Q1_OFFSET_HIGH_MASK; + header[1] |= (hi << ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT) & ASYNC_HEADER_Q1_OFFSET_HIGH_MASK; + header[2] = (u32)(offset & 0x00000000ffffffff); +} + +static inline void async_header_set_quadlet_data(u32 header[ASYNC_HEADER_QUADLET_COUNT], u32 
quadlet_data) +{ + header[3] = quadlet_data; +} + +static inline void async_header_set_data_length(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int data_length) +{ + header[3] &= ~ASYNC_HEADER_Q3_DATA_LENGTH_MASK; + header[3] |= (((u32)data_length) << ASYNC_HEADER_Q3_DATA_LENGTH_SHIFT) & ASYNC_HEADER_Q3_DATA_LENGTH_MASK; +} + +static inline void async_header_set_extended_tcode(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int extended_tcode) +{ + header[3] &= ~ASYNC_HEADER_Q3_EXTENDED_TCODE_MASK; + header[3] |= (((u32)extended_tcode) << ASYNC_HEADER_Q3_EXTENDED_TCODE_SHIFT) & ASYNC_HEADER_Q3_EXTENDED_TCODE_MASK; +} + +#define ISOC_HEADER_DATA_LENGTH_SHIFT 16 +#define ISOC_HEADER_DATA_LENGTH_MASK 0xffff0000 +#define ISOC_HEADER_TAG_SHIFT 14 +#define ISOC_HEADER_TAG_MASK 0x0000c000 +#define ISOC_HEADER_CHANNEL_SHIFT 8 +#define ISOC_HEADER_CHANNEL_MASK 0x00003f00 +#define ISOC_HEADER_TCODE_SHIFT 4 +#define ISOC_HEADER_TCODE_MASK 0x000000f0 +#define ISOC_HEADER_SY_SHIFT 0 +#define ISOC_HEADER_SY_MASK 0x0000000f + +static inline unsigned int isoc_header_get_data_length(u32 header) +{ + return (header & ISOC_HEADER_DATA_LENGTH_MASK) >> ISOC_HEADER_DATA_LENGTH_SHIFT; +} + +static inline unsigned int isoc_header_get_tag(u32 header) +{ + return (header & ISOC_HEADER_TAG_MASK) >> ISOC_HEADER_TAG_SHIFT; +} + +static inline unsigned int isoc_header_get_channel(u32 header) +{ + return (header & ISOC_HEADER_CHANNEL_MASK) >> ISOC_HEADER_CHANNEL_SHIFT; +} + +static inline unsigned int isoc_header_get_tcode(u32 header) +{ + return (header & ISOC_HEADER_TCODE_MASK) >> ISOC_HEADER_TCODE_SHIFT; +} + +static inline unsigned int isoc_header_get_sy(u32 header) +{ + return (header & ISOC_HEADER_SY_MASK) >> ISOC_HEADER_SY_SHIFT; +} + +static inline void isoc_header_set_data_length(u32 *header, unsigned int data_length) +{ + *header &= ~ISOC_HEADER_DATA_LENGTH_MASK; + *header |= (((u32)data_length) << ISOC_HEADER_DATA_LENGTH_SHIFT) & ISOC_HEADER_DATA_LENGTH_MASK; +} + +static inline void isoc_header_set_tag(u32 *header, unsigned int tag) +{ + *header &= ~ISOC_HEADER_TAG_MASK; + *header |= (((u32)tag) << ISOC_HEADER_TAG_SHIFT) & ISOC_HEADER_TAG_MASK; +} + +static inline void isoc_header_set_channel(u32 *header, unsigned int channel) +{ + *header &= ~ISOC_HEADER_CHANNEL_MASK; + *header |= (((u32)channel) << ISOC_HEADER_CHANNEL_SHIFT) & ISOC_HEADER_CHANNEL_MASK; +} + +static inline void isoc_header_set_tcode(u32 *header, unsigned int tcode) +{ + *header &= ~ISOC_HEADER_TCODE_MASK; + *header |= (((u32)tcode) << ISOC_HEADER_TCODE_SHIFT) & ISOC_HEADER_TCODE_MASK; +} + +static inline void isoc_header_set_sy(u32 *header, unsigned int sy) +{ + *header &= ~ISOC_HEADER_SY_MASK; + *header |= (((u32)sy) << ISOC_HEADER_SY_SHIFT) & ISOC_HEADER_SY_MASK; +} + +#endif // _FIREWIRE_PACKET_HEADER_DEFINITIONS_H diff --git a/drivers/firewire/packet-serdes-test.c b/drivers/firewire/packet-serdes-test.c new file mode 100644 index 0000000000..e83b1fece7 --- /dev/null +++ b/drivers/firewire/packet-serdes-test.c @@ -0,0 +1,583 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// +// packet-serdes-test.c - An application of Kunit to check serialization/deserialization of packets +// defined by IEEE 1394. 
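/*
 * A standalone round-trip sketch of the isochronous header layout handled by
 * the isoc_header_*() helpers above: data_length (16 bits), tag (2), channel
 * (6), tcode (4) and sy (4) packed into a single quadlet. The value 0x00d08dec
 * and its decoded fields are the ones checked by test_isoc_header() below;
 * pack_isoc_header() is a hypothetical helper that assumes in-range inputs.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t pack_isoc_header(unsigned int data_length, unsigned int tag,
				 unsigned int channel, unsigned int tcode, unsigned int sy)
{
	return ((uint32_t)data_length << 16) | ((uint32_t)tag << 14) |
	       ((uint32_t)channel << 8) | ((uint32_t)tcode << 4) | (uint32_t)sy;
}

int main(void)
{
	uint32_t header = pack_isoc_header(0xd0, 0x02, 0x0d, 0x0e, 0x0c);

	assert(header == 0x00d08dec);
	assert(((header >> 16) & 0xffff) == 0xd0);	/* data_length */
	assert(((header >> 14) & 0x3) == 0x02);		/* tag */
	assert(((header >> 8) & 0x3f) == 0x0d);		/* channel */
	assert(((header >> 4) & 0xf) == 0x0e);		/* tcode */
	assert((header & 0xf) == 0x0c);			/* sy */
	return 0;
}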
+// +// Copyright (c) 2024 Takashi Sakamoto + +#include + +#include + +#include "packet-header-definitions.h" + +static void serialize_async_header_common(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int dst_id, unsigned int tlabel, + unsigned int retry, unsigned int tcode, + unsigned int priority, unsigned int src_id) +{ + async_header_set_destination(header, dst_id); + async_header_set_tlabel(header, tlabel); + async_header_set_retry(header, retry); + async_header_set_tcode(header, tcode); + async_header_set_priority(header, priority); + async_header_set_source(header, src_id); +} + +static void serialize_async_header_request(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int dst_id, unsigned int tlabel, + unsigned int retry, unsigned int tcode, + unsigned int priority, unsigned int src_id, u64 offset) +{ + serialize_async_header_common(header, dst_id, tlabel, retry, tcode, priority, src_id); + async_header_set_offset(header, offset); +} + +static void serialize_async_header_quadlet_request(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int dst_id, unsigned int tlabel, + unsigned int retry, unsigned int tcode, + unsigned int priority, unsigned int src_id, + u64 offset) +{ + serialize_async_header_request(header, dst_id, tlabel, retry, tcode, priority, src_id, + offset); +} + +static void serialize_async_header_block_request(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int dst_id, unsigned int tlabel, + unsigned int retry, unsigned int tcode, + unsigned int priority, unsigned int src_id, + u64 offset, unsigned int data_length, + unsigned int extended_tcode) +{ + serialize_async_header_request(header, dst_id, tlabel, retry, tcode, priority, src_id, + offset); + async_header_set_data_length(header, data_length); + async_header_set_extended_tcode(header, extended_tcode); +} + +static void serialize_async_header_response(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int dst_id, unsigned int tlabel, + unsigned int retry, unsigned int tcode, + unsigned int priority, unsigned int src_id, + unsigned int rcode) +{ + serialize_async_header_common(header, dst_id, tlabel, retry, tcode, priority, src_id); + async_header_set_rcode(header, rcode); +} + +static void serialize_async_header_quadlet_response(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int dst_id, unsigned int tlabel, + unsigned int retry, unsigned int tcode, + unsigned int priority, unsigned int src_id, + unsigned int rcode) +{ + serialize_async_header_response(header, dst_id, tlabel, retry, tcode, priority, src_id, + rcode); +} + +static void serialize_async_header_block_response(u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int dst_id, unsigned int tlabel, + unsigned int retry, unsigned int tcode, + unsigned int priority, unsigned int src_id, + unsigned int rcode, unsigned int data_length, + unsigned int extended_tcode) +{ + serialize_async_header_response(header, dst_id, tlabel, retry, tcode, priority, src_id, + rcode); + async_header_set_data_length(header, data_length); + async_header_set_extended_tcode(header, extended_tcode); +} + +static void deserialize_async_header_common(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id) +{ + *dst_id = async_header_get_destination(header); + *tlabel = async_header_get_tlabel(header); + *retry = async_header_get_retry(header); + *tcode = async_header_get_tcode(header); + *priority = async_header_get_priority(header); + *src_id = 
async_header_get_source(header); +} + +static void deserialize_async_header_request(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id, + u64 *offset) +{ + deserialize_async_header_common(header, dst_id, tlabel, retry, tcode, priority, src_id); + *offset = async_header_get_offset(header); +} + +static void deserialize_async_header_quadlet_request(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id, + u64 *offset) +{ + deserialize_async_header_request(header, dst_id, tlabel, retry, tcode, priority, src_id, + offset); +} + +static void deserialize_async_header_block_request(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id, + u64 *offset, + unsigned int *data_length, + unsigned int *extended_tcode) +{ + deserialize_async_header_request(header, dst_id, tlabel, retry, tcode, priority, src_id, + offset); + *data_length = async_header_get_data_length(header); + *extended_tcode = async_header_get_extended_tcode(header); +} + +static void deserialize_async_header_response(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id, + unsigned int *rcode) +{ + deserialize_async_header_common(header, dst_id, tlabel, retry, tcode, priority, src_id); + *rcode = async_header_get_rcode(header); +} + +static void deserialize_async_header_quadlet_response(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id, + unsigned int *rcode) +{ + deserialize_async_header_response(header, dst_id, tlabel, retry, tcode, priority, src_id, rcode); +} + +static void deserialize_async_header_block_response(const u32 header[ASYNC_HEADER_QUADLET_COUNT], + unsigned int *dst_id, unsigned int *tlabel, + unsigned int *retry, unsigned int *tcode, + unsigned int *priority, unsigned int *src_id, + unsigned int *rcode, unsigned int *data_length, + unsigned int *extended_tcode) +{ + deserialize_async_header_response(header, dst_id, tlabel, retry, tcode, priority, src_id, rcode); + *data_length = async_header_get_data_length(header); + *extended_tcode = async_header_get_extended_tcode(header); +} + +static void serialize_isoc_header(u32 *header, unsigned int data_length, unsigned int tag, + unsigned int channel, unsigned int tcode, unsigned int sy) +{ + isoc_header_set_data_length(header, data_length); + isoc_header_set_tag(header, tag); + isoc_header_set_channel(header, channel); + isoc_header_set_tcode(header, tcode); + isoc_header_set_sy(header, sy); +} + +static void deserialize_isoc_header(u32 header, unsigned int *data_length, unsigned int *tag, + unsigned int *channel, unsigned int *tcode, unsigned int *sy) +{ + *data_length = isoc_header_get_data_length(header); + *tag = isoc_header_get_tag(header); + *channel = isoc_header_get_channel(header); + *tcode = isoc_header_get_tcode(header); + *sy = isoc_header_get_sy(header); +} + +static void test_async_header_write_quadlet_request(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc05100, + 0xffc1ffff, + 
0xf0000234, + 0x1f0000c0, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + u64 offset; + u32 quadlet_data; + + deserialize_async_header_quadlet_request(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &offset); + quadlet_data = async_header_get_quadlet_data(expected); + + KUNIT_EXPECT_EQ(test, 0xffc0, dst_id); + KUNIT_EXPECT_EQ(test, 0x14, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_WRITE_QUADLET_REQUEST, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc1, src_id); + KUNIT_EXPECT_EQ(test, 0xfffff0000234, offset); + KUNIT_EXPECT_EQ(test, 0x1f0000c0, quadlet_data); + + serialize_async_header_quadlet_request(header, dst_id, tlabel, retry, tcode, priority, + src_id, offset); + async_header_set_quadlet_data(header, quadlet_data); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_write_block_request(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc06510, + 0xffc1ecc0, + 0x00000000, + 0x00180000, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + u64 offset; + unsigned int data_length; + unsigned int extended_tcode; + + deserialize_async_header_block_request(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &offset, &data_length, + &extended_tcode); + + KUNIT_EXPECT_EQ(test, 0xffc0, dst_id); + KUNIT_EXPECT_EQ(test, 0x19, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_WRITE_BLOCK_REQUEST, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc1, src_id); + KUNIT_EXPECT_EQ(test, 0xecc000000000, offset); + KUNIT_EXPECT_EQ(test, 0x0018, data_length); + KUNIT_EXPECT_EQ(test, 0x0000, extended_tcode); + + serialize_async_header_block_request(header, dst_id, tlabel, retry, tcode, priority, src_id, + offset, data_length, extended_tcode); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_write_response(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc15120, + 0xffc00000, + 0x00000000, + 0x00000000, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + unsigned int rcode; + + deserialize_async_header_quadlet_response(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &rcode); + + KUNIT_EXPECT_EQ(test, 0xffc1, dst_id); + KUNIT_EXPECT_EQ(test, 0x14, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_WRITE_RESPONSE, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc0, src_id); + KUNIT_EXPECT_EQ(test, RCODE_COMPLETE, rcode); + + serialize_async_header_quadlet_response(header, dst_id, tlabel, retry, tcode, priority, + src_id, rcode); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected) - sizeof(expected[0])); +} + +static void test_async_header_read_quadlet_request(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc0f140, + 0xffc1ffff, + 0xf0000984, + 0x00000000, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 
0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + u64 offset; + + deserialize_async_header_quadlet_request(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &offset); + + KUNIT_EXPECT_EQ(test, 0xffc0, dst_id); + KUNIT_EXPECT_EQ(test, 0x3c, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_READ_QUADLET_REQUEST, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc1, src_id); + KUNIT_EXPECT_EQ(test, 0xfffff0000984, offset); + + serialize_async_header_quadlet_request(header, dst_id, tlabel, retry, tcode, priority, + src_id, offset); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_read_quadlet_response(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc1f160, + 0xffc00000, + 0x00000000, + 0x00000180, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + unsigned int rcode; + u32 quadlet_data; + + deserialize_async_header_quadlet_response(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &rcode); + quadlet_data = async_header_get_quadlet_data(expected); + + KUNIT_EXPECT_EQ(test, 0xffc1, dst_id); + KUNIT_EXPECT_EQ(test, 0x3c, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_READ_QUADLET_RESPONSE, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc0, src_id); + KUNIT_EXPECT_EQ(test, RCODE_COMPLETE, rcode); + KUNIT_EXPECT_EQ(test, 0x00000180, quadlet_data); + + serialize_async_header_quadlet_response(header, dst_id, tlabel, retry, tcode, priority, + src_id, rcode); + async_header_set_quadlet_data(header, quadlet_data); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_read_block_request(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc0e150, + 0xffc1ffff, + 0xf0000400, + 0x00200000, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + u64 offset; + unsigned int data_length; + unsigned int extended_tcode; + + deserialize_async_header_block_request(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &offset, &data_length, + &extended_tcode); + + KUNIT_EXPECT_EQ(test, 0xffc0, dst_id); + KUNIT_EXPECT_EQ(test, 0x38, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_READ_BLOCK_REQUEST, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc1, src_id); + KUNIT_EXPECT_EQ(test, 0xfffff0000400, offset); + KUNIT_EXPECT_EQ(test, 0x0020, data_length); + KUNIT_EXPECT_EQ(test, 0x0000, extended_tcode); + + serialize_async_header_block_request(header, dst_id, tlabel, retry, tcode, priority, src_id, + offset, data_length, extended_tcode); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_read_block_response(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc1e170, + 0xffc00000, + 0x00000000, + 0x00200000, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int 
tcode; + unsigned int priority; + unsigned int src_id; + unsigned int rcode; + unsigned int data_length; + unsigned int extended_tcode; + + deserialize_async_header_block_response(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &rcode, &data_length, + &extended_tcode); + + KUNIT_EXPECT_EQ(test, 0xffc1, dst_id); + KUNIT_EXPECT_EQ(test, 0x38, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_READ_BLOCK_RESPONSE, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc0, src_id); + KUNIT_EXPECT_EQ(test, RCODE_COMPLETE, rcode); + KUNIT_EXPECT_EQ(test, 0x0020, data_length); + KUNIT_EXPECT_EQ(test, 0x0000, extended_tcode); + + serialize_async_header_block_response(header, dst_id, tlabel, retry, tcode, priority, + src_id, rcode, data_length, extended_tcode); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_lock_request(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc02d90, + 0xffc1ffff, + 0xf0000984, + 0x00080002, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + u64 offset; + unsigned int data_length; + unsigned int extended_tcode; + + deserialize_async_header_block_request(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &offset, &data_length, + &extended_tcode); + + KUNIT_EXPECT_EQ(test, 0xffc0, dst_id); + KUNIT_EXPECT_EQ(test, 0x0b, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_LOCK_REQUEST, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc1, src_id); + KUNIT_EXPECT_EQ(test, 0xfffff0000984, offset); + KUNIT_EXPECT_EQ(test, 0x0008, data_length); + KUNIT_EXPECT_EQ(test, EXTCODE_COMPARE_SWAP, extended_tcode); + + serialize_async_header_block_request(header, dst_id, tlabel, retry, tcode, priority, src_id, + offset, data_length, extended_tcode); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_async_header_lock_response(struct kunit *test) +{ + static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = { + 0xffc12db0, + 0xffc00000, + 0x00000000, + 0x00040002, + }; + u32 header[ASYNC_HEADER_QUADLET_COUNT] = {0, 0, 0, 0}; + + unsigned int dst_id; + unsigned int tlabel; + unsigned int retry; + unsigned int tcode; + unsigned int priority; + unsigned int src_id; + unsigned int rcode; + unsigned int data_length; + unsigned int extended_tcode; + + deserialize_async_header_block_response(expected, &dst_id, &tlabel, &retry, &tcode, + &priority, &src_id, &rcode, &data_length, + &extended_tcode); + + KUNIT_EXPECT_EQ(test, 0xffc1, dst_id); + KUNIT_EXPECT_EQ(test, 0x0b, tlabel); + KUNIT_EXPECT_EQ(test, 0x01, retry); + KUNIT_EXPECT_EQ(test, TCODE_LOCK_RESPONSE, tcode); + KUNIT_EXPECT_EQ(test, 0x00, priority); + KUNIT_EXPECT_EQ(test, 0xffc0, src_id); + KUNIT_EXPECT_EQ(test, RCODE_COMPLETE, rcode); + KUNIT_EXPECT_EQ(test, 0x0004, data_length); + KUNIT_EXPECT_EQ(test, EXTCODE_COMPARE_SWAP, extended_tcode); + + serialize_async_header_block_response(header, dst_id, tlabel, retry, tcode, priority, + src_id, rcode, data_length, extended_tcode); + + KUNIT_EXPECT_MEMEQ(test, header, expected, sizeof(expected)); +} + +static void test_isoc_header(struct kunit *test) +{ + const u32 expected = 0x00d08dec; + u32 header = 0; + + unsigned int data_length; + unsigned int tag; + unsigned int channel; + 
unsigned int tcode; + unsigned int sy; + + deserialize_isoc_header(expected, &data_length, &tag, &channel, &tcode, &sy); + + KUNIT_EXPECT_EQ(test, 0xd0, data_length); + KUNIT_EXPECT_EQ(test, 0x02, tag); + KUNIT_EXPECT_EQ(test, 0x0d, channel); + KUNIT_EXPECT_EQ(test, 0x0e, tcode); + KUNIT_EXPECT_EQ(test, 0x0c, sy); + + serialize_isoc_header(&header, data_length, tag, channel, tcode, sy); + + KUNIT_EXPECT_EQ(test, header, expected); +} + +static struct kunit_case packet_serdes_test_cases[] = { + KUNIT_CASE(test_async_header_write_quadlet_request), + KUNIT_CASE(test_async_header_write_block_request), + KUNIT_CASE(test_async_header_write_response), + KUNIT_CASE(test_async_header_read_quadlet_request), + KUNIT_CASE(test_async_header_read_quadlet_response), + KUNIT_CASE(test_async_header_read_block_request), + KUNIT_CASE(test_async_header_read_block_response), + KUNIT_CASE(test_async_header_lock_request), + KUNIT_CASE(test_async_header_lock_response), + KUNIT_CASE(test_isoc_header), + {} +}; + +static struct kunit_suite packet_serdes_test_suite = { + .name = "firewire-packet-serdes", + .test_cases = packet_serdes_test_cases, +}; +kunit_test_suite(packet_serdes_test_suite); + +MODULE_DESCRIPTION("FireWire packet serialization/deserialization unit test suite"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index e779d86602..827dee0f57 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1500,19 +1500,14 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) sdev->allow_restart = 1; - /* - * SBP-2 does not require any alignment, but we set it anyway - * for compatibility with earlier versions of this driver. - */ - blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1); - if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36) sdev->inquiry_len = 36; return 0; } -static int sbp2_scsi_slave_configure(struct scsi_device *sdev) +static int sbp2_scsi_device_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct sbp2_logical_unit *lu = sdev->hostdata; @@ -1538,7 +1533,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev) sdev->start_stop_pwr_cond = 1; if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) - blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512); + lim->max_hw_sectors = 128 * 1024 / 512; return 0; } @@ -1596,7 +1591,7 @@ static const struct scsi_host_template scsi_driver_template = { .proc_name = "sbp2", .queuecommand = sbp2_scsi_queuecommand, .slave_alloc = sbp2_scsi_slave_alloc, - .slave_configure = sbp2_scsi_slave_configure, + .device_configure = sbp2_scsi_device_configure, .eh_abort_handler = sbp2_scsi_abort, .this_id = -1, .sg_tablesize = SG_ALL, diff --git a/drivers/firewire/uapi-test.c b/drivers/firewire/uapi-test.c index 2fcbede4fa..bc3f10a2e5 100644 --- a/drivers/firewire/uapi-test.c +++ b/drivers/firewire/uapi-test.c @@ -86,4 +86,5 @@ static struct kunit_suite structure_layout_test_suite = { }; kunit_test_suite(structure_layout_test_suite); +MODULE_DESCRIPTION("FireWire UAPI unit test suite"); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 9bc2e10381..1609247cfa 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -101,11 +101,12 @@ struct ffa_drv_info { bool bitmap_created; bool notif_enabled; unsigned int sched_recv_irq; + unsigned int notif_pend_irq; unsigned int cpuhp_state; struct ffa_pcpu_irq __percpu *irq_pcpu; struct workqueue_struct *notif_pcpu_wq; struct 
work_struct notif_pcpu_work; - struct work_struct irq_work; + struct work_struct sched_recv_irq_work; struct xarray partition_info; DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS)); struct mutex notify_lock; /* lock to protect notifier hashtable */ @@ -344,6 +345,38 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit, return -EINVAL; } +static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz) +{ + u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id); + struct ffa_indirect_msg_hdr *msg; + ffa_value_t ret; + int retval = 0; + + if (sz > (RXTX_BUFFER_SIZE - sizeof(*msg))) + return -ERANGE; + + mutex_lock(&drv_info->tx_lock); + + msg = drv_info->tx_buffer; + msg->flags = 0; + msg->res0 = 0; + msg->offset = sizeof(*msg); + msg->send_recv_id = src_dst_ids; + msg->size = sz; + memcpy((u8 *)msg + msg->offset, buf, sz); + + /* flags = 0, sender VMID = 0 works for both physical/virtual NS */ + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_MSG_SEND2, .a1 = 0, .a2 = 0 + }, &ret); + + if (ret.a0 == FFA_ERROR) + retval = ffa_to_linux_errno((int)ret.a2); + + mutex_unlock(&drv_info->tx_lock); + return retval; +} + static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len, u32 len, u64 *handle) { @@ -870,6 +903,11 @@ static int ffa_sync_send_receive(struct ffa_device *dev, dev->mode_32bit, data); } +static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz) +{ + return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz); +} + static int ffa_memory_share(struct ffa_mem_ops_args *args) { if (drv_info->mem_ops_native) @@ -1108,7 +1146,7 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type) } } -static void notif_pcpu_irq_work_fn(struct work_struct *work) +static void notif_get_and_handle(void *unused) { int rc; struct ffa_notify_bitmaps bitmaps; @@ -1131,10 +1169,17 @@ ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data) struct ffa_drv_info *info = cb_data; if (!is_per_vcpu) - notif_pcpu_irq_work_fn(&info->notif_pcpu_work); + notif_get_and_handle(info); else - queue_work_on(vcpu, info->notif_pcpu_wq, - &info->notif_pcpu_work); + smp_call_function_single(vcpu, notif_get_and_handle, info, 0); +} + +static void notif_pcpu_irq_work_fn(struct work_struct *work) +{ + struct ffa_drv_info *info = container_of(work, struct ffa_drv_info, + notif_pcpu_work); + + ffa_self_notif_handle(smp_processor_id(), true, info); } static const struct ffa_info_ops ffa_drv_info_ops = { @@ -1145,6 +1190,7 @@ static const struct ffa_info_ops ffa_drv_info_ops = { static const struct ffa_msg_ops ffa_drv_msg_ops = { .mode_32bit_set = ffa_mode_32bit_set, .sync_send_receive = ffa_sync_send_receive, + .indirect_send = ffa_indirect_msg_send, }; static const struct ffa_mem_ops ffa_drv_mem_ops = { @@ -1227,6 +1273,8 @@ static int ffa_setup_partitions(void) continue; } + ffa_dev->properties = tpbuf->properties; + if (drv_info->version > FFA_VERSION_1_0 && !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC)) ffa_mode_32bit_set(ffa_dev); @@ -1291,12 +1339,23 @@ static void ffa_partitions_cleanup(void) #define FFA_FEAT_SCHEDULE_RECEIVER_INT (2) #define FFA_FEAT_MANAGED_EXIT_INT (3) -static irqreturn_t irq_handler(int irq, void *irq_data) +static irqreturn_t ffa_sched_recv_irq_handler(int irq, void *irq_data) +{ + struct ffa_pcpu_irq *pcpu = irq_data; + struct ffa_drv_info *info = pcpu->info; + + queue_work(info->notif_pcpu_wq, &info->sched_recv_irq_work); + + return IRQ_HANDLED; +} + +static irqreturn_t 
notif_pend_irq_handler(int irq, void *irq_data) { struct ffa_pcpu_irq *pcpu = irq_data; struct ffa_drv_info *info = pcpu->info; - queue_work(info->notif_pcpu_wq, &info->irq_work); + queue_work_on(smp_processor_id(), info->notif_pcpu_wq, + &info->notif_pcpu_work); return IRQ_HANDLED; } @@ -1306,15 +1365,23 @@ static void ffa_sched_recv_irq_work_fn(struct work_struct *work) ffa_notification_info_get(); } -static int ffa_sched_recv_irq_map(void) +static int ffa_irq_map(u32 id) { - int ret, irq, sr_intid; + char *err_str; + int ret, irq, intid; - /* The returned sr_intid is assumed to be SGI donated to NS world */ - ret = ffa_features(FFA_FEAT_SCHEDULE_RECEIVER_INT, 0, &sr_intid, NULL); + if (id == FFA_FEAT_NOTIFICATION_PENDING_INT) + err_str = "Notification Pending Interrupt"; + else if (id == FFA_FEAT_SCHEDULE_RECEIVER_INT) + err_str = "Schedule Receiver Interrupt"; + else + err_str = "Unknown ID"; + + /* The returned intid is assumed to be SGI donated to NS world */ + ret = ffa_features(id, 0, &intid, NULL); if (ret < 0) { if (ret != -EOPNOTSUPP) - pr_err("Failed to retrieve scheduler Rx interrupt\n"); + pr_err("Failed to retrieve FF-A %s %u\n", err_str, id); return ret; } @@ -1329,12 +1396,12 @@ static int ffa_sched_recv_irq_map(void) oirq.np = gic; oirq.args_count = 1; - oirq.args[0] = sr_intid; + oirq.args[0] = intid; irq = irq_create_of_mapping(&oirq); of_node_put(gic); #ifdef CONFIG_ACPI } else { - irq = acpi_register_gsi(NULL, sr_intid, ACPI_EDGE_SENSITIVE, + irq = acpi_register_gsi(NULL, intid, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH); #endif } @@ -1347,23 +1414,28 @@ static int ffa_sched_recv_irq_map(void) return irq; } -static void ffa_sched_recv_irq_unmap(void) +static void ffa_irq_unmap(unsigned int irq) { - if (drv_info->sched_recv_irq) { - irq_dispose_mapping(drv_info->sched_recv_irq); - drv_info->sched_recv_irq = 0; - } + if (!irq) + return; + irq_dispose_mapping(irq); } static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu) { - enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE); + if (drv_info->sched_recv_irq) + enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE); + if (drv_info->notif_pend_irq) + enable_percpu_irq(drv_info->notif_pend_irq, IRQ_TYPE_NONE); return 0; } static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu) { - disable_percpu_irq(drv_info->sched_recv_irq); + if (drv_info->sched_recv_irq) + disable_percpu_irq(drv_info->sched_recv_irq); + if (drv_info->notif_pend_irq) + disable_percpu_irq(drv_info->notif_pend_irq); return 0; } @@ -1382,13 +1454,16 @@ static void ffa_uninit_pcpu_irq(void) if (drv_info->sched_recv_irq) free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu); + if (drv_info->notif_pend_irq) + free_percpu_irq(drv_info->notif_pend_irq, drv_info->irq_pcpu); + if (drv_info->irq_pcpu) { free_percpu(drv_info->irq_pcpu); drv_info->irq_pcpu = NULL; } } -static int ffa_init_pcpu_irq(unsigned int irq) +static int ffa_init_pcpu_irq(void) { struct ffa_pcpu_irq __percpu *irq_pcpu; int ret, cpu; @@ -1402,13 +1477,31 @@ static int ffa_init_pcpu_irq(unsigned int irq) drv_info->irq_pcpu = irq_pcpu; - ret = request_percpu_irq(irq, irq_handler, "ARM-FFA", irq_pcpu); - if (ret) { - pr_err("Error registering notification IRQ %d: %d\n", irq, ret); - return ret; + if (drv_info->sched_recv_irq) { + ret = request_percpu_irq(drv_info->sched_recv_irq, + ffa_sched_recv_irq_handler, + "ARM-FFA-SRI", irq_pcpu); + if (ret) { + pr_err("Error registering percpu SRI nIRQ %d : %d\n", + drv_info->sched_recv_irq, ret); + drv_info->sched_recv_irq = 0; + return ret; + 
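/*
 * Illustrative sketch, not part of the patch above: a hypothetical FF-A
 * client using the new indirect_send message op that the hunks above wire
 * up to ffa_msg_send2(). The function name and payload are invented; only
 * the op and its (dev, buf, size) signature come from the changes above.
 */
#include <linux/arm_ffa.h>

static int example_ffa_indirect_ping(struct ffa_device *ffa_dev)
{
	u8 payload[] = { 0x01, 0x02, 0x03, 0x04 };

	/*
	 * Copies the payload behind the indirect message header into the
	 * driver TX buffer and issues FFA_MSG_SEND2 to the partition
	 * backing ffa_dev; returns -ERANGE if the payload does not fit.
	 */
	return ffa_dev->ops->msg_ops->indirect_send(ffa_dev, payload,
						    sizeof(payload));
}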
} } - INIT_WORK(&drv_info->irq_work, ffa_sched_recv_irq_work_fn); + if (drv_info->notif_pend_irq) { + ret = request_percpu_irq(drv_info->notif_pend_irq, + notif_pend_irq_handler, + "ARM-FFA-NPI", irq_pcpu); + if (ret) { + pr_err("Error registering percpu NPI nIRQ %d : %d\n", + drv_info->notif_pend_irq, ret); + drv_info->notif_pend_irq = 0; + return ret; + } + } + + INIT_WORK(&drv_info->sched_recv_irq_work, ffa_sched_recv_irq_work_fn); INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn); drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification"); if (!drv_info->notif_pcpu_wq) @@ -1428,7 +1521,10 @@ static int ffa_init_pcpu_irq(unsigned int irq) static void ffa_notifications_cleanup(void) { ffa_uninit_pcpu_irq(); - ffa_sched_recv_irq_unmap(); + ffa_irq_unmap(drv_info->sched_recv_irq); + drv_info->sched_recv_irq = 0; + ffa_irq_unmap(drv_info->notif_pend_irq); + drv_info->notif_pend_irq = 0; if (drv_info->bitmap_created) { ffa_notification_bitmap_destroy(); @@ -1439,30 +1535,31 @@ static void ffa_notifications_cleanup(void) static void ffa_notifications_setup(void) { - int ret, irq; + int ret; ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL); - if (ret) { - pr_info("Notifications not supported, continuing with it ..\n"); - return; - } + if (!ret) { + ret = ffa_notification_bitmap_create(); + if (ret) { + pr_err("Notification bitmap create error %d\n", ret); + return; + } - ret = ffa_notification_bitmap_create(); - if (ret) { - pr_info("Notification bitmap create error %d\n", ret); - return; + drv_info->bitmap_created = true; } - drv_info->bitmap_created = true; - irq = ffa_sched_recv_irq_map(); - if (irq <= 0) { - ret = irq; - goto cleanup; - } + ret = ffa_irq_map(FFA_FEAT_SCHEDULE_RECEIVER_INT); + if (ret > 0) + drv_info->sched_recv_irq = ret; + + ret = ffa_irq_map(FFA_FEAT_NOTIFICATION_PENDING_INT); + if (ret > 0) + drv_info->notif_pend_irq = ret; - drv_info->sched_recv_irq = irq; + if (!drv_info->sched_recv_irq && !drv_info->notif_pend_irq) + goto cleanup; - ret = ffa_init_pcpu_irq(irq); + ret = ffa_init_pcpu_irq(); if (ret) goto cleanup; diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index a7bc479651..fd59f58ce8 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -10,7 +10,8 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o -scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o +scmi-protocols-y := base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o +scmi-protocols-y += pinctrl.o scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y) obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 6affbfdd1d..b5ac25dbc1 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -301,6 +301,17 @@ extern const struct scmi_desc scmi_optee_desc; void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv); +enum scmi_bad_msg { + MSG_UNEXPECTED = -1, + MSG_INVALID = -2, + MSG_UNKNOWN = -3, + MSG_NOMEM = -4, + MSG_MBOX_SPURIOUS = -5, +}; + +void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr, + enum scmi_bad_msg err); + /* shmem related declarations */ struct 
scmi_shared_mem; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 2709598f30..6b6957f474 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "common.h" #include "notify.h" @@ -44,8 +45,7 @@ static DEFINE_IDA(scmi_id); -static DEFINE_IDR(scmi_protocols); -static DEFINE_SPINLOCK(protocol_lock); +static DEFINE_XARRAY(scmi_protocols); /* List of all SCMI devices active in system */ static LIST_HEAD(scmi_list); @@ -194,11 +194,94 @@ struct scmi_info { #define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb) #define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb) -static const struct scmi_protocol *scmi_protocol_get(int protocol_id) +static unsigned long +scmi_vendor_protocol_signature(unsigned int protocol_id, char *vendor_id, + char *sub_vendor_id, u32 impl_ver) { - const struct scmi_protocol *proto; + char *signature, *p; + unsigned long hash = 0; - proto = idr_find(&scmi_protocols, protocol_id); + /* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */ + signature = kasprintf(GFP_KERNEL, "%02X|%s|%s|0x%08X", protocol_id, + vendor_id ?: "", sub_vendor_id ?: "", impl_ver); + if (!signature) + return 0; + + p = signature; + while (*p) + hash = partial_name_hash(tolower(*p++), hash); + hash = end_name_hash(hash); + + kfree(signature); + + return hash; +} + +static unsigned long +scmi_protocol_key_calculate(int protocol_id, char *vendor_id, + char *sub_vendor_id, u32 impl_ver) +{ + if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE) + return protocol_id; + else + return scmi_vendor_protocol_signature(protocol_id, vendor_id, + sub_vendor_id, impl_ver); +} + +static const struct scmi_protocol * +__scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id, + char *sub_vendor_id, u32 impl_ver) +{ + unsigned long key; + struct scmi_protocol *proto = NULL; + + key = scmi_protocol_key_calculate(protocol_id, vendor_id, + sub_vendor_id, impl_ver); + if (key) + proto = xa_load(&scmi_protocols, key); + + return proto; +} + +static const struct scmi_protocol * +scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id, + char *sub_vendor_id, u32 impl_ver) +{ + const struct scmi_protocol *proto = NULL; + + /* Searching for closest match ...*/ + proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id, + sub_vendor_id, impl_ver); + if (proto) + return proto; + + /* Any match just on vendor/sub_vendor ? */ + if (impl_ver) { + proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id, + sub_vendor_id, 0); + if (proto) + return proto; + } + + /* Any match just on the vendor ? 
*/ + if (sub_vendor_id) + proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id, + NULL, 0); + return proto; +} + +static const struct scmi_protocol * +scmi_protocol_get(int protocol_id, struct scmi_revision_info *version) +{ + const struct scmi_protocol *proto = NULL; + + if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE) + proto = xa_load(&scmi_protocols, protocol_id); + else + proto = scmi_vendor_protocol_lookup(protocol_id, + version->vendor_id, + version->sub_vendor_id, + version->impl_ver); if (!proto || !try_module_get(proto->owner)) { pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id); return NULL; @@ -206,21 +289,46 @@ static const struct scmi_protocol *scmi_protocol_get(int protocol_id) pr_debug("Found SCMI Protocol 0x%x\n", protocol_id); + if (protocol_id >= SCMI_PROTOCOL_VENDOR_BASE) + pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n", + protocol_id, proto->vendor_id ?: "", + proto->sub_vendor_id ?: "", proto->impl_ver); + return proto; } -static void scmi_protocol_put(int protocol_id) +static void scmi_protocol_put(const struct scmi_protocol *proto) { - const struct scmi_protocol *proto; - - proto = idr_find(&scmi_protocols, protocol_id); if (proto) module_put(proto->owner); } +static int scmi_vendor_protocol_check(const struct scmi_protocol *proto) +{ + if (!proto->vendor_id) { + pr_err("missing vendor_id for protocol 0x%x\n", proto->id); + return -EINVAL; + } + + if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) { + pr_err("malformed vendor_id for protocol 0x%x\n", proto->id); + return -EINVAL; + } + + if (proto->sub_vendor_id && + strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) { + pr_err("malformed sub_vendor_id for protocol 0x%x\n", + proto->id); + return -EINVAL; + } + + return 0; +} + int scmi_protocol_register(const struct scmi_protocol *proto) { int ret; + unsigned long key; if (!proto) { pr_err("invalid protocol\n"); @@ -232,12 +340,23 @@ int scmi_protocol_register(const struct scmi_protocol *proto) return -EINVAL; } - spin_lock(&protocol_lock); - ret = idr_alloc(&scmi_protocols, (void *)proto, - proto->id, proto->id + 1, GFP_ATOMIC); - spin_unlock(&protocol_lock); - if (ret != proto->id) { - pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n", + if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE && + scmi_vendor_protocol_check(proto)) + return -EINVAL; + + /* + * Calculate a protocol key to register this protocol with the core; + * key value 0 is considered invalid. 
+ */ + key = scmi_protocol_key_calculate(proto->id, proto->vendor_id, + proto->sub_vendor_id, + proto->impl_ver); + if (!key) + return -EINVAL; + + ret = xa_insert(&scmi_protocols, key, (void *)proto, GFP_KERNEL); + if (ret) { + pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n", proto->id, ret); return ret; } @@ -250,9 +369,15 @@ EXPORT_SYMBOL_GPL(scmi_protocol_register); void scmi_protocol_unregister(const struct scmi_protocol *proto) { - spin_lock(&protocol_lock); - idr_remove(&scmi_protocols, proto->id); - spin_unlock(&protocol_lock); + unsigned long key; + + key = scmi_protocol_key_calculate(proto->id, proto->vendor_id, + proto->sub_vendor_id, + proto->impl_ver); + if (!key) + return; + + xa_erase(&scmi_protocols, key); pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id); } @@ -696,6 +821,45 @@ scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id) return xfer ?: ERR_PTR(-EINVAL); } +/** + * scmi_bad_message_trace - A helper to trace weird messages + * + * @cinfo: A reference to the channel descriptor on which the message was + * received + * @msg_hdr: Message header to track + * @err: A specific error code used as a status value in traces. + * + * This helper can be used to trace any kind of weird, incomplete, unexpected, + * timed-out message that arrives and as such, can be traced only referring to + * the header content, since the payload is missing/unreliable. + */ +void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr, + enum scmi_bad_msg err) +{ + char *tag; + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + + switch (MSG_XTRACT_TYPE(msg_hdr)) { + case MSG_TYPE_COMMAND: + tag = "!RESP"; + break; + case MSG_TYPE_DELAYED_RESP: + tag = "!DLYD"; + break; + case MSG_TYPE_NOTIFICATION: + tag = "!NOTI"; + break; + default: + tag = "!UNKN"; + break; + } + + trace_scmi_msg_dump(info->id, cinfo->id, + MSG_XTRACT_PROT_ID(msg_hdr), + MSG_XTRACT_ID(msg_hdr), tag, + MSG_XTRACT_TOKEN(msg_hdr), err, NULL, 0); +} + /** * scmi_msg_response_validate - Validate message type against state of related * xfer @@ -822,6 +986,9 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) "Message for %d type %d is not expected!\n", xfer_id, msg_type); spin_unlock_irqrestore(&minfo->xfer_lock, flags); + + scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED); + return xfer; } refcount_inc(&xfer->users); @@ -846,6 +1013,9 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) dev_err(cinfo->dev, "Invalid message type:%d for %d - HDR:0x%X state:%d\n", msg_type, xfer_id, msg_hdr, xfer->state); + + scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID); + /* On error the refcount incremented above has to be dropped */ __scmi_xfer_put(minfo, xfer); xfer = ERR_PTR(-EINVAL); @@ -882,6 +1052,9 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, if (IS_ERR(xfer)) { dev_err(dev, "failed to get free message slot (%ld)\n", PTR_ERR(xfer)); + + scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM); + scmi_clear_channel(info, cinfo); return; } @@ -1001,6 +1174,7 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv) break; default: WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type); + scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNKNOWN); break; } } @@ -1488,6 +1662,20 @@ out: return ret; } +/** + * scmi_common_get_max_msg_size - Get maximum message size + * @ph: A protocol handle reference. + * + * Return: Maximum message size for the current protocol. 
+ */ +static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph) +{ + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + struct scmi_info *info = handle_to_scmi_info(pi->handle); + + return info->desc->max_msg_size; +} + /** * struct scmi_iterator - Iterator descriptor * @msg: A reference to the message TX buffer; filled by @prepare_message with @@ -1799,6 +1987,7 @@ static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph, static const struct scmi_proto_helpers_ops helpers_ops = { .extended_name_get = scmi_common_extended_name_get, + .get_max_msg_size = scmi_common_get_max_msg_size, .iter_response_init = scmi_iterator_init, .iter_response_run = scmi_iterator_run, .protocol_msg_check = scmi_protocol_msg_check, @@ -1891,7 +2080,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info, /* Protocol specific devres group */ gid = devres_open_group(handle->dev, NULL, GFP_KERNEL); if (!gid) { - scmi_protocol_put(proto->id); + scmi_protocol_put(proto); goto out; } @@ -1955,7 +2144,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info, clean: /* Take care to put the protocol module's owner before releasing all */ - scmi_protocol_put(proto->id); + scmi_protocol_put(proto); devres_release_group(handle->dev, gid); out: return ERR_PTR(ret); @@ -1989,7 +2178,7 @@ scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id) const struct scmi_protocol *proto; /* Fails if protocol not registered on bus */ - proto = scmi_protocol_get(protocol_id); + proto = scmi_protocol_get(protocol_id, &info->version); if (proto) pi = scmi_alloc_init_protocol_instance(info, proto); else @@ -2044,7 +2233,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id) idr_remove(&info->protocols, protocol_id); - scmi_protocol_put(protocol_id); + scmi_protocol_put(pi->proto); devres_release_group(handle->dev, gid); dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n", @@ -2491,6 +2680,10 @@ scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node, ret = 0; } + if (ret) + dev_err(info->dev, + "failed to setup channel for protocol:0x%X\n", prot_id); + return ret; } @@ -2760,6 +2953,7 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info) static int scmi_probe(struct platform_device *pdev) { int ret; + char *err_str = "probe failure\n"; struct scmi_handle *handle; const struct scmi_desc *desc; struct scmi_info *info; @@ -2810,27 +3004,37 @@ static int scmi_probe(struct platform_device *pdev) if (desc->ops->link_supplier) { ret = desc->ops->link_supplier(dev); - if (ret) + if (ret) { + err_str = "transport not ready\n"; goto clear_ida; + } } /* Setup all channels described in the DT at first */ ret = scmi_channels_setup(info); - if (ret) + if (ret) { + err_str = "failed to setup channels\n"; goto clear_ida; + } ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb); - if (ret) + if (ret) { + err_str = "failed to register bus notifier\n"; goto clear_txrx_setup; + } ret = blocking_notifier_chain_register(&scmi_requested_devices_nh, &info->dev_req_nb); - if (ret) + if (ret) { + err_str = "failed to register device notifier\n"; goto clear_bus_notifier; + } ret = scmi_xfer_info_init(info); - if (ret) + if (ret) { + err_str = "failed to init xfers pool\n"; goto clear_dev_req_notifier; + } if (scmi_top_dentry) { info->dbg = scmi_debugfs_common_setup(info); @@ -2867,9 +3071,11 @@ static int scmi_probe(struct platform_device *pdev) */ ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE); if (ret) { - dev_err(dev, 
"unable to communicate with SCMI\n"); - if (coex) + err_str = "unable to communicate with SCMI\n"; + if (coex) { + dev_err(dev, "%s", err_str); return 0; + } goto notification_exit; } @@ -2923,7 +3129,8 @@ clear_txrx_setup: scmi_cleanup_txrx_channels(info); clear_ida: ida_free(&scmi_id, info->id); - return ret; + + return dev_err_probe(dev, ret, "%s", err_str); } static void scmi_remove(struct platform_device *pdev) @@ -3127,6 +3334,7 @@ static int __init scmi_driver_init(void) scmi_voltage_register(); scmi_system_register(); scmi_powercap_register(); + scmi_pinctrl_register(); return platform_driver_register(&scmi_driver); } @@ -3144,6 +3352,7 @@ static void __exit scmi_driver_exit(void) scmi_voltage_unregister(); scmi_system_unregister(); scmi_powercap_unregister(); + scmi_pinctrl_unregister(); scmi_transports_exit(); diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index b8d470417e..615a3b2ad8 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -56,6 +56,9 @@ static void rx_callback(struct mbox_client *cl, void *m) */ if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) { dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n"); + scmi_bad_message_trace(smbox->cinfo, + shmem_read_header(smbox->shmem), + MSG_MBOX_SPURIOUS); return; } diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c index 27c5253119..e160ecb229 100644 --- a/drivers/firmware/arm_scmi/notify.c +++ b/drivers/firmware/arm_scmi/notify.c @@ -1513,17 +1513,12 @@ static int scmi_devm_notifier_register(struct scmi_device *sdev, static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) { struct scmi_notifier_devres *dres = res; - struct scmi_notifier_devres *xres = data; + struct notifier_block *nb = data; - if (WARN_ON(!dres || !xres)) + if (WARN_ON(!dres || !nb)) return 0; - return dres->proto_id == xres->proto_id && - dres->evt_id == xres->evt_id && - dres->nb == xres->nb && - ((!dres->src_id && !xres->src_id) || - (dres->src_id && xres->src_id && - dres->__src_id == xres->__src_id)); + return dres->nb == nb; } /** @@ -1531,10 +1526,6 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) * notifier_block for an event * @sdev: A reference to an scmi_device whose embedded struct device is to * be used for devres accounting. 
- * @proto_id: Protocol ID - * @evt_id: Event ID - * @src_id: Source ID, when NULL register for events coming form ALL possible - * sources * @nb: A standard notifier block to register for the specified event * * Generic devres managed helper to explicitly un-register a notifier_block @@ -1544,25 +1535,12 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) * Return: 0 on Success */ static int scmi_devm_notifier_unregister(struct scmi_device *sdev, - u8 proto_id, u8 evt_id, - const u32 *src_id, struct notifier_block *nb) { int ret; - struct scmi_notifier_devres dres; - - dres.handle = sdev->handle; - dres.proto_id = proto_id; - dres.evt_id = evt_id; - if (src_id) { - dres.__src_id = *src_id; - dres.src_id = &dres.__src_id; - } else { - dres.src_id = NULL; - } ret = devres_release(&sdev->dev, scmi_devm_release_notifier, - scmi_devm_notifier_match, &dres); + scmi_devm_notifier_match, nb); WARN_ON(ret); diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 345fff167b..4b7f1cbb9b 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -387,8 +387,8 @@ process_response_opp(struct device *dev, struct perf_dom_info *dom, ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL); if (ret) - dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n", - opp->perf, ret); + dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n", + opp->perf, dom->info.name, ret); } static inline void @@ -405,8 +405,8 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom, ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL); if (ret) - dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n", - opp->perf, ret); + dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n", + opp->perf, dom->info.name, ret); /* Note that PERF v4 reports always five 32-bit words */ opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq); @@ -417,8 +417,8 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom, GFP_KERNEL); if (ret) dev_warn(dev, - "Failed to add opps_by_idx at %d - ret:%d\n", - opp->level_index, ret); + "Failed to add opps_by_idx at %d for %s - ret:%d\n", + opp->level_index, dom->info.name, ret); hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq); } @@ -879,7 +879,8 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph, ret = dev_pm_opp_add_dynamic(dev, &data); if (ret) { - dev_warn(dev, "failed to add opp %luHz\n", freq); + dev_warn(dev, "[%d][%s]: Failed to add OPP[%d] %lu\n", + domain, dom->info.name, idx, freq); dev_pm_opp_remove_all_dynamic(dev); return ret; } diff --git a/drivers/firmware/arm_scmi/pinctrl.c b/drivers/firmware/arm_scmi/pinctrl.c new file mode 100644 index 0000000000..a2a7f880d6 --- /dev/null +++ b/drivers/firmware/arm_scmi/pinctrl.c @@ -0,0 +1,916 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Pinctrl Protocol + * + * Copyright (C) 2024 EPAM + * Copyright 2024 NXP + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "protocols.h" + +/* Updated only after ALL the mandatory features for that version are merged */ +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000 + +#define GET_GROUPS_NR(x) le32_get_bits((x), GENMASK(31, 16)) +#define GET_PINS_NR(x) le32_get_bits((x), GENMASK(15, 0)) +#define GET_FUNCTIONS_NR(x) le32_get_bits((x), GENMASK(15, 0)) + +#define 
EXT_NAME_FLAG(x) le32_get_bits((x), BIT(31)) +#define NUM_ELEMS(x) le32_get_bits((x), GENMASK(15, 0)) + +#define REMAINING(x) le32_get_bits((x), GENMASK(31, 16)) +#define RETURNED(x) le32_get_bits((x), GENMASK(11, 0)) + +#define CONFIG_FLAG_MASK GENMASK(19, 18) +#define SELECTOR_MASK GENMASK(17, 16) +#define SKIP_CONFIGS_MASK GENMASK(15, 8) +#define CONFIG_TYPE_MASK GENMASK(7, 0) + +enum scmi_pinctrl_protocol_cmd { + PINCTRL_ATTRIBUTES = 0x3, + PINCTRL_LIST_ASSOCIATIONS = 0x4, + PINCTRL_SETTINGS_GET = 0x5, + PINCTRL_SETTINGS_CONFIGURE = 0x6, + PINCTRL_REQUEST = 0x7, + PINCTRL_RELEASE = 0x8, + PINCTRL_NAME_GET = 0x9, + PINCTRL_SET_PERMISSIONS = 0xa, +}; + +struct scmi_msg_settings_conf { + __le32 identifier; + __le32 function_id; + __le32 attributes; + __le32 configs[]; +}; + +struct scmi_msg_settings_get { + __le32 identifier; + __le32 attributes; +}; + +struct scmi_resp_settings_get { + __le32 function_selected; + __le32 num_configs; + __le32 configs[]; +}; + +struct scmi_msg_pinctrl_protocol_attributes { + __le32 attributes_low; + __le32 attributes_high; +}; + +struct scmi_msg_pinctrl_attributes { + __le32 identifier; + __le32 flags; +}; + +struct scmi_resp_pinctrl_attributes { + __le32 attributes; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; +}; + +struct scmi_msg_pinctrl_list_assoc { + __le32 identifier; + __le32 flags; + __le32 index; +}; + +struct scmi_resp_pinctrl_list_assoc { + __le32 flags; + __le16 array[]; +}; + +struct scmi_msg_request { + __le32 identifier; + __le32 flags; +}; + +struct scmi_group_info { + char name[SCMI_MAX_STR_SIZE]; + bool present; + u32 *group_pins; + u32 nr_pins; +}; + +struct scmi_function_info { + char name[SCMI_MAX_STR_SIZE]; + bool present; + u32 *groups; + u32 nr_groups; +}; + +struct scmi_pin_info { + char name[SCMI_MAX_STR_SIZE]; + bool present; +}; + +struct scmi_pinctrl_info { + u32 version; + int nr_groups; + int nr_functions; + int nr_pins; + struct scmi_group_info *groups; + struct scmi_function_info *functions; + struct scmi_pin_info *pins; +}; + +static int scmi_pinctrl_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_pinctrl_info *pi) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_pinctrl_protocol_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + pi->nr_functions = GET_FUNCTIONS_NR(attr->attributes_high); + pi->nr_groups = GET_GROUPS_NR(attr->attributes_low); + pi->nr_pins = GET_PINS_NR(attr->attributes_low); + if (pi->nr_pins == 0) { + dev_warn(ph->dev, "returned zero pins\n"); + ret = -EINVAL; + } + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_pinctrl_count_get(const struct scmi_protocol_handle *ph, + enum scmi_pinctrl_selector_type type) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + switch (type) { + case PIN_TYPE: + return pi->nr_pins; + case GROUP_TYPE: + return pi->nr_groups; + case FUNCTION_TYPE: + return pi->nr_functions; + default: + return -EINVAL; + } +} + +static int scmi_pinctrl_validate_id(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type) +{ + int value; + + value = scmi_pinctrl_count_get(ph, type); + if (value < 0) + return value; + + if (selector >= value || value == 0) + return -EINVAL; + + return 0; +} + +static int scmi_pinctrl_attributes(const struct scmi_protocol_handle *ph, + enum scmi_pinctrl_selector_type type, + u32 selector, char *name, + u32 *n_elems) +{ + int ret; + 
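	/*
	 * Worked example with made-up values for the PINCTRL_ATTRIBUTES
	 * reply handled below: if rx->attributes comes back as 0x80000004,
	 * EXT_NAME_FLAG(rx->attributes) = 1, so the short name is later
	 * replaced via PINCTRL_NAME_GET, and NUM_ELEMS(rx->attributes) = 4,
	 * i.e. the selected group or function has four associated elements.
	 */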
struct scmi_xfer *t; + struct scmi_msg_pinctrl_attributes *tx; + struct scmi_resp_pinctrl_attributes *rx; + bool ext_name_flag; + + if (!name) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, selector, type); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, PINCTRL_ATTRIBUTES, sizeof(*tx), + sizeof(*rx), &t); + if (ret) + return ret; + + tx = t->tx.buf; + rx = t->rx.buf; + tx->identifier = cpu_to_le32(selector); + tx->flags = cpu_to_le32(type); + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + if (n_elems) + *n_elems = NUM_ELEMS(rx->attributes); + + strscpy(name, rx->name, SCMI_SHORT_NAME_MAX_SIZE); + + ext_name_flag = !!EXT_NAME_FLAG(rx->attributes); + } + + ph->xops->xfer_put(ph, t); + + if (ret) + return ret; + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. + */ + if (ext_name_flag) + ret = ph->hops->extended_name_get(ph, PINCTRL_NAME_GET, + selector, (u32 *)&type, name, + SCMI_MAX_STR_SIZE); + return ret; +} + +struct scmi_pinctrl_ipriv { + u32 selector; + enum scmi_pinctrl_selector_type type; + u32 *array; +}; + +static void iter_pinctrl_assoc_prepare_message(void *message, + u32 desc_index, + const void *priv) +{ + struct scmi_msg_pinctrl_list_assoc *msg = message; + const struct scmi_pinctrl_ipriv *p = priv; + + msg->identifier = cpu_to_le32(p->selector); + msg->flags = cpu_to_le32(p->type); + msg->index = cpu_to_le32(desc_index); +} + +static int iter_pinctrl_assoc_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + const struct scmi_resp_pinctrl_list_assoc *r = response; + + st->num_returned = RETURNED(r->flags); + st->num_remaining = REMAINING(r->flags); + + return 0; +} + +static int +iter_pinctrl_assoc_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + const struct scmi_resp_pinctrl_list_assoc *r = response; + struct scmi_pinctrl_ipriv *p = priv; + + p->array[st->desc_index + st->loop_idx] = + le16_to_cpu(r->array[st->loop_idx]); + + return 0; +} + +static int scmi_pinctrl_list_associations(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + u16 size, u32 *array) +{ + int ret; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_pinctrl_assoc_prepare_message, + .update_state = iter_pinctrl_assoc_update_state, + .process_response = iter_pinctrl_assoc_process_response, + }; + struct scmi_pinctrl_ipriv ipriv = { + .selector = selector, + .type = type, + .array = array, + }; + + if (!array || !size || type == PIN_TYPE) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, selector, type); + if (ret) + return ret; + + iter = ph->hops->iter_response_init(ph, &ops, size, + PINCTRL_LIST_ASSOCIATIONS, + sizeof(struct scmi_msg_pinctrl_list_assoc), + &ipriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + return ph->hops->iter_response_run(iter); +} + +struct scmi_settings_get_ipriv { + u32 selector; + enum scmi_pinctrl_selector_type type; + bool get_all; + unsigned int *nr_configs; + enum scmi_pinctrl_conf_type *config_types; + u32 *config_values; +}; + +static void +iter_pinctrl_settings_get_prepare_message(void *message, u32 desc_index, + const void *priv) +{ + struct scmi_msg_settings_get *msg = message; + const struct scmi_settings_get_ipriv *p = priv; + u32 attributes; + + attributes = FIELD_PREP(SELECTOR_MASK, p->type); + + if (p->get_all) { + attributes |= FIELD_PREP(CONFIG_FLAG_MASK, 1) | + 
FIELD_PREP(SKIP_CONFIGS_MASK, desc_index); + } else { + attributes |= FIELD_PREP(CONFIG_TYPE_MASK, p->config_types[0]); + } + + msg->attributes = cpu_to_le32(attributes); + msg->identifier = cpu_to_le32(p->selector); +} + +static int +iter_pinctrl_settings_get_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + const struct scmi_resp_settings_get *r = response; + struct scmi_settings_get_ipriv *p = priv; + + if (p->get_all) { + st->num_returned = le32_get_bits(r->num_configs, GENMASK(7, 0)); + st->num_remaining = le32_get_bits(r->num_configs, GENMASK(31, 24)); + } else { + st->num_returned = 1; + st->num_remaining = 0; + } + + return 0; +} + +static int +iter_pinctrl_settings_get_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, + void *priv) +{ + const struct scmi_resp_settings_get *r = response; + struct scmi_settings_get_ipriv *p = priv; + u32 type = le32_get_bits(r->configs[st->loop_idx * 2], GENMASK(7, 0)); + u32 val = le32_to_cpu(r->configs[st->loop_idx * 2 + 1]); + + if (p->get_all) { + p->config_types[st->desc_index + st->loop_idx] = type; + } else { + if (p->config_types[0] != type) + return -EINVAL; + } + + p->config_values[st->desc_index + st->loop_idx] = val; + ++*p->nr_configs; + + return 0; +} + +static int +scmi_pinctrl_settings_get(const struct scmi_protocol_handle *ph, u32 selector, + enum scmi_pinctrl_selector_type type, + unsigned int *nr_configs, + enum scmi_pinctrl_conf_type *config_types, + u32 *config_values) +{ + int ret; + void *iter; + unsigned int max_configs = *nr_configs; + struct scmi_iterator_ops ops = { + .prepare_message = iter_pinctrl_settings_get_prepare_message, + .update_state = iter_pinctrl_settings_get_update_state, + .process_response = iter_pinctrl_settings_get_process_response, + }; + struct scmi_settings_get_ipriv ipriv = { + .selector = selector, + .type = type, + .get_all = (max_configs > 1), + .nr_configs = nr_configs, + .config_types = config_types, + .config_values = config_values, + }; + + if (!config_types || !config_values || type == FUNCTION_TYPE) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, selector, type); + if (ret) + return ret; + + /* Prepare to count returned configs */ + *nr_configs = 0; + iter = ph->hops->iter_response_init(ph, &ops, max_configs, + PINCTRL_SETTINGS_GET, + sizeof(struct scmi_msg_settings_get), + &ipriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + return ph->hops->iter_response_run(iter); +} + +static int scmi_pinctrl_settings_get_one(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + enum scmi_pinctrl_conf_type config_type, + u32 *config_value) +{ + unsigned int nr_configs = 1; + + return scmi_pinctrl_settings_get(ph, selector, type, &nr_configs, + &config_type, config_value); +} + +static int scmi_pinctrl_settings_get_all(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + unsigned int *nr_configs, + enum scmi_pinctrl_conf_type *config_types, + u32 *config_values) +{ + if (!nr_configs || *nr_configs == 0) + return -EINVAL; + + return scmi_pinctrl_settings_get(ph, selector, type, nr_configs, + config_types, config_values); +} + +static int +scmi_pinctrl_settings_conf(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + u32 nr_configs, + enum scmi_pinctrl_conf_type *config_type, + u32 *config_value) +{ + struct scmi_xfer *t; + struct scmi_msg_settings_conf *tx; + u32 
attributes; + int ret, i; + u32 configs_in_chunk, conf_num = 0; + u32 chunk; + int max_msg_size = ph->hops->get_max_msg_size(ph); + + if (!config_type || !config_value || type == FUNCTION_TYPE) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, selector, type); + if (ret) + return ret; + + configs_in_chunk = (max_msg_size - sizeof(*tx)) / (sizeof(__le32) * 2); + while (conf_num < nr_configs) { + chunk = (nr_configs - conf_num > configs_in_chunk) ? + configs_in_chunk : nr_configs - conf_num; + + ret = ph->xops->xfer_get_init(ph, PINCTRL_SETTINGS_CONFIGURE, + sizeof(*tx) + + chunk * 2 * sizeof(__le32), 0, &t); + if (ret) + break; + + tx = t->tx.buf; + tx->identifier = cpu_to_le32(selector); + tx->function_id = cpu_to_le32(0xFFFFFFFF); + attributes = FIELD_PREP(GENMASK(1, 0), type) | + FIELD_PREP(GENMASK(9, 2), chunk); + tx->attributes = cpu_to_le32(attributes); + + for (i = 0; i < chunk; i++) { + tx->configs[i * 2] = + cpu_to_le32(config_type[conf_num + i]); + tx->configs[i * 2 + 1] = + cpu_to_le32(config_value[conf_num + i]); + } + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + if (ret) + break; + + conf_num += chunk; + } + + return ret; +} + +static int scmi_pinctrl_function_select(const struct scmi_protocol_handle *ph, + u32 group, + enum scmi_pinctrl_selector_type type, + u32 function_id) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_settings_conf *tx; + u32 attributes; + + ret = scmi_pinctrl_validate_id(ph, group, type); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, PINCTRL_SETTINGS_CONFIGURE, + sizeof(*tx), 0, &t); + if (ret) + return ret; + + tx = t->tx.buf; + tx->identifier = cpu_to_le32(group); + tx->function_id = cpu_to_le32(function_id); + attributes = FIELD_PREP(GENMASK(1, 0), type) | BIT(10); + tx->attributes = cpu_to_le32(attributes); + + ret = ph->xops->do_xfer(ph, t); + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_pinctrl_request_free(const struct scmi_protocol_handle *ph, + u32 identifier, + enum scmi_pinctrl_selector_type type, + enum scmi_pinctrl_protocol_cmd cmd) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_request *tx; + + if (type == FUNCTION_TYPE) + return -EINVAL; + + if (cmd != PINCTRL_REQUEST && cmd != PINCTRL_RELEASE) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, identifier, type); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, cmd, sizeof(*tx), 0, &t); + if (ret) + return ret; + + tx = t->tx.buf; + tx->identifier = cpu_to_le32(identifier); + tx->flags = cpu_to_le32(type); + + ret = ph->xops->do_xfer(ph, t); + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_pinctrl_pin_request(const struct scmi_protocol_handle *ph, + u32 pin) +{ + return scmi_pinctrl_request_free(ph, pin, PIN_TYPE, PINCTRL_REQUEST); +} + +static int scmi_pinctrl_pin_free(const struct scmi_protocol_handle *ph, u32 pin) +{ + return scmi_pinctrl_request_free(ph, pin, PIN_TYPE, PINCTRL_RELEASE); +} + +static int scmi_pinctrl_get_group_info(const struct scmi_protocol_handle *ph, + u32 selector, + struct scmi_group_info *group) +{ + int ret; + + ret = scmi_pinctrl_attributes(ph, GROUP_TYPE, selector, group->name, + &group->nr_pins); + if (ret) + return ret; + + if (!group->nr_pins) { + dev_err(ph->dev, "Group %d has 0 elements", selector); + return -ENODATA; + } + + group->group_pins = kmalloc_array(group->nr_pins, + sizeof(*group->group_pins), + GFP_KERNEL); + if (!group->group_pins) + return -ENOMEM; + + ret = scmi_pinctrl_list_associations(ph, selector, GROUP_TYPE, + 
group->nr_pins, group->group_pins); + if (ret) { + kfree(group->group_pins); + return ret; + } + + group->present = true; + return 0; +} + +static int scmi_pinctrl_get_group_name(const struct scmi_protocol_handle *ph, + u32 selector, const char **name) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!name) + return -EINVAL; + + if (selector >= pi->nr_groups || pi->nr_groups == 0) + return -EINVAL; + + if (!pi->groups[selector].present) { + int ret; + + ret = scmi_pinctrl_get_group_info(ph, selector, + &pi->groups[selector]); + if (ret) + return ret; + } + + *name = pi->groups[selector].name; + + return 0; +} + +static int scmi_pinctrl_group_pins_get(const struct scmi_protocol_handle *ph, + u32 selector, const u32 **pins, + u32 *nr_pins) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!pins || !nr_pins) + return -EINVAL; + + if (selector >= pi->nr_groups || pi->nr_groups == 0) + return -EINVAL; + + if (!pi->groups[selector].present) { + int ret; + + ret = scmi_pinctrl_get_group_info(ph, selector, + &pi->groups[selector]); + if (ret) + return ret; + } + + *pins = pi->groups[selector].group_pins; + *nr_pins = pi->groups[selector].nr_pins; + + return 0; +} + +static int scmi_pinctrl_get_function_info(const struct scmi_protocol_handle *ph, + u32 selector, + struct scmi_function_info *func) +{ + int ret; + + ret = scmi_pinctrl_attributes(ph, FUNCTION_TYPE, selector, func->name, + &func->nr_groups); + if (ret) + return ret; + + if (!func->nr_groups) { + dev_err(ph->dev, "Function %d has 0 elements", selector); + return -ENODATA; + } + + func->groups = kmalloc_array(func->nr_groups, sizeof(*func->groups), + GFP_KERNEL); + if (!func->groups) + return -ENOMEM; + + ret = scmi_pinctrl_list_associations(ph, selector, FUNCTION_TYPE, + func->nr_groups, func->groups); + if (ret) { + kfree(func->groups); + return ret; + } + + func->present = true; + return 0; +} + +static int scmi_pinctrl_get_function_name(const struct scmi_protocol_handle *ph, + u32 selector, const char **name) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!name) + return -EINVAL; + + if (selector >= pi->nr_functions || pi->nr_functions == 0) + return -EINVAL; + + if (!pi->functions[selector].present) { + int ret; + + ret = scmi_pinctrl_get_function_info(ph, selector, + &pi->functions[selector]); + if (ret) + return ret; + } + + *name = pi->functions[selector].name; + return 0; +} + +static int +scmi_pinctrl_function_groups_get(const struct scmi_protocol_handle *ph, + u32 selector, u32 *nr_groups, + const u32 **groups) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!groups || !nr_groups) + return -EINVAL; + + if (selector >= pi->nr_functions || pi->nr_functions == 0) + return -EINVAL; + + if (!pi->functions[selector].present) { + int ret; + + ret = scmi_pinctrl_get_function_info(ph, selector, + &pi->functions[selector]); + if (ret) + return ret; + } + + *groups = pi->functions[selector].groups; + *nr_groups = pi->functions[selector].nr_groups; + + return 0; +} + +static int scmi_pinctrl_mux_set(const struct scmi_protocol_handle *ph, + u32 selector, u32 group) +{ + return scmi_pinctrl_function_select(ph, group, GROUP_TYPE, selector); +} + +static int scmi_pinctrl_get_pin_info(const struct scmi_protocol_handle *ph, + u32 selector, struct scmi_pin_info *pin) +{ + int ret; + + if (!pin) + return -EINVAL; + + ret = scmi_pinctrl_attributes(ph, PIN_TYPE, selector, pin->name, NULL); + if (ret) + return ret; + + pin->present = true; + return 0; +} + +static int 
scmi_pinctrl_get_pin_name(const struct scmi_protocol_handle *ph, + u32 selector, const char **name) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!name) + return -EINVAL; + + if (selector >= pi->nr_pins) + return -EINVAL; + + if (!pi->pins[selector].present) { + int ret; + + ret = scmi_pinctrl_get_pin_info(ph, selector, &pi->pins[selector]); + if (ret) + return ret; + } + + *name = pi->pins[selector].name; + + return 0; +} + +static int scmi_pinctrl_name_get(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + const char **name) +{ + switch (type) { + case PIN_TYPE: + return scmi_pinctrl_get_pin_name(ph, selector, name); + case GROUP_TYPE: + return scmi_pinctrl_get_group_name(ph, selector, name); + case FUNCTION_TYPE: + return scmi_pinctrl_get_function_name(ph, selector, name); + default: + return -EINVAL; + } +} + +static const struct scmi_pinctrl_proto_ops pinctrl_proto_ops = { + .count_get = scmi_pinctrl_count_get, + .name_get = scmi_pinctrl_name_get, + .group_pins_get = scmi_pinctrl_group_pins_get, + .function_groups_get = scmi_pinctrl_function_groups_get, + .mux_set = scmi_pinctrl_mux_set, + .settings_get_one = scmi_pinctrl_settings_get_one, + .settings_get_all = scmi_pinctrl_settings_get_all, + .settings_conf = scmi_pinctrl_settings_conf, + .pin_request = scmi_pinctrl_pin_request, + .pin_free = scmi_pinctrl_pin_free, +}; + +static int scmi_pinctrl_protocol_init(const struct scmi_protocol_handle *ph) +{ + int ret; + u32 version; + struct scmi_pinctrl_info *pinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "Pinctrl Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + ret = scmi_pinctrl_attributes_get(ph, pinfo); + if (ret) + return ret; + + pinfo->pins = devm_kcalloc(ph->dev, pinfo->nr_pins, + sizeof(*pinfo->pins), GFP_KERNEL); + if (!pinfo->pins) + return -ENOMEM; + + pinfo->groups = devm_kcalloc(ph->dev, pinfo->nr_groups, + sizeof(*pinfo->groups), GFP_KERNEL); + if (!pinfo->groups) + return -ENOMEM; + + pinfo->functions = devm_kcalloc(ph->dev, pinfo->nr_functions, + sizeof(*pinfo->functions), GFP_KERNEL); + if (!pinfo->functions) + return -ENOMEM; + + pinfo->version = version; + + return ph->set_priv(ph, pinfo, version); +} + +static int scmi_pinctrl_protocol_deinit(const struct scmi_protocol_handle *ph) +{ + int i; + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + /* Free groups_pins allocated in scmi_pinctrl_get_group_info */ + for (i = 0; i < pi->nr_groups; i++) { + if (pi->groups[i].present) { + kfree(pi->groups[i].group_pins); + pi->groups[i].present = false; + } + } + + /* Free groups allocated in scmi_pinctrl_get_function_info */ + for (i = 0; i < pi->nr_functions; i++) { + if (pi->functions[i].present) { + kfree(pi->functions[i].groups); + pi->functions[i].present = false; + } + } + + return 0; +} + +static const struct scmi_protocol scmi_pinctrl = { + .id = SCMI_PROTOCOL_PINCTRL, + .owner = THIS_MODULE, + .instance_init = &scmi_pinctrl_protocol_init, + .instance_deinit = &scmi_pinctrl_protocol_deinit, + .ops = &pinctrl_proto_ops, + .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION, +}; +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(pinctrl, scmi_pinctrl) diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h index 317d3fb326..8e95f53bd7 100644 --- a/drivers/firmware/arm_scmi/protocols.h +++ 
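/*
 * Rough consumer-side sketch, not part of the patch: how an SCMI pinctrl
 * driver would be expected to obtain the ops registered above. The probe
 * function name and the printed message are invented; count_get(),
 * PIN_TYPE, SCMI_PROTOCOL_PINCTRL and devm_protocol_get() are assumed to
 * come from the protocol implementation above and the standard SCMI
 * consumer interface in include/linux/scmi_protocol.h.
 */
#include <linux/scmi_protocol.h>

static int example_scmi_pinctrl_probe(struct scmi_device *sdev)
{
	const struct scmi_pinctrl_proto_ops *ops;
	struct scmi_protocol_handle *ph;
	int npins;

	if (!sdev->handle)
		return -ENODEV;

	/* Ties the protocol instance to the scmi_device lifetime (devres) */
	ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PINCTRL, &ph);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	npins = ops->count_get(ph, PIN_TYPE);
	if (npins < 0)
		return npins;

	dev_info(&sdev->dev, "SCMI platform exposes %d pins\n", npins);

	return 0;
}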
b/drivers/firmware/arm_scmi/protocols.h @@ -29,6 +29,8 @@ #define PROTOCOL_REV_MAJOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))) #define PROTOCOL_REV_MINOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))) +#define SCMI_PROTOCOL_VENDOR_BASE 0x80 + enum scmi_common_cmd { PROTOCOL_VERSION = 0x0, PROTOCOL_ATTRIBUTES = 0x1, @@ -258,6 +260,7 @@ struct scmi_fc_info { * @fastchannel_init: A common helper used to initialize FC descriptors by * gathering FC descriptions from the SCMI platform server. * @fastchannel_db_ring: A common helper to ring a FC doorbell. + * @get_max_msg_size: A common helper to get the maximum message size. */ struct scmi_proto_helpers_ops { int (*extended_name_get)(const struct scmi_protocol_handle *ph, @@ -277,6 +280,7 @@ struct scmi_proto_helpers_ops { struct scmi_fc_db_info **p_db, u32 *rate_limit); void (*fastchannel_db_ring)(struct scmi_fc_db_info *db); + int (*get_max_msg_size)(const struct scmi_protocol_handle *ph); }; /** @@ -323,6 +327,16 @@ typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *); * protocol by the agent. Each protocol implementation * in the agent is supposed to downgrade to match the * protocol version supported by the platform. + * @vendor_id: A firmware vendor string for vendor protocols matching. + * Ignored when @id identifies a standard protocol, cannot be NULL + * otherwise. + * @sub_vendor_id: A firmware sub_vendor string for vendor protocols matching. + * Ignored if NULL or when @id identifies a standard protocol. + * @impl_ver: A firmware implementation version for vendor protocols matching. + * Ignored if zero or if @id identifies a standard protocol. + * + * Note that vendor protocols matching at load time is performed by attempting + * the closest match first against the tuple (vendor, sub_vendor, impl_ver) */ struct scmi_protocol { const u8 id; @@ -332,6 +346,9 @@ struct scmi_protocol { const void *ops; const struct scmi_protocol_events *events; unsigned int supported_version; + char *vendor_id; + char *sub_vendor_id; + u32 impl_ver; }; #define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \ @@ -353,6 +370,7 @@ void __exit scmi_##name##_unregister(void) \ DECLARE_SCMI_REGISTER_UNREGISTER(base); DECLARE_SCMI_REGISTER_UNREGISTER(clock); DECLARE_SCMI_REGISTER_UNREGISTER(perf); +DECLARE_SCMI_REGISTER_UNREGISTER(pinctrl); DECLARE_SCMI_REGISTER_UNREGISTER(power); DECLARE_SCMI_REGISTER_UNREGISTER(reset); DECLARE_SCMI_REGISTER_UNREGISTER(sensors); diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index d68c01cb7a..4892058445 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -908,7 +908,6 @@ static const struct virtio_device_id id_table[] = { static struct virtio_driver virtio_scmi_driver = { .driver.name = "scmi-virtio", - .driver.owner = THIS_MODULE, .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .id_table = id_table, diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index a1da7581ad..8a347b9384 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -819,6 +819,33 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, } EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_ctrl, FW_CS_DSP); +/** + * cs_dsp_coeff_lock_and_write_ctrl() - Writes the given buffer to the given coefficient control + * @ctl: pointer to coefficient control + * @off: word offset at which data should be written + * @buf: the buffer to write to the given control + * @len: the 
length of the buffer in bytes + * + * Same as cs_dsp_coeff_write_ctrl() but takes pwr_lock. + * + * Return: A negative number on error, 1 when the control value changed and 0 when it has not. + */ +int cs_dsp_coeff_lock_and_write_ctrl(struct cs_dsp_coeff_ctl *ctl, + unsigned int off, const void *buf, size_t len) +{ + struct cs_dsp *dsp = ctl->dsp; + int ret; + + lockdep_assert_not_held(&dsp->pwr_lock); + + mutex_lock(&dsp->pwr_lock); + ret = cs_dsp_coeff_write_ctrl(ctl, off, buf, len); + mutex_unlock(&dsp->pwr_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(cs_dsp_coeff_lock_and_write_ctrl); + static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, unsigned int off, void *buf, size_t len) { @@ -891,6 +918,33 @@ int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, } EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_read_ctrl, FW_CS_DSP); +/** + * cs_dsp_coeff_lock_and_read_ctrl() - Reads the given coefficient control into the given buffer + * @ctl: pointer to coefficient control + * @off: word offset at which data should be read + * @buf: the buffer to store to the given control + * @len: the length of the buffer in bytes + * + * Same as cs_dsp_coeff_read_ctrl() but takes pwr_lock. + * + * Return: Zero for success, a negative number on error. + */ +int cs_dsp_coeff_lock_and_read_ctrl(struct cs_dsp_coeff_ctl *ctl, + unsigned int off, void *buf, size_t len) +{ + struct cs_dsp *dsp = ctl->dsp; + int ret; + + lockdep_assert_not_held(&dsp->pwr_lock); + + mutex_lock(&dsp->pwr_lock); + ret = cs_dsp_coeff_read_ctrl(ctl, off, buf, len); + mutex_unlock(&dsp->pwr_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(cs_dsp_coeff_lock_and_read_ctrl); + static int cs_dsp_coeff_init_control_caches(struct cs_dsp *dsp) { struct cs_dsp_coeff_ctl *ctl; diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index ac2a5d2d47..23b002e4d4 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -42,6 +42,7 @@ static struct dmi_memdev_info { u8 type; /* DDR2, DDR3, DDR4 etc */ } *dmi_memdev; static int dmi_memdev_nr; +static int dmi_memdev_populated_nr __initdata; static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s) { @@ -459,6 +460,9 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v) else bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20; + if (bytes) + dmi_memdev_populated_nr++; + dmi_memdev[nr].size = bytes; nr++; } @@ -757,16 +761,8 @@ static void __init dmi_scan_machine(void) pr_info("DMI not present or invalid.\n"); } -static ssize_t raw_table_read(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t pos, size_t count) -{ - memcpy(buf, attr->private + pos, count); - return count; -} - -static BIN_ATTR(smbios_entry_point, S_IRUSR, raw_table_read, NULL, 0); -static BIN_ATTR(DMI, S_IRUSR, raw_table_read, NULL, 0); +static BIN_ATTR_SIMPLE_ADMIN_RO(smbios_entry_point); +static BIN_ATTR_SIMPLE_ADMIN_RO(DMI); static int __init dmi_init(void) { @@ -835,6 +831,8 @@ void __init dmi_setup(void) return; dmi_memdev_walk(); + pr_info("DMI: Memory slots populated: %d/%d\n", + dmi_memdev_populated_nr, dmi_memdev_nr); dump_stack_set_arch_desc("%s", dmi_ids_string); } diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 833cbb995d..552c78f5f0 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -136,7 +136,7 @@ static int efi_pstore_read_func(struct pstore_record *record, &size, record->buf); if (status != EFI_SUCCESS) { 
kfree(record->buf); - return -EIO; + return efi_status_to_err(status); } /* @@ -162,7 +162,15 @@ static ssize_t efi_pstore_read(struct pstore_record *record) efi_status_t status; for (;;) { - varname_size = 1024; + /* + * A small set of old UEFI implementations reject sizes + * above a certain threshold, the lowest seen in the wild + * is 512. + * + * TODO: Commonize with the iteration implementation in + * fs/efivarfs to keep all the quirks in one place. + */ + varname_size = 512; /* * If this is the first read() call in the pstore enumeration, @@ -181,7 +189,7 @@ static ssize_t efi_pstore_read(struct pstore_record *record) return 0; if (status != EFI_SUCCESS) - return -EIO; + return efi_status_to_err(status); /* skip variables that don't concern us */ if (efi_guidcmp(guid, LINUX_EFI_CRASH_GUID)) @@ -219,7 +227,7 @@ static int efi_pstore_write(struct pstore_record *record) record->size, record->psi->buf, true); efivar_unlock(); - return status == EFI_SUCCESS ? 0 : -EIO; + return efi_status_to_err(status); }; static int efi_pstore_erase(struct pstore_record *record) @@ -230,7 +238,7 @@ static int efi_pstore_erase(struct pstore_record *record) PSTORE_EFI_ATTRIBUTES, 0, NULL); if (status != EFI_SUCCESS && status != EFI_NOT_FOUND) - return -EIO; + return efi_status_to_err(status); return 0; } diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 31eb1e287c..06f0428a72 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -56,17 +56,6 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_CFI), $(KBUILD_CFLAGS)) # disable LTO KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS)) -GCOV_PROFILE := n -# Sanitizer runtimes are unavailable and cannot be linked here. -KASAN_SANITIZE := n -KCSAN_SANITIZE := n -KMSAN_SANITIZE := n -UBSAN_SANITIZE := n -OBJECT_FILES_NON_STANDARD := y - -# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. 
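/*
 * Sketch of the pattern behind the BIN_ATTR_SIMPLE_ADMIN_RO() conversions
 * in dmi_scan.c above and rci2-table.c further below (the names here are
 * invented): instead of a hand-rolled raw_table_read(), the attribute's
 * ->private and ->size are pointed at the in-memory table and the generic
 * sysfs_bin_attr_simple_read() helper behind the macro serves it.
 */
#include <linux/sysfs.h>

static BIN_ATTR_SIMPLE_ADMIN_RO(example_table);

static int __init example_table_expose(struct kobject *kobj,
				       void *table, size_t len)
{
	bin_attr_example_table.private = table;
	bin_attr_example_table.size = len;

	return sysfs_create_bin_file(kobj, &bin_attr_example_table);
}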
-KCOV_INSTRUMENT := n - lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \ file.o mem.o random.o randomalloc.o pci.o \ skip_spaces.o lib-cmdline.o lib-ctype.o \ diff --git a/drivers/firmware/efi/libstub/screen_info.c b/drivers/firmware/efi/libstub/screen_info.c index a51ec201ca..5d3a1e32d1 100644 --- a/drivers/firmware/efi/libstub/screen_info.c +++ b/drivers/firmware/efi/libstub/screen_info.c @@ -32,6 +32,8 @@ struct screen_info *__alloc_screen_info(void) if (status != EFI_SUCCESS) return NULL; + memset(si, 0, sizeof(*si)); + status = efi_bs_call(install_configuration_table, &screen_info_guid, si); if (status == EFI_SUCCESS) diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 1983fd3bf3..99d39eda51 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -469,11 +469,12 @@ void __noreturn efi_stub_entry(efi_handle_t handle, efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg) { - static struct boot_params boot_params __page_aligned_bss; - struct setup_header *hdr = &boot_params.hdr; efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID; + struct boot_params *boot_params; + struct setup_header *hdr; int options_size = 0; efi_status_t status; + unsigned long alloc; char *cmdline_ptr; if (efi_is_native()) @@ -491,6 +492,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, efi_exit(handle, status); } + status = efi_allocate_pages(PARAM_SIZE, &alloc, ULONG_MAX); + if (status != EFI_SUCCESS) + efi_exit(handle, status); + + boot_params = memset((void *)alloc, 0x0, PARAM_SIZE); + hdr = &boot_params->hdr; + /* Assign the setup_header fields that the kernel actually cares about */ hdr->root_flags = 1; hdr->vid_mode = 0xffff; @@ -500,17 +508,16 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, /* Convert unicode cmdline to ascii */ cmdline_ptr = efi_convert_cmdline(image, &options_size); - if (!cmdline_ptr) - goto fail; + if (!cmdline_ptr) { + efi_free(PARAM_SIZE, alloc); + efi_exit(handle, EFI_OUT_OF_RESOURCES); + } efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr, - &boot_params.ext_cmd_line_ptr); + &boot_params->ext_cmd_line_ptr); - efi_stub_entry(handle, sys_table_arg, &boot_params); + efi_stub_entry(handle, sys_table_arg, boot_params); /* not reached */ - -fail: - efi_exit(handle, status); } static void add_e820ext(struct boot_params *params, diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c index de1a9a1f9f..4fd45d6f69 100644 --- a/drivers/firmware/efi/rci2-table.c +++ b/drivers/firmware/efi/rci2-table.c @@ -40,15 +40,7 @@ static u8 *rci2_base; static u32 rci2_table_len; unsigned long rci2_table_phys __ro_after_init = EFI_INVALID_TABLE_ADDR; -static ssize_t raw_table_read(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t pos, size_t count) -{ - memcpy(buf, attr->private + pos, count); - return count; -} - -static BIN_ATTR(rci2, S_IRUSR, raw_table_read, NULL, 0); +static BIN_ATTR_SIMPLE_ADMIN_RO(rci2); static u16 checksum(void) { diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index 5d56bc40a7..708b777857 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c @@ -213,7 +213,7 @@ extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); * Calls the appropriate efi_runtime_service() with the appropriate * arguments. 
*/ -static void efi_call_rts(struct work_struct *work) +static void __nocfi efi_call_rts(struct work_struct *work) { const union efi_rts_args *args = efi_rts_work.args; efi_status_t status = EFI_NOT_FOUND; @@ -435,7 +435,7 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, return status; } -static efi_status_t +static efi_status_t __nocfi virt_efi_set_variable_nb(efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data) { @@ -469,7 +469,7 @@ static efi_status_t virt_efi_query_variable_info(u32 attr, return status; } -static efi_status_t +static efi_status_t __nocfi virt_efi_query_variable_info_nb(u32 attr, u64 *storage_space, u64 *remaining_space, u64 *max_variable_size) { @@ -499,10 +499,9 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count) return status; } -static void virt_efi_reset_system(int reset_type, - efi_status_t status, - unsigned long data_size, - efi_char16_t *data) +static void __nocfi +virt_efi_reset_system(int reset_type, efi_status_t status, + unsigned long data_size, efi_char16_t *data) { if (down_trylock(&efi_runtime_lock)) { pr_warn("failed to invoke the reset_system() runtime service:\n" diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index f654e6f6af..4056ba7f34 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -215,7 +215,7 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor, if (data_size > 0) { status = check_var_size(nonblocking, attr, - data_size + ucs2_strsize(name, 1024)); + data_size + ucs2_strsize(name, EFI_VAR_NAME_LEN)); if (status != EFI_SUCCESS) return status; } diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c index c2bffdc352..6f810d720f 100644 --- a/drivers/firmware/google/cbmem.c +++ b/drivers/firmware/google/cbmem.c @@ -124,7 +124,6 @@ static struct coreboot_driver cbmem_entry_driver = { .probe = cbmem_entry_probe, .drv = { .name = "cbmem", - .owner = THIS_MODULE, .dev_groups = dev_groups, }, .id_table = cbmem_ids, diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c index d4b6e581a6..fa7752f6e8 100644 --- a/drivers/firmware/google/coreboot_table.c +++ b/drivers/firmware/google/coreboot_table.c @@ -85,13 +85,15 @@ static void coreboot_device_release(struct device *dev) kfree(device); } -int coreboot_driver_register(struct coreboot_driver *driver) +int __coreboot_driver_register(struct coreboot_driver *driver, + struct module *owner) { driver->drv.bus = &coreboot_bus_type; + driver->drv.owner = owner; return driver_register(&driver->drv); } -EXPORT_SYMBOL(coreboot_driver_register); +EXPORT_SYMBOL(__coreboot_driver_register); void coreboot_driver_unregister(struct coreboot_driver *driver) { diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h index 86427989c5..bb6f0f7299 100644 --- a/drivers/firmware/google/coreboot_table.h +++ b/drivers/firmware/google/coreboot_table.h @@ -97,8 +97,12 @@ struct coreboot_driver { const struct coreboot_device_id *id_table; }; +/* use a macro to avoid include chaining to get THIS_MODULE */ +#define coreboot_driver_register(driver) \ + __coreboot_driver_register(driver, THIS_MODULE) /* Register a driver that uses the data from a coreboot table. */ -int coreboot_driver_register(struct coreboot_driver *driver); +int __coreboot_driver_register(struct coreboot_driver *driver, + struct module *owner); /* Unregister a driver that uses the data from a coreboot table. 
*/ void coreboot_driver_unregister(struct coreboot_driver *driver); diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c index 2ad85052b3..68f4df7e6c 100644 --- a/drivers/firmware/qcom/qcom_scm.c +++ b/drivers/firmware/qcom/qcom_scm.c @@ -4,6 +4,8 @@ */ #include +#include +#include #include #include #include @@ -114,6 +116,10 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = { #define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0) #define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1) +#define QCOM_DLOAD_MASK GENMASK(5, 4) +#define QCOM_DLOAD_NODUMP 0 +#define QCOM_DLOAD_FULLDUMP 1 + static const char * const qcom_scm_convention_names[] = { [SMC_CONVENTION_UNKNOWN] = "unknown", [SMC_CONVENTION_ARM_32] = "smc arm 32", @@ -163,9 +169,6 @@ static int qcom_scm_bw_enable(void) if (!__scm->path) return 0; - if (IS_ERR(__scm->path)) - return -EINVAL; - mutex_lock(&__scm->scm_bw_lock); if (!__scm->scm_vote_count) { ret = icc_set_bw(__scm->path, 0, UINT_MAX); @@ -183,7 +186,7 @@ err_bw: static void qcom_scm_bw_disable(void) { - if (IS_ERR_OR_NULL(__scm->path)) + if (!__scm->path) return; mutex_lock(&__scm->scm_bw_lock); @@ -496,19 +499,32 @@ static int __qcom_scm_set_dload_mode(struct device *dev, bool enable) return qcom_scm_call_atomic(__scm->dev, &desc, NULL); } +static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val) +{ + unsigned int old; + unsigned int new; + int ret; + + ret = qcom_scm_io_readl(addr, &old); + if (ret) + return ret; + + new = (old & ~mask) | (val & mask); + + return qcom_scm_io_writel(addr, new); +} + static void qcom_scm_set_download_mode(bool enable) { - bool avail; + u32 val = enable ? QCOM_DLOAD_FULLDUMP : QCOM_DLOAD_NODUMP; int ret = 0; - avail = __qcom_scm_is_call_available(__scm->dev, - QCOM_SCM_SVC_BOOT, - QCOM_SCM_BOOT_SET_DLOAD_MODE); - if (avail) { + if (__scm->dload_mode_addr) { + ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK, + FIELD_PREP(QCOM_DLOAD_MASK, val)); + } else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT, + QCOM_SCM_BOOT_SET_DLOAD_MODE)) { ret = __qcom_scm_set_dload_mode(__scm->dev, enable); - } else if (__scm->dload_mode_addr) { - ret = qcom_scm_io_writel(__scm->dload_mode_addr, - enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0); } else { dev_err(__scm->dev, "No available mechanism for setting download mode\n"); @@ -557,10 +573,9 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, */ mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, GFP_KERNEL); - if (!mdata_buf) { - dev_err(__scm->dev, "Allocation of metadata buffer failed.\n"); + if (!mdata_buf) return -ENOMEM; - } + memcpy(mdata_buf, metadata, size); ret = qcom_scm_clk_enable(); @@ -1631,7 +1646,9 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send); + any potential issues with this, only allow validated machines for now. 
*/ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = { + { .compatible = "lenovo,flex-5g" }, { .compatible = "lenovo,thinkpad-x13s", }, + { .compatible = "qcom,sc8180x-primus" }, { } }; @@ -1750,7 +1767,7 @@ int qcom_scm_wait_for_wq_completion(u32 wq_ctx) return 0; } -static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx) +static int qcom_scm_waitq_wakeup(unsigned int wq_ctx) { int ret; @@ -1782,7 +1799,7 @@ static irqreturn_t qcom_scm_irq_handler(int irq, void *data) goto out; } - ret = qcom_scm_waitq_wakeup(scm, wq_ctx); + ret = qcom_scm_waitq_wakeup(wq_ctx); if (ret) goto out; } while (more_pending); diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c index db818f9dcb..d670635914 100644 --- a/drivers/firmware/smccc/smccc.c +++ b/drivers/firmware/smccc/smccc.c @@ -69,6 +69,7 @@ s32 arm_smccc_get_soc_id_revision(void) { return smccc_soc_id_revision; } +EXPORT_SYMBOL_GPL(arm_smccc_get_soc_id_revision); static int __init smccc_devices_init(void) { diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 8b9a2556de..160968301b 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -87,7 +87,6 @@ struct ti_sci_desc { * struct ti_sci_info - Structure representing a TI SCI instance * @dev: Device pointer * @desc: SoC description for this instance - * @nb: Reboot Notifier block * @d: Debugfs file entry * @debug_region: Memory region where the debug message are available * @debug_region_size: Debug region size @@ -103,7 +102,6 @@ struct ti_sci_desc { */ struct ti_sci_info { struct device *dev; - struct notifier_block nb; const struct ti_sci_desc *desc; struct dentry *d; void __iomem *debug_region; @@ -122,7 +120,6 @@ struct ti_sci_info { #define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl) #define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle) -#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb) #ifdef CONFIG_DEBUG_FS @@ -3254,10 +3251,9 @@ devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, } EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource); -static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, - void *cmd) +static int tisci_reboot_handler(struct sys_off_data *data) { - struct ti_sci_info *info = reboot_to_ti_sci_info(nb); + struct ti_sci_info *info = data->cb_data; const struct ti_sci_handle *handle = &info->handle; ti_sci_cmd_core_reboot(handle); @@ -3303,7 +3299,6 @@ static int ti_sci_probe(struct platform_device *pdev) struct mbox_client *cl; int ret = -EINVAL; int i; - int reboot = 0; u32 h_id; desc = device_get_match_data(dev); @@ -3327,8 +3322,6 @@ static int ti_sci_probe(struct platform_device *pdev) } } - reboot = of_property_read_bool(dev->of_node, - "ti,system-reboot-controller"); INIT_LIST_HEAD(&info->node); minfo = &info->minfo; @@ -3399,15 +3392,10 @@ static int ti_sci_probe(struct platform_device *pdev) ti_sci_setup_ops(info); - if (reboot) { - info->nb.notifier_call = tisci_reboot_handler; - info->nb.priority = 128; - - ret = register_restart_handler(&info->nb); - if (ret) { - dev_err(dev, "reboot registration fail(%d)\n", ret); - goto out; - } + ret = devm_register_restart_handler(dev, tisci_reboot_handler, info); + if (ret) { + dev_err(dev, "reboot registration fail(%d)\n", ret); + goto out; } dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n", diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c index 31d962cdd6..3e7f186d23 
100644 --- a/drivers/firmware/turris-mox-rwtm.c +++ b/drivers/firmware/turris-mox-rwtm.c @@ -2,7 +2,7 @@ /* * Turris Mox rWTM firmware driver * - * Copyright (C) 2019 Marek Behún + * Copyright (C) 2019, 2024 Marek Behún */ #include @@ -174,6 +174,9 @@ static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data) struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev); struct armada_37xx_rwtm_rx_msg *msg = data; + if (completion_done(&rwtm->cmd_done)) + return; + rwtm->reply = *msg; complete(&rwtm->cmd_done); } @@ -199,9 +202,8 @@ static int mox_get_board_info(struct mox_rwtm *rwtm) if (ret < 0) return ret; - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval); if (ret == -ENODATA) { @@ -235,9 +237,8 @@ static int mox_get_board_info(struct mox_rwtm *rwtm) if (ret < 0) return ret; - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval); if (ret == -ENODATA) { @@ -274,9 +275,8 @@ static int check_get_random_support(struct mox_rwtm *rwtm) if (ret < 0) return ret; - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval); } @@ -499,6 +499,7 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, rwtm); mutex_init(&rwtm->busy); + init_completion(&rwtm->cmd_done); rwtm->mbox_client.dev = dev; rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback; @@ -512,8 +513,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) goto remove_files; } - init_completion(&rwtm->cmd_done); - ret = mox_get_board_info(rwtm); if (ret < 0) dev_warn(dev, "Cannot read board information: %i\n", ret); diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index 2f689ac4ba..37b35f58f0 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -64,9 +64,21 @@ config FPGA_MGR_STRATIX10_SOC help FPGA manager driver support for the Intel Stratix10 SoC. +config FPGA_MGR_XILINX_CORE + tristate + +config FPGA_MGR_XILINX_SELECTMAP + tristate "Xilinx Configuration over SelectMAP" + depends on HAS_IOMEM + select FPGA_MGR_XILINX_CORE + help + FPGA manager driver support for Xilinx FPGA configuration + over SelectMAP interface. + config FPGA_MGR_XILINX_SPI tristate "Xilinx Configuration over Slave Serial (SPI)" depends on SPI + select FPGA_MGR_XILINX_CORE help FPGA manager driver support for Xilinx FPGA configuration over slave serial interface. 
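The Kconfig help above reflects the split this patch makes: the PROG_B/INIT_B/DONE handshake and the fpga_manager registration move into the shared FPGA_MGR_XILINX_CORE library, and each configuration interface only supplies a write callback. A minimal sketch of a consumer of the xilinx_fpga_core API added later in this patch; the foo_* names and the byte-wide data register are illustrative, not part of the patch:

#include "xilinx-core.h"

#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

struct foo_conf {
	struct xilinx_fpga_core core;
	void __iomem *base;
};

/* Push the bitstream byte by byte to a hypothetical data register. */
static int foo_write(struct xilinx_fpga_core *core, const char *buf,
		     size_t count)
{
	struct foo_conf *conf = container_of(core, struct foo_conf, core);
	size_t i;

	for (i = 0; i < count; i++)
		writeb(buf[i], conf->base);

	return 0;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_conf *conf;

	conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return -ENOMEM;

	conf->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(conf->base))
		return PTR_ERR(conf->base);

	conf->core.dev = &pdev->dev;
	conf->core.write = foo_write;

	/* xilinx-core claims the PROG_B/INIT_B/DONE GPIOs and registers the manager */
	return xilinx_core_probe(&conf->core);
}

The reworked xilinx-spi.c below follows exactly this shape, with spi_write() in place of the MMIO loop, and xilinx-selectmap.c is the MMIO variant of it.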
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile index 352a261262..aeb89bb135 100644 --- a/drivers/fpga/Makefile +++ b/drivers/fpga/Makefile @@ -15,6 +15,8 @@ obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o obj-$(CONFIG_FPGA_MGR_STRATIX10_SOC) += stratix10-soc.o obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o +obj-$(CONFIG_FPGA_MGR_XILINX_CORE) += xilinx-core.o +obj-$(CONFIG_FPGA_MGR_XILINX_SELECTMAP) += xilinx-selectmap.o obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index 4ffb9da537..6b09144324 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -72,7 +72,6 @@ static bool altera_cvp_chkcfg; struct cvp_priv; struct altera_cvp_conf { - struct fpga_manager *mgr; struct pci_dev *pci_dev; void __iomem *map; void (*write_data)(struct altera_cvp_conf *conf, diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index 740980e7ce..d0ec3539b3 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -284,7 +284,6 @@ MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids); static struct spi_driver altera_ps_driver = { .driver = { .name = "altera-ps-spi", - .owner = THIS_MODULE, .of_match_table = of_ef_match, }, .id_table = altera_ps_spi_ids, diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c index c0a75ca360..6b97c07384 100644 --- a/drivers/fpga/dfl-afu-main.c +++ b/drivers/fpga/dfl-afu-main.c @@ -858,8 +858,6 @@ static int afu_dev_init(struct platform_device *pdev) if (!afu) return -ENOMEM; - afu->pdata = pdata; - mutex_lock(&pdata->lock); dfl_fpga_pdata_set_private(pdata, afu); afu_mmio_region_init(pdata); diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h index 674e9772f0..7bef3e300a 100644 --- a/drivers/fpga/dfl-afu.h +++ b/drivers/fpga/dfl-afu.h @@ -67,7 +67,6 @@ struct dfl_afu_dma_region { * @regions: the mmio region linked list of this afu feature device. * @dma_regions: root of dma regions rb tree. * @num_umsgs: num of umsgs. - * @pdata: afu platform device's pdata. */ struct dfl_afu { u64 region_cur_offset; @@ -75,8 +74,6 @@ struct dfl_afu { u8 num_umsgs; struct list_head regions; struct rb_root dma_regions; - - struct dfl_feature_platform_data *pdata; }; /* hold pdata->lock when call __afu_port_enable/disable */ diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c index a2b5da0093..864924f68f 100644 --- a/drivers/fpga/dfl-fme-main.c +++ b/drivers/fpga/dfl-fme-main.c @@ -679,8 +679,6 @@ static int fme_dev_init(struct platform_device *pdev) if (!fme) return -ENOMEM; - fme->pdata = pdata; - mutex_lock(&pdata->lock); dfl_fpga_pdata_set_private(pdata, fme); mutex_unlock(&pdata->lock); diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h index 4195dd6819..a566dbc2b4 100644 --- a/drivers/fpga/dfl-fme.h +++ b/drivers/fpga/dfl-fme.h @@ -24,13 +24,11 @@ * @mgr: FME's FPGA manager platform device. * @region_list: linked list of FME's FPGA regions. * @bridge_list: linked list of FME's FPGA bridges. - * @pdata: fme platform device's pdata. 
*/ struct dfl_fme { struct platform_device *mgr; struct list_head region_list; struct list_head bridge_list; - struct dfl_feature_platform_data *pdata; }; extern const struct dfl_feature_ops fme_pr_mgmt_ops; diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h index 1d724a28f0..5063d73b0d 100644 --- a/drivers/fpga/dfl.h +++ b/drivers/fpga/dfl.h @@ -437,11 +437,6 @@ void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id) return NULL; } -static inline bool is_dfl_feature_present(struct device *dev, u16 id) -{ - return !!dfl_get_feature_ioaddr_by_id(dev, id); -} - static inline struct device *dfl_fpga_pdata_to_parent(struct dfl_feature_platform_data *pdata) { diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c index c0028ae4c5..62c3026613 100644 --- a/drivers/fpga/ice40-spi.c +++ b/drivers/fpga/ice40-spi.c @@ -10,8 +10,8 @@ #include #include +#include #include -#include #include #include @@ -199,7 +199,7 @@ static struct spi_driver ice40_fpga_driver = { .probe = ice40_fpga_probe, .driver = { .name = "ice40spi", - .of_match_table = of_match_ptr(ice40_fpga_of_match), + .of_match_table = ice40_fpga_of_match, }, .id_table = ice40_fpga_spi_ids, }; diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c index 89851b1337..7ac9f9f5af 100644 --- a/drivers/fpga/intel-m10-bmc-sec-update.c +++ b/drivers/fpga/intel-m10-bmc-sec-update.c @@ -529,11 +529,12 @@ static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl, const u8 *data, u32 size) { struct m10bmc_sec *sec = fwl->dd_handle; + const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; u32 ret; sec->cancel_request = false; - if (!size || size > M10BMC_STAGING_SIZE) + if (!size || size > csr_map->staging_size) return FW_UPLOAD_ERR_INVALID_SIZE; if (sec->m10bmc->flash_bulk_ops) diff --git a/drivers/fpga/tests/fpga-bridge-test.c b/drivers/fpga/tests/fpga-bridge-test.c index 1d258002cd..2f7a24f238 100644 --- a/drivers/fpga/tests/fpga-bridge-test.c +++ b/drivers/fpga/tests/fpga-bridge-test.c @@ -7,8 +7,8 @@ * Author: Marco Pagani */ +#include #include -#include #include #include #include @@ -19,7 +19,7 @@ struct bridge_stats { struct bridge_ctx { struct fpga_bridge *bridge; - struct platform_device *pdev; + struct device *dev; struct bridge_stats stats; }; @@ -43,30 +43,31 @@ static const struct fpga_bridge_ops fake_bridge_ops = { /** * register_test_bridge() - Register a fake FPGA bridge for testing. * @test: KUnit test context object. + * @dev_name: name of the kunit device to be registered * * Return: Context of the newly registered FPGA bridge. 
*/ -static struct bridge_ctx *register_test_bridge(struct kunit *test) +static struct bridge_ctx *register_test_bridge(struct kunit *test, const char *dev_name) { struct bridge_ctx *ctx; ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - ctx->pdev = platform_device_register_simple("bridge_pdev", PLATFORM_DEVID_AUTO, NULL, 0); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->pdev); + ctx->dev = kunit_device_register(test, dev_name); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev); - ctx->bridge = fpga_bridge_register(&ctx->pdev->dev, "Fake FPGA bridge", &fake_bridge_ops, + ctx->bridge = fpga_bridge_register(ctx->dev, "Fake FPGA bridge", &fake_bridge_ops, &ctx->stats); KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge)); return ctx; } -static void unregister_test_bridge(struct bridge_ctx *ctx) +static void unregister_test_bridge(struct kunit *test, struct bridge_ctx *ctx) { fpga_bridge_unregister(ctx->bridge); - platform_device_unregister(ctx->pdev); + kunit_device_unregister(test, ctx->dev); } static void fpga_bridge_test_get(struct kunit *test) @@ -74,10 +75,10 @@ static void fpga_bridge_test_get(struct kunit *test) struct bridge_ctx *ctx = test->priv; struct fpga_bridge *bridge; - bridge = fpga_bridge_get(&ctx->pdev->dev, NULL); + bridge = fpga_bridge_get(ctx->dev, NULL); KUNIT_EXPECT_PTR_EQ(test, bridge, ctx->bridge); - bridge = fpga_bridge_get(&ctx->pdev->dev, NULL); + bridge = fpga_bridge_get(ctx->dev, NULL); KUNIT_EXPECT_EQ(test, PTR_ERR(bridge), -EBUSY); fpga_bridge_put(ctx->bridge); @@ -105,19 +106,19 @@ static void fpga_bridge_test_get_put_list(struct kunit *test) int ret; ctx_0 = test->priv; - ctx_1 = register_test_bridge(test); + ctx_1 = register_test_bridge(test, "fpga-bridge-test-dev-1"); INIT_LIST_HEAD(&bridge_list); /* Get bridge 0 and add it to the list */ - ret = fpga_bridge_get_to_list(&ctx_0->pdev->dev, NULL, &bridge_list); + ret = fpga_bridge_get_to_list(ctx_0->dev, NULL, &bridge_list); KUNIT_EXPECT_EQ(test, ret, 0); KUNIT_EXPECT_PTR_EQ(test, ctx_0->bridge, list_first_entry_or_null(&bridge_list, struct fpga_bridge, node)); /* Get bridge 1 and add it to the list */ - ret = fpga_bridge_get_to_list(&ctx_1->pdev->dev, NULL, &bridge_list); + ret = fpga_bridge_get_to_list(ctx_1->dev, NULL, &bridge_list); KUNIT_EXPECT_EQ(test, ret, 0); KUNIT_EXPECT_PTR_EQ(test, ctx_1->bridge, @@ -141,19 +142,19 @@ static void fpga_bridge_test_get_put_list(struct kunit *test) KUNIT_EXPECT_TRUE(test, list_empty(&bridge_list)); - unregister_test_bridge(ctx_1); + unregister_test_bridge(test, ctx_1); } static int fpga_bridge_test_init(struct kunit *test) { - test->priv = register_test_bridge(test); + test->priv = register_test_bridge(test, "fpga-bridge-test-dev-0"); return 0; } static void fpga_bridge_test_exit(struct kunit *test) { - unregister_test_bridge(test->priv); + unregister_test_bridge(test, test->priv); } static struct kunit_case fpga_bridge_test_cases[] = { diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c index 6acec55b60..125b3a4d43 100644 --- a/drivers/fpga/tests/fpga-mgr-test.c +++ b/drivers/fpga/tests/fpga-mgr-test.c @@ -7,8 +7,8 @@ * Author: Marco Pagani */ +#include #include -#include #include #include #include @@ -40,7 +40,7 @@ struct mgr_stats { struct mgr_ctx { struct fpga_image_info *img_info; struct fpga_manager *mgr; - struct platform_device *pdev; + struct device *dev; struct mgr_stats stats; }; @@ -194,7 +194,7 @@ static void fpga_mgr_test_get(struct kunit *test) struct mgr_ctx *ctx = test->priv; 
struct fpga_manager *mgr; - mgr = fpga_mgr_get(&ctx->pdev->dev); + mgr = fpga_mgr_get(ctx->dev); KUNIT_EXPECT_PTR_EQ(test, mgr, ctx->mgr); fpga_mgr_put(ctx->mgr); @@ -284,14 +284,14 @@ static int fpga_mgr_test_init(struct kunit *test) ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - ctx->pdev = platform_device_register_simple("mgr_pdev", PLATFORM_DEVID_AUTO, NULL, 0); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->pdev); + ctx->dev = kunit_device_register(test, "fpga-manager-test-dev"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev); - ctx->mgr = devm_fpga_mgr_register(&ctx->pdev->dev, "Fake FPGA Manager", &fake_mgr_ops, + ctx->mgr = devm_fpga_mgr_register(ctx->dev, "Fake FPGA Manager", &fake_mgr_ops, &ctx->stats); KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->mgr)); - ctx->img_info = fpga_image_info_alloc(&ctx->pdev->dev); + ctx->img_info = fpga_image_info_alloc(ctx->dev); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->img_info); test->priv = ctx; @@ -304,7 +304,7 @@ static void fpga_mgr_test_exit(struct kunit *test) struct mgr_ctx *ctx = test->priv; fpga_image_info_free(ctx->img_info); - platform_device_unregister(ctx->pdev); + kunit_device_unregister(test, ctx->dev); } static struct kunit_case fpga_mgr_test_cases[] = { diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c index baab07e3fc..bcf0651df2 100644 --- a/drivers/fpga/tests/fpga-region-test.c +++ b/drivers/fpga/tests/fpga-region-test.c @@ -7,12 +7,12 @@ * Author: Marco Pagani */ +#include #include #include #include #include #include -#include #include struct mgr_stats { @@ -26,11 +26,11 @@ struct bridge_stats { struct test_ctx { struct fpga_manager *mgr; - struct platform_device *mgr_pdev; + struct device *mgr_dev; struct fpga_bridge *bridge; - struct platform_device *bridge_pdev; + struct device *bridge_dev; struct fpga_region *region; - struct platform_device *region_pdev; + struct device *region_dev; struct bridge_stats bridge_stats; struct mgr_stats mgr_stats; }; @@ -91,7 +91,7 @@ static void fpga_region_test_class_find(struct kunit *test) struct test_ctx *ctx = test->priv; struct fpga_region *region; - region = fpga_region_class_find(NULL, &ctx->region_pdev->dev, fake_region_match); + region = fpga_region_class_find(NULL, ctx->region_dev, fake_region_match); KUNIT_EXPECT_PTR_EQ(test, region, ctx->region); put_device(®ion->dev); @@ -108,7 +108,7 @@ static void fpga_region_test_program_fpga(struct kunit *test) char img_buf[4]; int ret; - img_info = fpga_image_info_alloc(&ctx->mgr_pdev->dev); + img_info = fpga_image_info_alloc(ctx->mgr_dev); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, img_info); img_info->buf = img_buf; @@ -148,32 +148,30 @@ static int fpga_region_test_init(struct kunit *test) ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - ctx->mgr_pdev = platform_device_register_simple("mgr_pdev", PLATFORM_DEVID_AUTO, NULL, 0); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->mgr_pdev); + ctx->mgr_dev = kunit_device_register(test, "fpga-manager-test-dev"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->mgr_dev); - ctx->mgr = devm_fpga_mgr_register(&ctx->mgr_pdev->dev, "Fake FPGA Manager", &fake_mgr_ops, - &ctx->mgr_stats); + ctx->mgr = devm_fpga_mgr_register(ctx->mgr_dev, "Fake FPGA Manager", + &fake_mgr_ops, &ctx->mgr_stats); KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->mgr)); - ctx->bridge_pdev = platform_device_register_simple("bridge_pdev", PLATFORM_DEVID_AUTO, - NULL, 0); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, 
ctx->bridge_pdev); + ctx->bridge_dev = kunit_device_register(test, "fpga-bridge-test-dev"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->bridge_dev); - ctx->bridge = fpga_bridge_register(&ctx->bridge_pdev->dev, "Fake FPGA Bridge", + ctx->bridge = fpga_bridge_register(ctx->bridge_dev, "Fake FPGA Bridge", &fake_bridge_ops, &ctx->bridge_stats); KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge)); ctx->bridge_stats.enable = true; - ctx->region_pdev = platform_device_register_simple("region_pdev", PLATFORM_DEVID_AUTO, - NULL, 0); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->region_pdev); + ctx->region_dev = kunit_device_register(test, "fpga-region-test-dev"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->region_dev); region_info.mgr = ctx->mgr; region_info.priv = ctx->bridge; region_info.get_bridges = fake_region_get_bridges; - ctx->region = fpga_region_register_full(&ctx->region_pdev->dev, ®ion_info); + ctx->region = fpga_region_register_full(ctx->region_dev, ®ion_info); KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->region)); test->priv = ctx; @@ -186,18 +184,17 @@ static void fpga_region_test_exit(struct kunit *test) struct test_ctx *ctx = test->priv; fpga_region_unregister(ctx->region); - platform_device_unregister(ctx->region_pdev); + kunit_device_unregister(test, ctx->region_dev); fpga_bridge_unregister(ctx->bridge); - platform_device_unregister(ctx->bridge_pdev); + kunit_device_unregister(test, ctx->bridge_dev); - platform_device_unregister(ctx->mgr_pdev); + kunit_device_unregister(test, ctx->mgr_dev); } static struct kunit_case fpga_region_test_cases[] = { KUNIT_CASE(fpga_region_test_class_find), KUNIT_CASE(fpga_region_test_program_fpga), - {} }; diff --git a/drivers/fpga/xilinx-core.c b/drivers/fpga/xilinx-core.c new file mode 100644 index 0000000000..39aeacf2e4 --- /dev/null +++ b/drivers/fpga/xilinx-core.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Common parts of the Xilinx Spartan6 and 7 Series FPGA manager drivers. + * + * Copyright (C) 2017 DENX Software Engineering + * + * Anatolij Gustschin + */ + +#include "xilinx-core.h" + +#include +#include +#include +#include + +static int get_done_gpio(struct fpga_manager *mgr) +{ + struct xilinx_fpga_core *core = mgr->priv; + int ret; + + ret = gpiod_get_value(core->done); + if (ret < 0) + dev_err(&mgr->dev, "Error reading DONE (%d)\n", ret); + + return ret; +} + +static enum fpga_mgr_states xilinx_core_state(struct fpga_manager *mgr) +{ + if (!get_done_gpio(mgr)) + return FPGA_MGR_STATE_RESET; + + return FPGA_MGR_STATE_UNKNOWN; +} + +/** + * wait_for_init_b - wait for the INIT_B pin to have a given state, or wait + * a given delay if the pin is unavailable + * + * @mgr: The FPGA manager object + * @value: Value INIT_B to wait for (1 = asserted = low) + * @alt_udelay: Delay to wait if the INIT_B GPIO is not available + * + * Returns 0 when the INIT_B GPIO reached the given state or -ETIMEDOUT if + * too much time passed waiting for that. If no INIT_B GPIO is available + * then always return 0. 
+ */ +static int wait_for_init_b(struct fpga_manager *mgr, int value, + unsigned long alt_udelay) +{ + struct xilinx_fpga_core *core = mgr->priv; + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + + if (core->init_b) { + while (time_before(jiffies, timeout)) { + int ret = gpiod_get_value(core->init_b); + + if (ret == value) + return 0; + + if (ret < 0) { + dev_err(&mgr->dev, + "Error reading INIT_B (%d)\n", ret); + return ret; + } + + usleep_range(100, 400); + } + + dev_err(&mgr->dev, "Timeout waiting for INIT_B to %s\n", + value ? "assert" : "deassert"); + return -ETIMEDOUT; + } + + udelay(alt_udelay); + + return 0; +} + +static int xilinx_core_write_init(struct fpga_manager *mgr, + struct fpga_image_info *info, const char *buf, + size_t count) +{ + struct xilinx_fpga_core *core = mgr->priv; + int err; + + if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) { + dev_err(&mgr->dev, "Partial reconfiguration not supported\n"); + return -EINVAL; + } + + gpiod_set_value(core->prog_b, 1); + + err = wait_for_init_b(mgr, 1, 1); /* min is 500 ns */ + if (err) { + gpiod_set_value(core->prog_b, 0); + return err; + } + + gpiod_set_value(core->prog_b, 0); + + err = wait_for_init_b(mgr, 0, 0); + if (err) + return err; + + if (get_done_gpio(mgr)) { + dev_err(&mgr->dev, "Unexpected DONE pin state...\n"); + return -EIO; + } + + /* program latency */ + usleep_range(7500, 7600); + return 0; +} + +static int xilinx_core_write(struct fpga_manager *mgr, const char *buf, + size_t count) +{ + struct xilinx_fpga_core *core = mgr->priv; + + return core->write(core, buf, count); +} + +static int xilinx_core_write_complete(struct fpga_manager *mgr, + struct fpga_image_info *info) +{ + struct xilinx_fpga_core *core = mgr->priv; + unsigned long timeout = + jiffies + usecs_to_jiffies(info->config_complete_timeout_us); + bool expired = false; + int done; + int ret; + const char padding[1] = { 0xff }; + + /* + * This loop is carefully written such that if the driver is + * scheduled out for more than 'timeout', we still check for DONE + * before giving up and we apply 8 extra CCLK cycles in all cases. + */ + while (!expired) { + expired = time_after(jiffies, timeout); + + done = get_done_gpio(mgr); + if (done < 0) + return done; + + ret = core->write(core, padding, sizeof(padding)); + if (ret) + return ret; + + if (done) + return 0; + } + + if (core->init_b) { + ret = gpiod_get_value(core->init_b); + + if (ret < 0) { + dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret); + return ret; + } + + dev_err(&mgr->dev, + ret ? 
"CRC error or invalid device\n" : + "Missing sync word or incomplete bitstream\n"); + } else { + dev_err(&mgr->dev, "Timeout after config data transfer\n"); + } + + return -ETIMEDOUT; +} + +static inline struct gpio_desc * +xilinx_core_devm_gpiod_get(struct device *dev, const char *con_id, + const char *legacy_con_id, enum gpiod_flags flags) +{ + struct gpio_desc *desc; + + desc = devm_gpiod_get(dev, con_id, flags); + if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT && + of_device_is_compatible(dev->of_node, "xlnx,fpga-slave-serial")) + desc = devm_gpiod_get(dev, legacy_con_id, flags); + + return desc; +} + +static const struct fpga_manager_ops xilinx_core_ops = { + .state = xilinx_core_state, + .write_init = xilinx_core_write_init, + .write = xilinx_core_write, + .write_complete = xilinx_core_write_complete, +}; + +int xilinx_core_probe(struct xilinx_fpga_core *core) +{ + struct fpga_manager *mgr; + + if (!core || !core->dev || !core->write) + return -EINVAL; + + /* PROGRAM_B is active low */ + core->prog_b = xilinx_core_devm_gpiod_get(core->dev, "prog", "prog_b", + GPIOD_OUT_LOW); + if (IS_ERR(core->prog_b)) + return dev_err_probe(core->dev, PTR_ERR(core->prog_b), + "Failed to get PROGRAM_B gpio\n"); + + core->init_b = xilinx_core_devm_gpiod_get(core->dev, "init", "init-b", + GPIOD_IN); + if (IS_ERR(core->init_b)) + return dev_err_probe(core->dev, PTR_ERR(core->init_b), + "Failed to get INIT_B gpio\n"); + + core->done = devm_gpiod_get(core->dev, "done", GPIOD_IN); + if (IS_ERR(core->done)) + return dev_err_probe(core->dev, PTR_ERR(core->done), + "Failed to get DONE gpio\n"); + + mgr = devm_fpga_mgr_register(core->dev, + "Xilinx Slave Serial FPGA Manager", + &xilinx_core_ops, core); + return PTR_ERR_OR_ZERO(mgr); +} +EXPORT_SYMBOL_GPL(xilinx_core_probe); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Anatolij Gustschin "); +MODULE_DESCRIPTION("Xilinx 7 Series FPGA manager core"); diff --git a/drivers/fpga/xilinx-core.h b/drivers/fpga/xilinx-core.h new file mode 100644 index 0000000000..f02ac67fce --- /dev/null +++ b/drivers/fpga/xilinx-core.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __XILINX_CORE_H +#define __XILINX_CORE_H + +#include + +/** + * struct xilinx_fpga_core - interface between the driver and the core manager + * of Xilinx 7 Series FPGA manager + * @dev: device node + * @write: write callback of the driver + */ +struct xilinx_fpga_core { +/* public: */ + struct device *dev; + int (*write)(struct xilinx_fpga_core *core, const char *buf, + size_t count); +/* private: handled by xilinx-core */ + struct gpio_desc *prog_b; + struct gpio_desc *init_b; + struct gpio_desc *done; +}; + +int xilinx_core_probe(struct xilinx_fpga_core *core); + +#endif /* __XILINX_CORE_H */ diff --git a/drivers/fpga/xilinx-selectmap.c b/drivers/fpga/xilinx-selectmap.c new file mode 100644 index 0000000000..2cd87e7e91 --- /dev/null +++ b/drivers/fpga/xilinx-selectmap.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Xilinx Spartan6 and 7 Series SelectMAP interface driver + * + * (C) 2024 Charles Perry + * + * Manage Xilinx FPGA firmware loaded over the SelectMAP configuration + * interface. 
+ */ + +#include "xilinx-core.h" + +#include +#include +#include +#include +#include +#include + +struct xilinx_selectmap_conf { + struct xilinx_fpga_core core; + void __iomem *base; +}; + +#define to_xilinx_selectmap_conf(obj) \ + container_of(obj, struct xilinx_selectmap_conf, core) + +static int xilinx_selectmap_write(struct xilinx_fpga_core *core, + const char *buf, size_t count) +{ + struct xilinx_selectmap_conf *conf = to_xilinx_selectmap_conf(core); + size_t i; + + for (i = 0; i < count; ++i) + writeb(buf[i], conf->base); + + return 0; +} + +static int xilinx_selectmap_probe(struct platform_device *pdev) +{ + struct xilinx_selectmap_conf *conf; + struct gpio_desc *gpio; + void __iomem *base; + + conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL); + if (!conf) + return -ENOMEM; + + conf->core.dev = &pdev->dev; + conf->core.write = xilinx_selectmap_write; + + base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(base)) + return dev_err_probe(&pdev->dev, PTR_ERR(base), + "ioremap error\n"); + conf->base = base; + + /* CSI_B is active low */ + gpio = devm_gpiod_get_optional(&pdev->dev, "csi", GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) + return dev_err_probe(&pdev->dev, PTR_ERR(gpio), + "Failed to get CSI_B gpio\n"); + + /* RDWR_B is active low */ + gpio = devm_gpiod_get_optional(&pdev->dev, "rdwr", GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) + return dev_err_probe(&pdev->dev, PTR_ERR(gpio), + "Failed to get RDWR_B gpio\n"); + + return xilinx_core_probe(&conf->core); +} + +static const struct of_device_id xlnx_selectmap_of_match[] = { + { .compatible = "xlnx,fpga-xc7s-selectmap", }, // Spartan-7 + { .compatible = "xlnx,fpga-xc7a-selectmap", }, // Artix-7 + { .compatible = "xlnx,fpga-xc7k-selectmap", }, // Kintex-7 + { .compatible = "xlnx,fpga-xc7v-selectmap", }, // Virtex-7 + {}, +}; +MODULE_DEVICE_TABLE(of, xlnx_selectmap_of_match); + +static struct platform_driver xilinx_selectmap_driver = { + .driver = { + .name = "xilinx-selectmap", + .of_match_table = xlnx_selectmap_of_match, + }, + .probe = xilinx_selectmap_probe, +}; + +module_platform_driver(xilinx_selectmap_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Charles Perry "); +MODULE_DESCRIPTION("Load Xilinx FPGA firmware over SelectMap"); diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c index e1a227e7ff..8756504340 100644 --- a/drivers/fpga/xilinx-spi.c +++ b/drivers/fpga/xilinx-spi.c @@ -10,127 +10,17 @@ * the slave serial configuration interface. 
*/ -#include -#include -#include -#include +#include "xilinx-core.h" + #include #include #include #include -#include - -struct xilinx_spi_conf { - struct spi_device *spi; - struct gpio_desc *prog_b; - struct gpio_desc *init_b; - struct gpio_desc *done; -}; - -static int get_done_gpio(struct fpga_manager *mgr) -{ - struct xilinx_spi_conf *conf = mgr->priv; - int ret; - - ret = gpiod_get_value(conf->done); - - if (ret < 0) - dev_err(&mgr->dev, "Error reading DONE (%d)\n", ret); - - return ret; -} - -static enum fpga_mgr_states xilinx_spi_state(struct fpga_manager *mgr) -{ - if (!get_done_gpio(mgr)) - return FPGA_MGR_STATE_RESET; - - return FPGA_MGR_STATE_UNKNOWN; -} - -/** - * wait_for_init_b - wait for the INIT_B pin to have a given state, or wait - * a given delay if the pin is unavailable - * - * @mgr: The FPGA manager object - * @value: Value INIT_B to wait for (1 = asserted = low) - * @alt_udelay: Delay to wait if the INIT_B GPIO is not available - * - * Returns 0 when the INIT_B GPIO reached the given state or -ETIMEDOUT if - * too much time passed waiting for that. If no INIT_B GPIO is available - * then always return 0. - */ -static int wait_for_init_b(struct fpga_manager *mgr, int value, - unsigned long alt_udelay) -{ - struct xilinx_spi_conf *conf = mgr->priv; - unsigned long timeout = jiffies + msecs_to_jiffies(1000); - - if (conf->init_b) { - while (time_before(jiffies, timeout)) { - int ret = gpiod_get_value(conf->init_b); - - if (ret == value) - return 0; - - if (ret < 0) { - dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret); - return ret; - } - - usleep_range(100, 400); - } - - dev_err(&mgr->dev, "Timeout waiting for INIT_B to %s\n", - value ? "assert" : "deassert"); - return -ETIMEDOUT; - } - - udelay(alt_udelay); - - return 0; -} - -static int xilinx_spi_write_init(struct fpga_manager *mgr, - struct fpga_image_info *info, - const char *buf, size_t count) -{ - struct xilinx_spi_conf *conf = mgr->priv; - int err; - - if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) { - dev_err(&mgr->dev, "Partial reconfiguration not supported\n"); - return -EINVAL; - } - - gpiod_set_value(conf->prog_b, 1); - - err = wait_for_init_b(mgr, 1, 1); /* min is 500 ns */ - if (err) { - gpiod_set_value(conf->prog_b, 0); - return err; - } - - gpiod_set_value(conf->prog_b, 0); - - err = wait_for_init_b(mgr, 0, 0); - if (err) - return err; - - if (get_done_gpio(mgr)) { - dev_err(&mgr->dev, "Unexpected DONE pin state...\n"); - return -EIO; - } - /* program latency */ - usleep_range(7500, 7600); - return 0; -} - -static int xilinx_spi_write(struct fpga_manager *mgr, const char *buf, +static int xilinx_spi_write(struct xilinx_fpga_core *core, const char *buf, size_t count) { - struct xilinx_spi_conf *conf = mgr->priv; + struct spi_device *spi = to_spi_device(core->dev); const char *fw_data = buf; const char *fw_data_end = fw_data + count; @@ -141,9 +31,9 @@ static int xilinx_spi_write(struct fpga_manager *mgr, const char *buf, remaining = fw_data_end - fw_data; stride = min_t(size_t, remaining, SZ_4K); - ret = spi_write(conf->spi, fw_data, stride); + ret = spi_write(spi, fw_data, stride); if (ret) { - dev_err(&mgr->dev, "SPI error in firmware write: %d\n", + dev_err(core->dev, "SPI error in firmware write: %d\n", ret); return ret; } @@ -153,109 +43,25 @@ static int xilinx_spi_write(struct fpga_manager *mgr, const char *buf, return 0; } -static int xilinx_spi_apply_cclk_cycles(struct xilinx_spi_conf *conf) -{ - struct spi_device *spi = conf->spi; - const u8 din_data[1] = { 0xff }; - int ret; - - ret = 
spi_write(conf->spi, din_data, sizeof(din_data)); - if (ret) - dev_err(&spi->dev, "applying CCLK cycles failed: %d\n", ret); - - return ret; -} - -static int xilinx_spi_write_complete(struct fpga_manager *mgr, - struct fpga_image_info *info) -{ - struct xilinx_spi_conf *conf = mgr->priv; - unsigned long timeout = jiffies + usecs_to_jiffies(info->config_complete_timeout_us); - bool expired = false; - int done; - int ret; - - /* - * This loop is carefully written such that if the driver is - * scheduled out for more than 'timeout', we still check for DONE - * before giving up and we apply 8 extra CCLK cycles in all cases. - */ - while (!expired) { - expired = time_after(jiffies, timeout); - - done = get_done_gpio(mgr); - if (done < 0) - return done; - - ret = xilinx_spi_apply_cclk_cycles(conf); - if (ret) - return ret; - - if (done) - return 0; - } - - if (conf->init_b) { - ret = gpiod_get_value(conf->init_b); - - if (ret < 0) { - dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret); - return ret; - } - - dev_err(&mgr->dev, - ret ? "CRC error or invalid device\n" - : "Missing sync word or incomplete bitstream\n"); - } else { - dev_err(&mgr->dev, "Timeout after config data transfer\n"); - } - - return -ETIMEDOUT; -} - -static const struct fpga_manager_ops xilinx_spi_ops = { - .state = xilinx_spi_state, - .write_init = xilinx_spi_write_init, - .write = xilinx_spi_write, - .write_complete = xilinx_spi_write_complete, -}; - static int xilinx_spi_probe(struct spi_device *spi) { - struct xilinx_spi_conf *conf; - struct fpga_manager *mgr; + struct xilinx_fpga_core *core; - conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL); - if (!conf) + core = devm_kzalloc(&spi->dev, sizeof(*core), GFP_KERNEL); + if (!core) return -ENOMEM; - conf->spi = spi; + core->dev = &spi->dev; + core->write = xilinx_spi_write; - /* PROGRAM_B is active low */ - conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW); - if (IS_ERR(conf->prog_b)) - return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b), - "Failed to get PROGRAM_B gpio\n"); - - conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN); - if (IS_ERR(conf->init_b)) - return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b), - "Failed to get INIT_B gpio\n"); - - conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN); - if (IS_ERR(conf->done)) - return dev_err_probe(&spi->dev, PTR_ERR(conf->done), - "Failed to get DONE gpio\n"); - - mgr = devm_fpga_mgr_register(&spi->dev, - "Xilinx Slave Serial FPGA Manager", - &xilinx_spi_ops, conf); - return PTR_ERR_OR_ZERO(mgr); + return xilinx_core_probe(core); } #ifdef CONFIG_OF static const struct of_device_id xlnx_spi_of_match[] = { - { .compatible = "xlnx,fpga-slave-serial", }, + { + .compatible = "xlnx,fpga-slave-serial", + }, {} }; MODULE_DEVICE_TABLE(of, xlnx_spi_of_match); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index cbfcfefdb5..1c28a48915 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -103,6 +103,15 @@ config GPIO_REGMAP select REGMAP tristate +config GPIO_SWNODE_UNDEFINED + bool + help + This adds a special place holder for software nodes to contain an + undefined GPIO reference, this is primarily used by SPI to allow a + list of GPIO chip selects to mark a certain chip select as being + controlled the SPI device's internal chip select mechanism and not + a GPIO. 
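To make the help text concrete, a minimal sketch of the software-node description this option enables, assuming gpiolib exports the placeholder node as swnode_gpio_undefined when GPIO_SWNODE_UNDEFINED is set; the demo_* node names and pin numbers are made up. An SPI host's cs-gpios list can then mix a native chip select with a real GPIO:

#include <dt-bindings/gpio/gpio.h>
#include <linux/gpio/property.h>
#include <linux/property.h>

/* Hypothetical swnode for the GPIO controller that provides CS 1. */
static const struct software_node demo_gpio_node = {
	.name = "demo-gpio-controller",
};

static const struct software_node_ref_args demo_cs_refs[] = {
	/* CS 0: handled by the SPI controller's internal chip select, not a GPIO */
	SOFTWARE_NODE_REFERENCE(&swnode_gpio_undefined),
	/* CS 1: line 5 of the demo controller, active low */
	SOFTWARE_NODE_REFERENCE(&demo_gpio_node, 5, GPIO_ACTIVE_LOW),
};

static const struct property_entry demo_spi_host_props[] = {
	PROPERTY_ENTRY_REF_ARRAY("cs-gpios", demo_cs_refs),
	{ }
};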
+ # put drivers in the right section, in alphabetical order # This symbol is selected by both I2C and SPI expanders @@ -312,6 +321,24 @@ config GPIO_GENERIC_PLATFORM help Say yes here to support basic platform_device memory-mapped GPIO controllers. +config GPIO_GRANITERAPIDS + tristate "Intel Granite Rapids-D vGPIO support" + depends on X86 || COMPILE_TEST + select GPIOLIB_IRQCHIP + help + Select this to enable virtual GPIO support on platforms with the + following SoCs: + + - Intel Granite Rapids-D + + The driver enables basic GPIO functionality and implements interrupt + support. The virtual GPIO driver controls GPIO lines via a firmware + interface. The physical GPIO pins reside on device that is external + from the main SoC package, such as a BMC or a CPLD. + + To compile this driver as a module, choose M here: the module will + be called gpio-graniterapids. + config GPIO_GRGPIO tristate "Aeroflex Gaisler GRGPIO support" depends on OF_GPIO diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index fdd28c58d8..e2a5301378 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -66,6 +66,7 @@ obj-$(CONFIG_GPIO_FTGPIO010) += gpio-ftgpio010.o obj-$(CONFIG_GPIO_FXL6408) += gpio-fxl6408.o obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o obj-$(CONFIG_GPIO_GPIO_MM) += gpio-gpio-mm.o +obj-$(CONFIG_GPIO_GRANITERAPIDS) += gpio-graniterapids.o obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o obj-$(CONFIG_GPIO_GW_PLD) += gpio-gw-pld.o obj-$(CONFIG_GPIO_HISI) += gpio-hisi.o diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index a789af4a5c..8dce78ea71 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ -50,7 +50,6 @@ struct brcmstb_gpio_priv { struct irq_domain *irq_domain; struct irq_chip irq_chip; int parent_irq; - int gpio_base; int num_gpios; int parent_wake_irq; }; @@ -92,7 +91,7 @@ brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank) static int brcmstb_gpio_hwirq_to_offset(irq_hw_number_t hwirq, struct brcmstb_gpio_bank *bank) { - return hwirq - (bank->gc.base - bank->parent_priv->gpio_base); + return hwirq - bank->gc.offset; } static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank, @@ -118,7 +117,7 @@ static int brcmstb_gpio_to_irq(struct gpio_chip *gc, unsigned offset) { struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc); /* gc_offset is relative to this gpio_chip; want real offset */ - int hwirq = offset + (gc->base - priv->gpio_base); + int hwirq = offset + gc->offset; if (hwirq >= priv->num_gpios) return -ENXIO; @@ -263,7 +262,7 @@ static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank) { struct brcmstb_gpio_priv *priv = bank->parent_priv; struct irq_domain *domain = priv->irq_domain; - int hwbase = bank->gc.base - priv->gpio_base; + int hwbase = bank->gc.offset; unsigned long status; while ((status = brcmstb_gpio_get_active_irqs(bank))) { @@ -412,7 +411,7 @@ static int brcmstb_gpio_of_xlate(struct gpio_chip *gc, if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells)) return -EINVAL; - offset = gpiospec->args[0] - (gc->base - priv->gpio_base); + offset = gpiospec->args[0] - bank->gc.offset; if (offset >= gc->ngpio || offset < 0) return -EINVAL; @@ -596,8 +595,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) const __be32 *p; u32 bank_width; int num_banks = 0; + int num_gpios = 0; int err; - static int gpio_base; unsigned long flags = 0; bool need_wakeup_event = false; @@ -611,7 +610,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) if (IS_ERR(reg_base)) return 
PTR_ERR(reg_base); - priv->gpio_base = gpio_base; priv->reg_base = reg_base; priv->pdev = pdev; @@ -651,7 +649,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) dev_dbg(dev, "Width 0 found: Empty bank @ %d\n", num_banks); num_banks++; - gpio_base += MAX_GPIO_PER_BANK; + num_gpios += MAX_GPIO_PER_BANK; continue; } @@ -691,12 +689,13 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) err = -ENOMEM; goto fail; } - gc->base = gpio_base; gc->of_gpio_n_cells = 2; gc->of_xlate = brcmstb_gpio_of_xlate; /* not all ngpio lines are valid, will use bank width later */ gc->ngpio = MAX_GPIO_PER_BANK; gc->offset = bank->id * MAX_GPIO_PER_BANK; + gc->request = gpiochip_generic_request; + gc->free = gpiochip_generic_free; if (priv->parent_irq > 0) gc->to_irq = brcmstb_gpio_to_irq; @@ -713,7 +712,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) bank->id); goto fail; } - gpio_base += gc->ngpio; + num_gpios += gc->ngpio; dev_dbg(dev, "bank=%d, base=%d, ngpio=%d, width=%d\n", bank->id, gc->base, gc->ngpio, bank->width); @@ -724,7 +723,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) num_banks++; } - priv->num_gpios = gpio_base - priv->gpio_base; + priv->num_gpios = num_gpios; if (priv->parent_irq > 0) { err = brcmstb_gpio_irq_setup(pdev, priv); if (err) diff --git a/drivers/gpio/gpio-cros-ec.c b/drivers/gpio/gpio-cros-ec.c index 842e1c0604..0c09bb54dc 100644 --- a/drivers/gpio/gpio-cros-ec.c +++ b/drivers/gpio/gpio-cros-ec.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -197,11 +198,18 @@ static int cros_ec_gpio_probe(struct platform_device *pdev) return devm_gpiochip_add_data(dev, gc, cros_ec); } +static const struct platform_device_id cros_ec_gpio_id[] = { + { "cros-ec-gpio", 0 }, + {} +}; +MODULE_DEVICE_TABLE(platform, cros_ec_gpio_id); + static struct platform_driver cros_ec_gpio_driver = { .probe = cros_ec_gpio_probe, .driver = { .name = "cros-ec-gpio", }, + .id_table = cros_ec_gpio_id, }; module_platform_driver(cros_ec_gpio_driver); diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c new file mode 100644 index 0000000000..f2e911a3d2 --- /dev/null +++ b/drivers/gpio/gpio-graniterapids.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel Granite Rapids-D vGPIO driver + * + * Copyright (c) 2024, Intel Corporation. 
+ * + * Author: Aapo Vienamo + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define GNR_NUM_PINS 128 +#define GNR_PINS_PER_REG 32 +#define GNR_NUM_REGS DIV_ROUND_UP(GNR_NUM_PINS, GNR_PINS_PER_REG) + +#define GNR_CFG_BAR 0x00 +#define GNR_CFG_LOCK_OFFSET 0x04 +#define GNR_GPI_STATUS_OFFSET 0x20 +#define GNR_GPI_ENABLE_OFFSET 0x24 + +#define GNR_CFG_DW_RX_MASK GENMASK(25, 22) +#define GNR_CFG_DW_RX_DISABLE FIELD_PREP(GNR_CFG_DW_RX_MASK, 2) +#define GNR_CFG_DW_RX_EDGE FIELD_PREP(GNR_CFG_DW_RX_MASK, 1) +#define GNR_CFG_DW_RX_LEVEL FIELD_PREP(GNR_CFG_DW_RX_MASK, 0) +#define GNR_CFG_DW_RXDIS BIT(4) +#define GNR_CFG_DW_TXDIS BIT(3) +#define GNR_CFG_DW_RXSTATE BIT(1) +#define GNR_CFG_DW_TXSTATE BIT(0) + +/** + * struct gnr_gpio - Intel Granite Rapids-D vGPIO driver state + * @gc: GPIO controller interface + * @reg_base: base address of the GPIO registers + * @ro_bitmap: bitmap of read-only pins + * @lock: guard the registers + * @pad_backup: backup of the register state for suspend + */ +struct gnr_gpio { + struct gpio_chip gc; + void __iomem *reg_base; + DECLARE_BITMAP(ro_bitmap, GNR_NUM_PINS); + raw_spinlock_t lock; + u32 pad_backup[]; +}; + +static void __iomem *gnr_gpio_get_padcfg_addr(const struct gnr_gpio *priv, + unsigned int gpio) +{ + return priv->reg_base + gpio * sizeof(u32); +} + +static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio, + u32 clear_mask, u32 set_mask) +{ + struct gnr_gpio *priv = gpiochip_get_data(gc); + void __iomem *addr = gnr_gpio_get_padcfg_addr(priv, gpio); + u32 dw; + + if (test_bit(gpio, priv->ro_bitmap)) + return -EACCES; + + guard(raw_spinlock_irqsave)(&priv->lock); + + dw = readl(addr); + dw &= ~clear_mask; + dw |= set_mask; + writel(dw, addr); + + return 0; +} + +static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio) +{ + const struct gnr_gpio *priv = gpiochip_get_data(gc); + u32 dw; + + dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio)); + + return !!(dw & GNR_CFG_DW_RXSTATE); +} + +static void gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value) +{ + u32 clear = 0; + u32 set = 0; + + if (value) + set = GNR_CFG_DW_TXSTATE; + else + clear = GNR_CFG_DW_TXSTATE; + + gnr_gpio_configure_line(gc, gpio, clear, set); +} + +static int gnr_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio) +{ + struct gnr_gpio *priv = gpiochip_get_data(gc); + u32 dw; + + dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio)); + + if (dw & GNR_CFG_DW_TXDIS) + return GPIO_LINE_DIRECTION_IN; + + return GPIO_LINE_DIRECTION_OUT; +} + +static int gnr_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio) +{ + return gnr_gpio_configure_line(gc, gpio, GNR_CFG_DW_RXDIS, 0); +} + +static int gnr_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio, int value) +{ + u32 clear = GNR_CFG_DW_TXDIS; + u32 set = value ? 
GNR_CFG_DW_TXSTATE : 0; + + return gnr_gpio_configure_line(gc, gpio, clear, set); +} + +static const struct gpio_chip gnr_gpio_chip = { + .owner = THIS_MODULE, + .get = gnr_gpio_get, + .set = gnr_gpio_set, + .get_direction = gnr_gpio_get_direction, + .direction_input = gnr_gpio_direction_input, + .direction_output = gnr_gpio_direction_output, +}; + +static void __iomem *gnr_gpio_get_reg_addr(const struct gnr_gpio *priv, + unsigned int base, + unsigned int gpio) +{ + return priv->reg_base + base + gpio * sizeof(u32); +} + +static void gnr_gpio_irq_ack(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct gnr_gpio *priv = gpiochip_get_data(gc); + irq_hw_number_t gpio = irqd_to_hwirq(d); + unsigned int reg_idx = gpio / GNR_PINS_PER_REG; + unsigned int bit_idx = gpio % GNR_PINS_PER_REG; + void __iomem *addr = gnr_gpio_get_reg_addr(priv, GNR_GPI_STATUS_OFFSET, reg_idx); + u32 reg; + + guard(raw_spinlock_irqsave)(&priv->lock); + + reg = readl(addr); + reg &= ~BIT(bit_idx); + writel(reg, addr); +} + +static void gnr_gpio_irq_mask_unmask(struct gpio_chip *gc, unsigned long gpio, bool mask) +{ + struct gnr_gpio *priv = gpiochip_get_data(gc); + unsigned int reg_idx = gpio / GNR_PINS_PER_REG; + unsigned int bit_idx = gpio % GNR_PINS_PER_REG; + void __iomem *addr = gnr_gpio_get_reg_addr(priv, GNR_GPI_ENABLE_OFFSET, reg_idx); + u32 reg; + + guard(raw_spinlock_irqsave)(&priv->lock); + + reg = readl(addr); + if (mask) + reg &= ~BIT(bit_idx); + else + reg |= BIT(bit_idx); + writel(reg, addr); +} + +static void gnr_gpio_irq_mask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + gnr_gpio_irq_mask_unmask(gc, hwirq, true); + gpiochip_disable_irq(gc, hwirq); +} + +static void gnr_gpio_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + gpiochip_enable_irq(gc, hwirq); + gnr_gpio_irq_mask_unmask(gc, hwirq, false); +} + +static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t pin = irqd_to_hwirq(d); + u32 mask = GNR_CFG_DW_RX_MASK; + u32 set; + + /* Falling edge and level low triggers not supported by the GPIO controller */ + switch (type) { + case IRQ_TYPE_NONE: + set = GNR_CFG_DW_RX_DISABLE; + break; + case IRQ_TYPE_EDGE_RISING: + set = GNR_CFG_DW_RX_EDGE; + irq_set_handler_locked(d, handle_edge_irq); + break; + case IRQ_TYPE_LEVEL_HIGH: + set = GNR_CFG_DW_RX_LEVEL; + irq_set_handler_locked(d, handle_level_irq); + break; + default: + return -EINVAL; + } + + return gnr_gpio_configure_line(gc, pin, mask, set); +} + +static const struct irq_chip gnr_gpio_irq_chip = { + .irq_ack = gnr_gpio_irq_ack, + .irq_mask = gnr_gpio_irq_mask, + .irq_unmask = gnr_gpio_irq_unmask, + .irq_set_type = gnr_gpio_irq_set_type, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + +static void gnr_gpio_init_pin_ro_bits(struct device *dev, + const void __iomem *cfg_lock_base, + unsigned long *ro_bitmap) +{ + u32 tmp[GNR_NUM_REGS]; + + memcpy_fromio(tmp, cfg_lock_base, sizeof(tmp)); + bitmap_from_arr32(ro_bitmap, tmp, GNR_NUM_PINS); +} + +static irqreturn_t gnr_gpio_irq(int irq, void *data) +{ + struct gnr_gpio *priv = data; + unsigned int handled = 0; + + for (unsigned int i = 0; i < GNR_NUM_REGS; i++) { + const void __iomem *reg = priv->reg_base + i * sizeof(u32); + unsigned long pending; + unsigned long enabled; + unsigned int bit_idx; 
+ + scoped_guard(raw_spinlock, &priv->lock) { + pending = readl(reg + GNR_GPI_STATUS_OFFSET); + enabled = readl(reg + GNR_GPI_ENABLE_OFFSET); + } + + /* Only enabled interrupts */ + pending &= enabled; + + for_each_set_bit(bit_idx, &pending, GNR_PINS_PER_REG) { + unsigned int hwirq = i * GNR_PINS_PER_REG + bit_idx; + + generic_handle_domain_irq(priv->gc.irq.domain, hwirq); + } + + handled += pending ? 1 : 0; + + } + return IRQ_RETVAL(handled); +} + +static int gnr_gpio_probe(struct platform_device *pdev) +{ + size_t num_backup_pins = IS_ENABLED(CONFIG_PM_SLEEP) ? GNR_NUM_PINS : 0; + struct device *dev = &pdev->dev; + struct gpio_irq_chip *girq; + struct gnr_gpio *priv; + void __iomem *regs; + int irq, ret; + + priv = devm_kzalloc(dev, struct_size(priv, pad_backup, num_backup_pins), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + raw_spin_lock_init(&priv->lock); + + regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(dev, irq, gnr_gpio_irq, IRQF_SHARED | IRQF_NO_THREAD, + dev_name(dev), priv); + if (ret) + return dev_err_probe(dev, ret, "failed to request interrupt\n"); + + priv->reg_base = regs + readl(regs + GNR_CFG_BAR); + + gnr_gpio_init_pin_ro_bits(dev, priv->reg_base + GNR_CFG_LOCK_OFFSET, + priv->ro_bitmap); + + priv->gc = gnr_gpio_chip; + priv->gc.label = dev_name(dev); + priv->gc.parent = dev; + priv->gc.ngpio = GNR_NUM_PINS; + priv->gc.base = -1; + + girq = &priv->gc.irq; + gpio_irq_chip_set_chip(girq, &gnr_gpio_irq_chip); + girq->chip->name = dev_name(dev); + girq->parent_handler = NULL; + girq->num_parents = 0; + girq->parents = NULL; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_bad_irq; + + platform_set_drvdata(pdev, priv); + + return devm_gpiochip_add_data(dev, &priv->gc, priv); +} + +static int gnr_gpio_suspend(struct device *dev) +{ + struct gnr_gpio *priv = dev_get_drvdata(dev); + unsigned int i; + + guard(raw_spinlock_irqsave)(&priv->lock); + + for_each_clear_bit(i, priv->ro_bitmap, priv->gc.ngpio) + priv->pad_backup[i] = readl(gnr_gpio_get_padcfg_addr(priv, i)); + + return 0; +} + +static int gnr_gpio_resume(struct device *dev) +{ + struct gnr_gpio *priv = dev_get_drvdata(dev); + unsigned int i; + + guard(raw_spinlock_irqsave)(&priv->lock); + + for_each_clear_bit(i, priv->ro_bitmap, priv->gc.ngpio) + writel(priv->pad_backup[i], gnr_gpio_get_padcfg_addr(priv, i)); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(gnr_gpio_pm_ops, gnr_gpio_suspend, gnr_gpio_resume); + +static const struct acpi_device_id gnr_gpio_acpi_match[] = { + { "INTC1109" }, + {} +}; +MODULE_DEVICE_TABLE(acpi, gnr_gpio_acpi_match); + +static struct platform_driver gnr_gpio_driver = { + .driver = { + .name = "gpio-graniterapids", + .pm = pm_sleep_ptr(&gnr_gpio_pm_ops), + .acpi_match_table = gnr_gpio_acpi_match, + }, + .probe = gnr_gpio_probe, +}; +module_platform_driver(gnr_gpio_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Aapo Vienamo "); +MODULE_DESCRIPTION("Intel Granite Rapids-D vGPIO driver"); diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c index 899335da93..7e29a2d8de 100644 --- a/drivers/gpio/gpio-gw-pld.c +++ b/drivers/gpio/gpio-gw-pld.c @@ -130,5 +130,6 @@ static struct i2c_driver gw_pld_driver = { }; module_i2c_driver(gw_pld_driver); +MODULE_DESCRIPTION("Gateworks I2C PLD GPIO expander"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Linus Walleij "); diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c 
index cd9b16dbe1..94f6fefc01 100644 --- a/drivers/gpio/gpio-mc33880.c +++ b/drivers/gpio/gpio-mc33880.c @@ -168,5 +168,6 @@ static void __exit mc33880_exit(void) module_exit(mc33880_exit); MODULE_AUTHOR("Mocean Laboratories "); +MODULE_DESCRIPTION("MC33880 high-side/low-side switch GPIO driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index f2f40393e3..732a696474 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -144,7 +144,7 @@ static int pca953x_acpi_get_irq(struct device *dev) if (ret) dev_warn(dev, "can't add GPIO ACPI mapping\n"); - ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq-gpios", 0); + ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq", 0); if (ret < 0) return ret; diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index 53b69abe67..7c57eaeb0a 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -438,5 +438,6 @@ static void __exit pcf857x_exit(void) } module_exit(pcf857x_exit); +MODULE_DESCRIPTION("Driver for pcf857x, pca857x, and pca967x I2C GPIO expanders"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Brownell"); diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c index 2efd1b1a08..7f7f95ad43 100644 --- a/drivers/gpio/gpio-pcie-idio-24.c +++ b/drivers/gpio/gpio-pcie-idio-24.c @@ -267,7 +267,7 @@ static int idio_24_reg_mask_xlate(struct gpio_regmap *const gpio, const unsigned case IDIO_24_CONTROL_REG: /* We can only set direction for TTL/CMOS lines */ if (offset < 48) - return -EOPNOTSUPP; + return -ENOTSUPP; *reg = IDIO_24_CONTROL_REG; *mask = CONTROL_REG_OUT_MODE; diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index 9fc1f3dd41..a211a02d4b 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c @@ -438,4 +438,5 @@ static struct amba_driver pl061_gpio_driver = { }; module_amba_driver(pl061_gpio_driver); +MODULE_DESCRIPTION("Driver for the ARM PrimeCell(tm) General Purpose Input/Output (PL061)"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c index c08c8e5288..71684dee2c 100644 --- a/drivers/gpio/gpio-regmap.c +++ b/drivers/gpio/gpio-regmap.c @@ -129,7 +129,7 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip, base = gpio_regmap_addr(gpio->reg_dir_in_base); invert = 1; } else { - return -EOPNOTSUPP; + return -ENOTSUPP; } ret = gpio->reg_mask_xlate(gpio, base, offset, ®, &mask); @@ -160,7 +160,7 @@ static int gpio_regmap_set_direction(struct gpio_chip *chip, base = gpio_regmap_addr(gpio->reg_dir_in_base); invert = 1; } else { - return -EOPNOTSUPP; + return -ENOTSUPP; } ret = gpio->reg_mask_xlate(gpio, base, offset, ®, &mask); diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c index e48392074e..ff0341b122 100644 --- a/drivers/gpio/gpio-sch.c +++ b/drivers/gpio/gpio-sch.c @@ -38,8 +38,8 @@ struct sch_gpio { struct gpio_chip chip; + void __iomem *regs; spinlock_t lock; - unsigned short iobase; unsigned short resume_base; /* GPE handling */ @@ -75,7 +75,7 @@ static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned int gpio, unsigned in offset = sch_gpio_offset(sch, gpio, reg); bit = sch_gpio_bit(sch, gpio); - reg_val = !!(inb(sch->iobase + offset) & BIT(bit)); + reg_val = !!(ioread8(sch->regs + offset) & BIT(bit)); return reg_val; } @@ -89,12 +89,14 @@ static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned int gpio, unsigned i offset = sch_gpio_offset(sch, gpio, reg); bit = sch_gpio_bit(sch, 
gpio); - reg_val = inb(sch->iobase + offset); + reg_val = ioread8(sch->regs + offset); if (val) - outb(reg_val | BIT(bit), sch->iobase + offset); + reg_val |= BIT(bit); else - outb((reg_val & ~BIT(bit)), sch->iobase + offset); + reg_val &= ~BIT(bit); + + iowrite8(reg_val, sch->regs + offset); } static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned int gpio_num) @@ -267,8 +269,8 @@ static u32 sch_gpio_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context) spin_lock_irqsave(&sch->lock, flags); - core_status = inl(sch->iobase + CORE_BANK_OFFSET + GTS); - resume_status = inl(sch->iobase + RESUME_BANK_OFFSET + GTS); + core_status = ioread32(sch->regs + CORE_BANK_OFFSET + GTS); + resume_status = ioread32(sch->regs + RESUME_BANK_OFFSET + GTS); spin_unlock_irqrestore(&sch->lock, flags); @@ -319,12 +321,14 @@ static int sch_gpio_install_gpe_handler(struct sch_gpio *sch) static int sch_gpio_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct gpio_irq_chip *girq; struct sch_gpio *sch; struct resource *res; + void __iomem *regs; int ret; - sch = devm_kzalloc(&pdev->dev, sizeof(*sch), GFP_KERNEL); + sch = devm_kzalloc(dev, sizeof(*sch), GFP_KERNEL); if (!sch) return -ENOMEM; @@ -332,15 +336,16 @@ static int sch_gpio_probe(struct platform_device *pdev) if (!res) return -EBUSY; - if (!devm_request_region(&pdev->dev, res->start, resource_size(res), - pdev->name)) + regs = devm_ioport_map(dev, res->start, resource_size(res)); + if (!regs) return -EBUSY; + sch->regs = regs; + spin_lock_init(&sch->lock); - sch->iobase = res->start; sch->chip = sch_gpio_chip; - sch->chip.label = dev_name(&pdev->dev); - sch->chip.parent = &pdev->dev; + sch->chip.label = dev_name(dev); + sch->chip.parent = dev; switch (pdev->id) { case PCI_DEVICE_ID_INTEL_SCH_LPC: @@ -394,9 +399,9 @@ static int sch_gpio_probe(struct platform_device *pdev) ret = sch_gpio_install_gpe_handler(sch); if (ret) - dev_warn(&pdev->dev, "Can't setup GPE, no IRQ support\n"); + dev_warn(dev, "Can't setup GPE, no IRQ support\n"); - return devm_gpiochip_add_data(&pdev->dev, &sch->chip, sch); + return devm_gpiochip_add_data(dev, &sch->chip, sch); } static struct platform_driver sch_gpio_driver = { diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c index fcc5e8c089..9fae8e396c 100644 --- a/drivers/gpio/gpio-virtio.c +++ b/drivers/gpio/gpio-virtio.c @@ -653,7 +653,6 @@ static struct virtio_driver virtio_gpio_driver = { .remove = virtio_gpio_remove, .driver = { .name = KBUILD_MODNAME, - .owner = THIS_MODULE, }, }; module_virtio_driver(virtio_gpio_driver); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index c1e190d3ea..bb063b81ce 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -890,9 +890,6 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev, struct acpi_gpio_lookup lookup; int ret; - if (!adev) - return ERR_PTR(-ENODEV); - memset(&lookup, 0, sizeof(lookup)); lookup.index = index; @@ -958,6 +955,10 @@ static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode, static bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) { + /* If there is no ACPI device, there is no _CRS to fall back to */ + if (!adev) + return false; + /* Never allow fallback if the device has properties */ if (acpi_dev_has_props(adev) || adev->driver_gpios) return false; @@ -965,14 +966,11 @@ static bool acpi_can_fallback_to_crs(struct acpi_device *adev, return con_id == NULL; } -struct gpio_desc 
*acpi_find_gpio(struct fwnode_handle *fwnode, - const char *con_id, - unsigned int idx, - enum gpiod_flags *dflags, - unsigned long *lookupflags) +static struct gpio_desc * +__acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int idx, + bool can_fallback, struct acpi_gpio_info *info) { struct acpi_device *adev = to_acpi_device_node(fwnode); - struct acpi_gpio_info info; struct gpio_desc *desc; char propname[32]; int i; @@ -989,25 +987,38 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, if (adev) desc = acpi_get_gpiod_by_index(adev, - propname, idx, &info); + propname, idx, info); else desc = acpi_get_gpiod_from_data(fwnode, - propname, idx, &info); - if (!IS_ERR(desc)) - break; + propname, idx, info); if (PTR_ERR(desc) == -EPROBE_DEFER) return ERR_CAST(desc); + + if (!IS_ERR(desc)) + return desc; } /* Then from plain _CRS GPIOs */ - if (IS_ERR(desc)) { - if (!adev || !acpi_can_fallback_to_crs(adev, con_id)) - return ERR_PTR(-ENOENT); + if (can_fallback) + return acpi_get_gpiod_by_index(adev, NULL, idx, info); - desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info); - if (IS_ERR(desc)) - return desc; - } + return ERR_PTR(-ENOENT); +} + +struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, + const char *con_id, + unsigned int idx, + enum gpiod_flags *dflags, + unsigned long *lookupflags) +{ + struct acpi_device *adev = to_acpi_device_node(fwnode); + bool can_fallback = acpi_can_fallback_to_crs(adev, con_id); + struct acpi_gpio_info info; + struct gpio_desc *desc; + + desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info); + if (IS_ERR(desc)) + return desc; if (info.gpioint && (*dflags == GPIOD_OUT_LOW || *dflags == GPIOD_OUT_HIGH)) { @@ -1023,7 +1034,7 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, /** * acpi_dev_gpio_irq_wake_get_by() - Find GpioInt and translate it to Linux IRQ number * @adev: pointer to a ACPI device to get IRQ from - * @name: optional name of GpioInt resource + * @con_id: optional name of GpioInt resource * @index: index of GpioInt resource (starting from %0) * @wake_capable: Set to true if the IRQ is wake capable * @@ -1034,17 +1045,18 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, * The function is idempotent, though each time it runs it will configure GPIO * pin direction according to the flags in GpioInt resource. * - * The function takes optional @name parameter. If the resource has a property - * name, then only those will be taken into account. + * The function takes optional @con_id parameter. If the resource has + * a @con_id in a property, then only those will be taken into account. * * The GPIO is considered wake capable if the GpioInt resource specifies * SharedAndWake or ExclusiveAndWake. * * Return: Linux IRQ number (> %0) on success, negative errno on failure. 
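To make the renamed @con_id parameter concrete, a hypothetical consumer of acpi_dev_gpio_irq_wake_get_by() could look roughly like the sketch below; the helper name, the "irq" resource name (borrowed from the pca953x hunk earlier in this patch) and the wakeup handling are assumptions for illustration, not taken from this change:

/* Hypothetical probe helper: translate the first "irq" GpioInt of the
 * device's ACPI companion into a Linux IRQ and honour its wake capability. */
static int example_request_wake_irq(struct device *dev, irq_handler_t handler)
{
        struct acpi_device *adev = ACPI_COMPANION(dev);
        bool wake_capable;
        int irq;

        /* Returns a Linux IRQ number (> 0) or a negative errno. */
        irq = acpi_dev_gpio_irq_wake_get_by(adev, "irq", 0, &wake_capable);
        if (irq < 0)
                return irq;

        if (wake_capable)
                device_set_wakeup_capable(dev, true);

        return devm_request_irq(dev, irq, handler, 0, dev_name(dev), dev);
}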
*/ -int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, int index, +int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, bool *wake_capable) { + struct fwnode_handle *fwnode = acpi_fwnode_handle(adev); int idx, i; unsigned int irq_flags; int ret; @@ -1053,9 +1065,8 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, in struct acpi_gpio_info info; struct gpio_desc *desc; - desc = acpi_get_gpiod_by_index(adev, name, i, &info); - /* Ignore -EPROBE_DEFER, it only matters if idx matches */ + desc = __acpi_find_gpio(fwnode, con_id, i, true, &info); if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) return PTR_ERR(desc); @@ -1075,7 +1086,11 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, in acpi_gpio_update_gpiod_flags(&dflags, &info); acpi_gpio_update_gpiod_lookup_flags(&lflags, &info); - snprintf(label, sizeof(label), "GpioInt() %d", index); + snprintf(label, sizeof(label), "%pfwP GpioInt(%d)", fwnode, index); + ret = gpiod_set_consumer_name(desc, con_id ?: label); + if (ret) + return ret; + ret = gpiod_configure_flags(desc, label, lflags, dflags); if (ret < 0) return ret; diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c index b138682fec..5a9911ae91 100644 --- a/drivers/gpio/gpiolib-legacy.c +++ b/drivers/gpio/gpiolib-legacy.c @@ -28,10 +28,9 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) struct gpio_desc *desc; int err; - desc = gpio_to_desc(gpio); - /* Compatibility: assume unavailable "valid" GPIOs will appear later */ - if (!desc && gpio_is_valid(gpio)) + desc = gpio_to_desc(gpio); + if (!desc) return -EPROBE_DEFER; err = gpiod_request(desc, label); @@ -63,51 +62,13 @@ EXPORT_SYMBOL_GPL(gpio_request_one); */ int gpio_request(unsigned gpio, const char *label) { - struct gpio_desc *desc = gpio_to_desc(gpio); + struct gpio_desc *desc; /* Compatibility: assume unavailable "valid" GPIOs will appear later */ - if (!desc && gpio_is_valid(gpio)) + desc = gpio_to_desc(gpio); + if (!desc) return -EPROBE_DEFER; return gpiod_request(desc, label); } EXPORT_SYMBOL_GPL(gpio_request); - -/** - * gpio_request_array - request multiple GPIOs in a single call - * @array: array of the 'struct gpio' - * @num: how many GPIOs in the array - * - * **DEPRECATED** This function is deprecated and must not be used in new code. - */ -int gpio_request_array(const struct gpio *array, size_t num) -{ - int i, err; - - for (i = 0; i < num; i++, array++) { - err = gpio_request_one(array->gpio, array->flags, array->label); - if (err) - goto err_free; - } - return 0; - -err_free: - while (i--) - gpio_free((--array)->gpio); - return err; -} -EXPORT_SYMBOL_GPL(gpio_request_array); - -/** - * gpio_free_array - release multiple GPIOs in a single call - * @array: array of the 'struct gpio' - * @num: how many GPIOs in the array - * - * **DEPRECATED** This function is deprecated and must not be used in new code. 
- */ -void gpio_free_array(const struct gpio *array, size_t num) -{ - while (num--) - gpio_free((array++)->gpio); -} -EXPORT_SYMBOL_GPL(gpio_free_array); diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 5c44422001..89d5e64cf6 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -1055,7 +1055,7 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip) struct of_phandle_args pinspec; struct pinctrl_dev *pctldev; struct device_node *np; - int index = 0, ret; + int index = 0, ret, trim; const char *name; static const char group_names_propname[] = "gpio-ranges-group-names"; struct property *group_names; @@ -1077,7 +1077,14 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip) if (!pctldev) return -EPROBE_DEFER; + /* Ignore ranges outside of this GPIO chip */ + if (pinspec.args[0] >= (chip->offset + chip->ngpio)) + continue; + if (pinspec.args[0] + pinspec.args[2] <= chip->offset) + continue; + if (pinspec.args[2]) { + /* npins != 0: linear range */ if (group_names) { of_property_read_string_index(np, group_names_propname, @@ -1088,7 +1095,19 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip) break; } } - /* npins != 0: linear range */ + + /* Trim the range to fit this GPIO chip */ + if (chip->offset > pinspec.args[0]) { + trim = chip->offset - pinspec.args[0]; + pinspec.args[2] -= trim; + pinspec.args[1] += trim; + pinspec.args[0] = 0; + } else { + pinspec.args[0] -= chip->offset; + } + if ((pinspec.args[0] + pinspec.args[2]) > chip->ngpio) + pinspec.args[2] = chip->ngpio - pinspec.args[0]; + ret = gpiochip_add_pin_range(chip, pinctrl_dev_get_devname(pctldev), pinspec.args[0], diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c index fa52bdb1a2..cec1ab878a 100644 --- a/drivers/gpio/gpiolib-swnode.c +++ b/drivers/gpio/gpiolib-swnode.c @@ -4,8 +4,13 @@ * * Copyright 2022 Google LLC */ + +#define pr_fmt(fmt) "gpiolib: swnode: " fmt + #include #include +#include +#include #include #include #include @@ -17,6 +22,8 @@ #include "gpiolib.h" #include "gpiolib-swnode.h" +#define GPIOLIB_SWNODE_UNDEFINED_NAME "swnode-gpio-undefined" + static void swnode_format_propname(const char *con_id, char *propname, size_t max_size) { @@ -40,6 +47,14 @@ static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode) if (!gdev_node || !gdev_node->name) return ERR_PTR(-EINVAL); + /* + * Check for a special node that identifies undefined GPIOs, this is + * primarily used as a key for internal chip selects in SPI bindings. + */ + if (IS_ENABLED(CONFIG_GPIO_SWNODE_UNDEFINED) && + !strcmp(gdev_node->name, GPIOLIB_SWNODE_UNDEFINED_NAME)) + return ERR_PTR(-ENOENT); + gdev = gpio_device_find_by_label(gdev_node->name); return gdev ?: ERR_PTR(-EPROBE_DEFER); } @@ -121,3 +136,32 @@ int swnode_gpio_count(const struct fwnode_handle *fwnode, const char *con_id) return count ?: -ENOENT; } + +#if IS_ENABLED(CONFIG_GPIO_SWNODE_UNDEFINED) +/* + * A special node that identifies undefined GPIOs, this is primarily used as + * a key for internal chip selects in SPI bindings. 
+ */ +const struct software_node swnode_gpio_undefined = { + .name = GPIOLIB_SWNODE_UNDEFINED_NAME, +}; +EXPORT_SYMBOL_NS_GPL(swnode_gpio_undefined, GPIO_SWNODE); + +static int __init swnode_gpio_init(void) +{ + int ret; + + ret = software_node_register(&swnode_gpio_undefined); + if (ret < 0) + pr_err("failed to register swnode: %d\n", ret); + + return ret; +} +subsys_initcall(swnode_gpio_init); + +static void __exit swnode_gpio_cleanup(void) +{ + software_node_unregister(&swnode_gpio_undefined); +} +__exitcall(swnode_gpio_cleanup); +#endif diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 6853ecd98b..26202586fd 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -412,7 +412,7 @@ static ssize_t base_show(struct device *dev, { const struct gpio_device *gdev = dev_get_drvdata(dev); - return sysfs_emit(buf, "%d\n", gdev->base); + return sysfs_emit(buf, "%u\n", gdev->base); } static DEVICE_ATTR_RO(base); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index fa50db0c36..fa62367ee9 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -164,9 +164,6 @@ struct gpio_desc *gpio_to_desc(unsigned gpio) } } - if (!gpio_is_valid(gpio)) - pr_warn("invalid GPIO %d\n", gpio); - return NULL; } EXPORT_SYMBOL_GPL(gpio_to_desc); @@ -311,10 +308,10 @@ struct gpio_chip *gpio_device_get_chip(struct gpio_device *gdev) EXPORT_SYMBOL_GPL(gpio_device_get_chip); /* dynamic allocation of GPIOs, e.g. on a hotplugged device */ -static int gpiochip_find_base_unlocked(int ngpio) +static int gpiochip_find_base_unlocked(u16 ngpio) { + unsigned int base = GPIO_DYNAMIC_BASE; struct gpio_device *gdev; - int base = GPIO_DYNAMIC_BASE; list_for_each_entry_srcu(gdev, &gpio_devices, list, lockdep_is_held(&gpio_devices_lock)) { @@ -325,9 +322,11 @@ static int gpiochip_find_base_unlocked(int ngpio) base = gdev->base + gdev->ngpio; if (base < GPIO_DYNAMIC_BASE) base = GPIO_DYNAMIC_BASE; + if (base > GPIO_DYNAMIC_MAX - ngpio) + break; } - if (gpio_is_valid(base)) { + if (base <= GPIO_DYNAMIC_MAX - ngpio) { pr_debug("%s: found new base at %d\n", __func__, base); return base; } else { @@ -379,7 +378,10 @@ int gpiod_get_direction(struct gpio_desc *desc) if (ret < 0) return ret; - /* GPIOF_DIR_IN or other positive, otherwise GPIOF_DIR_OUT */ + /* + * GPIO_LINE_DIRECTION_IN or other positive, + * otherwise GPIO_LINE_DIRECTION_OUT. + */ if (ret > 0) ret = 1; @@ -760,7 +762,7 @@ static int gpiochip_setup_dev(struct gpio_device *gdev) if (ret) goto err_remove_device; - dev_dbg(&gdev->dev, "registered GPIOs %d to %d on %s\n", gdev->base, + dev_dbg(&gdev->dev, "registered GPIOs %u to %u on %s\n", gdev->base, gdev->base + gdev->ngpio - 1, gdev->label); return 0; @@ -4254,7 +4256,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer, ret = gpiod_configure_flags(desc, con_id, lookupflags, flags); if (ret < 0) { gpiod_put(desc); - dev_dbg(consumer, "setup of GPIO %s failed\n", name); + dev_err(consumer, "setup of GPIO %s failed: %d\n", name, ret); return ERR_PTR(ret); } @@ -4802,14 +4804,14 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev) value = gpio_chip_get_value(gc, desc); is_irq = test_bit(FLAG_USED_AS_IRQ, &desc->flags); active_low = test_bit(FLAG_ACTIVE_LOW, &desc->flags); - seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s%s\n", + seq_printf(s, " gpio-%-3u (%-20.20s|%-20.20s) %s %s %s%s\n", gpio, desc->name ?: "", gpiod_get_label(desc), is_out ? "out" : "in ", value >= 0 ? (value ? "hi" : "lo") : "? ", is_irq ? 
"IRQ " : "", active_low ? "ACTIVE LOW" : ""); } else if (desc->name) { - seq_printf(s, " gpio-%-3d (%-20.20s)\n", gpio, desc->name); + seq_printf(s, " gpio-%-3u (%-20.20s)\n", gpio, desc->name); } gpio++; @@ -4881,7 +4883,7 @@ static int gpiolib_seq_show(struct seq_file *s, void *v) return 0; } - seq_printf(s, "%s%s: GPIOs %d-%d", priv->newline ? "\n" : "", + seq_printf(s, "%s%s: GPIOs %u-%u", priv->newline ? "\n" : "", dev_name(&gdev->dev), gdev->base, gdev->base + gdev->ngpio - 1); parent = gc->parent; diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index 8e0e211ebf..48e086c2f4 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -63,7 +63,7 @@ struct gpio_device { struct gpio_chip __rcu *chip; struct gpio_desc *descs; struct srcu_struct desc_srcu; - int base; + unsigned int base; u16 ngpio; bool can_sleep; const char *label; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 5a0c476361..359b68adaf 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -29,6 +29,8 @@ menuconfig DRM details. You should also select and configure AGP (/dev/agpgart) support if it is available for your platform. +if DRM + config DRM_MIPI_DBI tristate depends on DRM @@ -102,6 +104,37 @@ config DRM_KMS_HELPER help CRTC helpers for KMS drivers. +config DRM_PANIC + bool "Display a user-friendly message when a kernel panic occurs" + depends on DRM && !(FRAMEBUFFER_CONSOLE && VT_CONSOLE) + select FONT_SUPPORT + help + Enable a drm panic handler, which will display a user-friendly message + when a kernel panic occurs. It's useful when using a user-space + console instead of fbcon. + It will only work if your graphic driver supports this feature. + To support Hi-DPI Display, you can enable bigger fonts like + FONT_TER16x32 + +config DRM_PANIC_FOREGROUND_COLOR + hex "Drm panic screen foreground color, in RGB" + depends on DRM_PANIC + default 0xffffff + +config DRM_PANIC_BACKGROUND_COLOR + hex "Drm panic screen background color, in RGB" + depends on DRM_PANIC + default 0x000000 + +config DRM_PANIC_DEBUG + bool "Add a debug fs entry to trigger drm_panic" + depends on DRM_PANIC && DEBUG_FS + help + Add dri/[device]/drm_panic_plane_x in the kernel debugfs, to force the + panic handler to write the panic message to this plane scanout buffer. + This is unsafe and should not be enabled on a production build. + If in doubt, say "N". + config DRM_DEBUG_DP_MST_TOPOLOGY_REFS bool "Enable refcount backtrace history in the DP MST helpers" depends on STACKTRACE_SUPPORT @@ -371,6 +404,8 @@ source "drivers/gpu/drm/lima/Kconfig" source "drivers/gpu/drm/panfrost/Kconfig" +source "drivers/gpu/drm/panthor/Kconfig" + source "drivers/gpu/drm/aspeed/Kconfig" source "drivers/gpu/drm/mcde/Kconfig" @@ -403,10 +438,6 @@ config DRM_HYPERV config DRM_EXPORT_FOR_TESTS bool -# Separate option because drm_panel_orientation_quirks.c is shared with fbdev -config DRM_PANEL_ORIENTATION_QUIRKS - tristate - config DRM_LIB_RANDOM bool default n @@ -414,3 +445,23 @@ config DRM_LIB_RANDOM config DRM_PRIVACY_SCREEN bool default n + +config DRM_WERROR + bool "Compile the drm subsystem with warnings as errors" + depends on DRM && EXPERT + depends on !WERROR + default n + help + A kernel build should not cause any compiler warnings, and this + enables the '-Werror' flag to enforce that rule in the drm subsystem. + + The drm subsystem enables more warnings than the kernel default, so + this config option is disabled by default. + + If in doubt, say N. 
+ +endif + +# Separate option because drm_panel_orientation_quirks.c is shared with fbdev +config DRM_PANEL_ORIENTATION_QUIRKS + tristate diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 104b42df2e..f9ca4f8fa6 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -5,6 +5,34 @@ CFLAGS-$(CONFIG_DRM_USE_DYNAMIC_DEBUG) += -DDYNAMIC_DEBUG_MODULE +# Unconditionally enable W=1 warnings locally +# --- begin copy-paste W=1 warnings from scripts/Makefile.extrawarn +subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter +subdir-ccflags-y += $(call cc-option, -Wrestrict) +subdir-ccflags-y += -Wmissing-format-attribute +subdir-ccflags-y += -Wold-style-definition +subdir-ccflags-y += -Wmissing-include-dirs +subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable) +subdir-ccflags-y += $(call cc-option, -Wunused-const-variable) +subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned) +subdir-ccflags-y += $(call cc-option, -Wformat-overflow) +# FIXME: fix -Wformat-truncation warnings and uncomment +#subdir-ccflags-y += $(call cc-option, -Wformat-truncation) +subdir-ccflags-y += $(call cc-option, -Wstringop-truncation) +# The following turn off the warnings enabled by -Wextra +ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),) +subdir-ccflags-y += -Wno-missing-field-initializers +subdir-ccflags-y += -Wno-type-limits +subdir-ccflags-y += -Wno-shift-negative-value +endif +ifeq ($(findstring 3, $(KBUILD_EXTRA_WARN)),) +subdir-ccflags-y += -Wno-sign-compare +endif +# --- end copy-paste + +# Enable -Werror in CI and development +subdir-ccflags-$(CONFIG_DRM_WERROR) += -Werror + drm-y := \ drm_aperture.o \ drm_atomic.o \ @@ -60,6 +88,7 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \ drm_privacy_screen.o \ drm_privacy_screen_x86.o drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o +drm-$(CONFIG_DRM_PANIC) += drm_panic.o obj-$(CONFIG_DRM) += drm.o obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o @@ -179,6 +208,7 @@ obj-$(CONFIG_DRM_XEN) += xen/ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ obj-$(CONFIG_DRM_LIMA) += lima/ obj-$(CONFIG_DRM_PANFROST) += panfrost/ +obj-$(CONFIG_DRM_PANTHOR) += panthor/ obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/ obj-$(CONFIG_DRM_MCDE) += mcde/ obj-$(CONFIG_DRM_TIDSS) += tidss/ diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 4536c8ad0e..078f588e99 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -23,7 +23,7 @@ # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. -FULL_AMD_PATH=$(srctree)/$(src)/.. +FULL_AMD_PATH=$(src)/.. 
DISPLAY_FOLDER_NAME=display FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME) @@ -70,7 +70,8 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \ amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o \ atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \ atombios_encoders.o amdgpu_sa.o atombios_i2c.o \ - amdgpu_dma_buf.o amdgpu_vm.o amdgpu_vm_pt.o amdgpu_ib.o amdgpu_pll.o \ + amdgpu_dma_buf.o amdgpu_vm.o amdgpu_vm_pt.o amdgpu_vm_tlb_fence.o \ + amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \ amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \ @@ -80,7 +81,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \ amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \ amdgpu_fw_attestation.o amdgpu_securedisplay.o \ amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \ - amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o + amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o @@ -247,7 +248,8 @@ amdgpu-y += \ smuio_v11_0_6.o \ smuio_v13_0.o \ smuio_v13_0_3.o \ - smuio_v13_0_6.o + smuio_v13_0_6.o \ + smuio_v14_0_2.o # add reset block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index b3b8464720..f87d53e183 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -139,6 +139,14 @@ enum amdgpu_ss { AMDGPU_SS_DRV_UNLOAD }; +struct amdgpu_hwip_reg_entry { + u32 hwip; + u32 inst; + u32 seg; + u32 reg_offset; + const char *reg_name; +}; + struct amdgpu_watchdog_timer { bool timeout_fatal_disable; uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */ @@ -494,6 +502,7 @@ struct amdgpu_wb { uint64_t gpu_addr; u32 num_wb; /* Number of wb slots actually reserved for amdgpu. 
*/ unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)]; + spinlock_t lock; }; int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb); @@ -606,7 +615,7 @@ struct amdgpu_asic_funcs { /* PCIe replay counter */ uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev); /* device supports BACO */ - bool (*supports_baco)(struct amdgpu_device *adev); + int (*supports_baco)(struct amdgpu_device *adev); /* pre asic_init quirks */ void (*pre_asic_init)(struct amdgpu_device *adev); /* enter/exit umd stable pstate */ @@ -1408,7 +1417,8 @@ bool amdgpu_device_supports_atpx(struct drm_device *dev); bool amdgpu_device_supports_px(struct drm_device *dev); bool amdgpu_device_supports_boco(struct drm_device *dev); bool amdgpu_device_supports_smart_shift(struct drm_device *dev); -bool amdgpu_device_supports_baco(struct drm_device *dev); +int amdgpu_device_supports_baco(struct drm_device *dev); +void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev); bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); int amdgpu_device_baco_enter(struct drm_device *dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index 493982f946..c50202215f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -28,7 +28,7 @@ #define ACA_BANK_HWID(type, hwid, mcatype) [ACA_HWIP_TYPE_##type] = {hwid, mcatype} -typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, void *data); +typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data); struct aca_banks { int nr_banks; @@ -86,7 +86,7 @@ static void aca_banks_release(struct aca_banks *banks) } } -static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_error_type type, u32 *count) +static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count) { struct amdgpu_aca *aca = &adev->aca; const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; @@ -116,20 +116,22 @@ static struct aca_regs_dump { {"CONTROL_MASK", ACA_REG_IDX_CTL_MASK}, }; -static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank) +static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank, + struct ras_query_context *qctx) { + u64 event_id = qctx ? 
qctx->event_id : 0ULL; int i; - dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n"); + RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n"); /* plus 1 for output format, e.g: ACA[08/08]: xxxx */ for (i = 0; i < ARRAY_SIZE(aca_regs); i++) - dev_info(adev->dev, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n", - idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]); + RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n", + idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]); } -static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_error_type type, +static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type, int start, int count, - struct aca_banks *banks) + struct aca_banks *banks, struct ras_query_context *qctx) { struct amdgpu_aca *aca = &adev->aca; const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; @@ -143,13 +145,12 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_erro return -EOPNOTSUPP; switch (type) { - case ACA_ERROR_TYPE_UE: + case ACA_SMU_TYPE_UE: max_count = smu_funcs->max_ue_bank_count; break; - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_CE: max_count = smu_funcs->max_ce_bank_count; break; - case ACA_ERROR_TYPE_DEFERRED: default: return -EINVAL; } @@ -164,7 +165,9 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_erro if (ret) return ret; - aca_smu_bank_dump(adev, i, count, &bank); + bank.type = type; + + aca_smu_bank_dump(adev, i, count, &bank, qctx); ret = aca_banks_add_bank(banks, &bank); if (ret) @@ -195,7 +198,7 @@ static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type t return hwip->hwid == hwid && hwip->mcatype == mcatype; } -static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type) +static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type) { const struct aca_bank_ops *bank_ops = handle->bank_ops; @@ -273,59 +276,49 @@ static struct aca_bank_error *get_bank_error(struct aca_error *aerr, struct aca_ return new_bank_error(aerr, info); } -static int aca_log_errors(struct aca_handle *handle, enum aca_error_type type, - struct aca_bank_report *report) +int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_info *info, + enum aca_error_type type, u64 count) { struct aca_error_cache *error_cache = &handle->error_cache; struct aca_bank_error *bank_error; struct aca_error *aerr; - if (!handle || !report) + if (!handle || !info || type >= ACA_ERROR_TYPE_COUNT) return -EINVAL; - if (!report->count[type]) + if (!count) return 0; aerr = &error_cache->errors[type]; - bank_error = get_bank_error(aerr, &report->info); + bank_error = get_bank_error(aerr, info); if (!bank_error) return -ENOMEM; - bank_error->count[type] += report->count[type]; + bank_error->count += count; return 0; } -static int aca_generate_bank_report(struct aca_handle *handle, struct aca_bank *bank, - enum aca_error_type type, struct aca_bank_report *report) +static int aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type) { const struct aca_bank_ops *bank_ops = handle->bank_ops; - if (!bank || !report) + if (!bank) return -EINVAL; - if (!bank_ops->aca_bank_generate_report) + if (!bank_ops->aca_bank_parser) return -EOPNOTSUPP; - memset(report, 0, sizeof(*report)); - return bank_ops->aca_bank_generate_report(handle, bank, type, - report, 
handle->data); + return bank_ops->aca_bank_parser(handle, bank, type, + handle->data); } static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank *bank, - enum aca_error_type type, void *data) + enum aca_smu_type type, void *data) { - struct aca_bank_report report; int ret; - ret = aca_generate_bank_report(handle, bank, type, &report); - if (ret) - return ret; - - if (!report.count[type]) - return 0; - - ret = aca_log_errors(handle, type, &report); + ret = aca_bank_parser(handle, bank, type); if (ret) return ret; @@ -333,7 +326,7 @@ static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank } static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *bank, - enum aca_error_type type, bank_handler_t handler, void *data) + enum aca_smu_type type, bank_handler_t handler, void *data) { struct aca_handle *handle; int ret; @@ -354,7 +347,7 @@ static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *ba } static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks *banks, - enum aca_error_type type, bank_handler_t handler, void *data) + enum aca_smu_type type, bank_handler_t handler, void *data) { struct aca_bank_node *node; struct aca_bank *bank; @@ -378,8 +371,28 @@ static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks * return 0; } -static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type, - bank_handler_t handler, void *data) +static bool aca_bank_should_update(struct amdgpu_device *adev, enum aca_smu_type type) +{ + struct amdgpu_aca *aca = &adev->aca; + bool ret = true; + + /* + * Because the UE Valid MCA count will only be cleared after reset, + * in order to avoid repeated counting of the error count, + * the aca bank is only updated once during the gpu recovery stage. 
+ */ + if (type == ACA_SMU_TYPE_UE) { + if (amdgpu_ras_intr_triggered()) + ret = atomic_cmpxchg(&aca->ue_update_flag, 0, 1) == 0; + else + atomic_set(&aca->ue_update_flag, 0); + } + + return ret; +} + +static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type, + bank_handler_t handler, struct ras_query_context *qctx, void *data) { struct amdgpu_aca *aca = &adev->aca; struct aca_banks banks; @@ -389,9 +402,8 @@ static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type if (list_empty(&aca->mgr.list)) return 0; - /* NOTE: pmfw is only support UE and CE */ - if (type == ACA_ERROR_TYPE_DEFERRED) - type = ACA_ERROR_TYPE_CE; + if (!aca_bank_should_update(adev, type)) + return 0; ret = aca_smu_get_valid_aca_count(adev, type, &count); if (ret) @@ -402,7 +414,7 @@ static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type aca_banks_init(&banks); - ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks); + ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks, qctx); if (ret) goto err_release_banks; @@ -431,7 +443,7 @@ static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_er if (type >= ACA_ERROR_TYPE_COUNT) return -EINVAL; - count = bank_error->count[type]; + count = bank_error->count; if (!count) return 0; @@ -447,6 +459,8 @@ static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_er amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count); break; case ACA_ERROR_TYPE_DEFERRED: + amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, count); + break; default: break; } @@ -477,12 +491,25 @@ out_unlock: } static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, enum aca_error_type type, - struct ras_err_data *err_data) + struct ras_err_data *err_data, struct ras_query_context *qctx) { + enum aca_smu_type smu_type; int ret; + switch (type) { + case ACA_ERROR_TYPE_UE: + smu_type = ACA_SMU_TYPE_UE; + break; + case ACA_ERROR_TYPE_CE: + case ACA_ERROR_TYPE_DEFERRED: + smu_type = ACA_SMU_TYPE_CE; + break; + default: + return -EINVAL; + } + /* udpate aca bank to aca source error_cache first */ - ret = aca_banks_update(adev, type, handler_aca_log_bank_error, NULL); + ret = aca_banks_update(adev, smu_type, handler_aca_log_bank_error, qctx, NULL); if (ret) return ret; @@ -498,10 +525,9 @@ static bool aca_handle_is_valid(struct aca_handle *handle) } int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, - enum aca_error_type type, void *data) + enum aca_error_type type, struct ras_err_data *err_data, + struct ras_query_context *qctx) { - struct ras_err_data *err_data = (struct ras_err_data *)data; - if (!handle || !err_data) return -EINVAL; @@ -511,7 +537,7 @@ int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *han if (!(BIT(type) & handle->mask)) return 0; - return __aca_get_error_data(adev, handle, type, err_data); + return __aca_get_error_data(adev, handle, type, err_data, qctx); } static void aca_error_init(struct aca_error *aerr, enum aca_error_type type) @@ -668,6 +694,8 @@ int amdgpu_aca_init(struct amdgpu_device *adev) struct amdgpu_aca *aca = &adev->aca; int ret; + atomic_set(&aca->ue_update_flag, 0); + ret = aca_manager_init(&aca->mgr); if (ret) return ret; @@ -680,6 +708,8 @@ void amdgpu_aca_fini(struct amdgpu_device *adev) struct amdgpu_aca *aca = &adev->aca; aca_manager_fini(&aca->mgr); + + atomic_set(&aca->ue_update_flag, 0); } int amdgpu_aca_reset(struct 
amdgpu_device *adev) @@ -723,23 +753,13 @@ int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info) static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank) { - int error_code; - - switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(13, 0, 6): - if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) { - error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]); - return error_code & 0xff; - } - break; - default: - break; - } + struct amdgpu_aca *aca = &adev->aca; + const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; - /* NOTE: the true error code is encoded in status.errorcode[0:7] */ - error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]); + if (!smu_funcs || !smu_funcs->parse_error_code) + return -EOPNOTSUPP; - return error_code & 0xff; + return smu_funcs->parse_error_code(adev, bank); } int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size) @@ -750,6 +770,9 @@ int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank return -EINVAL; error_code = aca_bank_get_error_code(adev, bank); + if (error_code < 0) + return error_code; + for (i = 0; i < size; i++) { if (err_codes[i] == error_code) return 0; @@ -784,7 +807,7 @@ static int amdgpu_aca_smu_debug_mode_set(void *data, u64 val) return 0; } -static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_error_type type, int idx) +static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_smu_type type, int idx) { struct aca_bank_info info; int i, ret; @@ -793,7 +816,7 @@ static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_e if (ret) return; - seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_ERROR_TYPE_UE ? "UE" : "CE"); + seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_SMU_TYPE_UE ? 
"UE" : "CE"); seq_printf(m, "aca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n", idx, info.socket_id, info.die_id, info.hwid, info.mcatype); @@ -807,7 +830,7 @@ struct aca_dump_context { }; static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *bank, - enum aca_error_type type, void *data) + enum aca_smu_type type, void *data) { struct aca_dump_context *ctx = (struct aca_dump_context *)data; @@ -816,7 +839,7 @@ static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *ban return handler_aca_log_bank_error(handle, bank, type, NULL); } -static int aca_dump_show(struct seq_file *m, enum aca_error_type type) +static int aca_dump_show(struct seq_file *m, enum aca_smu_type type) { struct amdgpu_device *adev = (struct amdgpu_device *)m->private; struct aca_dump_context context = { @@ -824,12 +847,12 @@ static int aca_dump_show(struct seq_file *m, enum aca_error_type type) .idx = 0, }; - return aca_banks_update(adev, type, handler_aca_bank_dump, (void *)&context); + return aca_banks_update(adev, type, handler_aca_bank_dump, NULL, (void *)&context); } static int aca_dump_ce_show(struct seq_file *m, void *unused) { - return aca_dump_show(m, ACA_ERROR_TYPE_CE); + return aca_dump_show(m, ACA_SMU_TYPE_CE); } static int aca_dump_ce_open(struct inode *inode, struct file *file) @@ -847,7 +870,7 @@ static const struct file_operations aca_ce_dump_debug_fops = { static int aca_dump_ue_show(struct seq_file *m, void *unused) { - return aca_dump_show(m, ACA_ERROR_TYPE_UE); + return aca_dump_show(m, ACA_SMU_TYPE_UE); } static int aca_dump_ue_open(struct inode *inode, struct file *file) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h index 2da50e0958..5ef6b745f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h @@ -26,6 +26,9 @@ #include +struct ras_err_data; +struct ras_query_context; + #define ACA_MAX_REGS_COUNT (16) #define ACA_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l) @@ -99,7 +102,14 @@ enum aca_error_type { ACA_ERROR_TYPE_COUNT }; +enum aca_smu_type { + ACA_SMU_TYPE_UE = 0, + ACA_SMU_TYPE_CE, + ACA_SMU_TYPE_COUNT, +}; + struct aca_bank { + enum aca_smu_type type; u64 regs[ACA_MAX_REGS_COUNT]; }; @@ -115,15 +125,10 @@ struct aca_bank_info { int mcatype; }; -struct aca_bank_report { - struct aca_bank_info info; - u64 count[ACA_ERROR_TYPE_COUNT]; -}; - struct aca_bank_error { struct list_head node; struct aca_bank_info info; - u64 count[ACA_ERROR_TYPE_COUNT]; + u64 count; }; struct aca_error { @@ -157,9 +162,8 @@ struct aca_handle { }; struct aca_bank_ops { - int (*aca_bank_generate_report)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, - struct aca_bank_report *report, void *data); - bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, + int (*aca_bank_parser)(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data); + bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data); }; @@ -167,13 +171,15 @@ struct aca_smu_funcs { int max_ue_bank_count; int max_ce_bank_count; int (*set_debug_mode)(struct amdgpu_device *adev, bool enable); - int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_error_type type, u32 *count); - int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_error_type type, int idx, struct aca_bank *bank); + int (*get_valid_aca_count)(struct amdgpu_device 
*adev, enum aca_smu_type type, u32 *count); + int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_smu_type type, int idx, struct aca_bank *bank); + int (*parse_error_code)(struct amdgpu_device *adev, struct aca_bank *bank); }; struct amdgpu_aca { struct aca_handle_manager mgr; const struct aca_smu_funcs *smu_funcs; + atomic_t ue_update_flag; bool is_enabled; }; @@ -196,7 +202,10 @@ int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle, const char *name, const struct aca_info *aca_info, void *data); void amdgpu_aca_remove_handle(struct aca_handle *handle); int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, - enum aca_error_type type, void *data); + enum aca_error_type type, struct ras_err_data *err_data, + struct ras_query_context *qctx); int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en); void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root); +int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_info *info, + enum aca_error_type type, u64 count); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 6d72355ac4..bf6c4a0d05 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -637,6 +637,8 @@ static const struct amd_ip_funcs acp_ip_funcs = { .soft_reset = acp_soft_reset, .set_clockgating_state = acp_set_clockgating_state, .set_powergating_state = acp_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version acp_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 7291c3fd8c..e3738d4172 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -750,10 +750,17 @@ bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev) return amdgpu_ras_get_fed_status(adev); } +void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev, + enum amdgpu_ras_block block, uint16_t pasid, + pasid_notify pasid_fn, void *data, uint32_t reset) +{ + amdgpu_umc_pasid_poison_handler(adev, block, pasid, pasid_fn, data, reset); +} + void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, - enum amdgpu_ras_block block, bool reset) + enum amdgpu_ras_block block, uint32_t reset) { - amdgpu_umc_poison_handler(adev, block, reset); + amdgpu_umc_pasid_poison_handler(adev, block, 0, NULL, NULL, reset); } int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, @@ -772,12 +779,20 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, return 0; } -bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev) +bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev, + int hub_inst, int hub_type) { - if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status) - return adev->gfx.ras->query_utcl2_poison_status(adev); - else - return false; + if (!hub_type) { + if (adev->gfxhub.funcs->query_utcl2_poison_status) + return adev->gfxhub.funcs->query_utcl2_poison_status(adev, hub_inst); + else + return false; + } else { + if (adev->mmhub.funcs->query_utcl2_poison_status) + return adev->mmhub.funcs->query_utcl2_poison_status(adev, hub_inst); + else + return false; + } } int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 0ef223c2af..1de021ebdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -336,12 +336,18 @@ void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev); int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config); void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, - enum amdgpu_ras_block block, bool reset); + enum amdgpu_ras_block block, uint32_t reset); + +void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev, + enum amdgpu_ras_block block, uint16_t pasid, + pasid_notify pasid_fn, void *data, uint32_t reset); + bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev); bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem); void amdgpu_amdkfd_block_mmu_notifications(void *p); int amdgpu_amdkfd_criu_resume(void *p); -bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev); +bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev, + int hub_inst, int hub_type); int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag, int8_t xcp_id); void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 69810b3f1c..3ab6c3aa0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -881,6 +881,7 @@ uint32_t kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev, } #define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H) +#define SQ_WATCH_STRIDE (mmSQ_WATCH1_ADDR_H - mmSQ_WATCH0_ADDR_H) uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev, uint64_t watch_address, uint32_t watch_address_mask, @@ -889,55 +890,93 @@ uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev, uint32_t debug_vmid, uint32_t inst) { + /* SQ_WATCH?_ADDR_* and TCP_WATCH?_ADDR_* are programmed with the + * same values. 
+ */ uint32_t watch_address_high; uint32_t watch_address_low; - uint32_t watch_address_cntl; - - watch_address_cntl = 0; + uint32_t tcp_watch_address_cntl; + uint32_t sq_watch_address_cntl; watch_address_low = lower_32_bits(watch_address); watch_address_high = upper_32_bits(watch_address) & 0xffff; - watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + tcp_watch_address_cntl = 0; + tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl, TCP_WATCH0_CNTL, VMID, debug_vmid); - watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl, TCP_WATCH0_CNTL, MODE, watch_mode); - watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl, TCP_WATCH0_CNTL, MASK, watch_address_mask >> 7); + sq_watch_address_cntl = 0; + sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl, + SQ_WATCH0_CNTL, + VMID, + debug_vmid); + sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl, + SQ_WATCH0_CNTL, + MODE, + watch_mode); + sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl, + SQ_WATCH0_CNTL, + MASK, + watch_address_mask >> 6); + /* Turning off this watch point until we set all the registers */ - watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl, TCP_WATCH0_CNTL, VALID, 0); - WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + (watch_id * TCP_WATCH_STRIDE)), - watch_address_cntl); + tcp_watch_address_cntl); + + sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl, + SQ_WATCH0_CNTL, + VALID, + 0); + WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_CNTL) + + (watch_id * SQ_WATCH_STRIDE)), + sq_watch_address_cntl); + /* Program {TCP,SQ}_WATCH?_ADDR* */ WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) + (watch_id * TCP_WATCH_STRIDE)), watch_address_high); - WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) + (watch_id * TCP_WATCH_STRIDE)), watch_address_low); + WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_ADDR_H) + + (watch_id * SQ_WATCH_STRIDE)), + watch_address_high); + WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_ADDR_L) + + (watch_id * SQ_WATCH_STRIDE)), + watch_address_low); + /* Enable the watch point */ - watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl, TCP_WATCH0_CNTL, VALID, 1); - WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + (watch_id * TCP_WATCH_STRIDE)), - watch_address_cntl); + tcp_watch_address_cntl); + + sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl, + SQ_WATCH0_CNTL, + VALID, + 1); + WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_CNTL) + + (watch_id * SQ_WATCH_STRIDE)), + sq_watch_address_cntl); return 0; } @@ -953,8 +992,14 @@ uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev, (watch_id * TCP_WATCH_STRIDE)), watch_address_cntl); + WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_CNTL) + + (watch_id * SQ_WATCH_STRIDE)), + watch_address_cntl); + return 0; } +#undef TCP_WATCH_STRIDE +#undef SQ_WATCH_STRIDE /* kgd_gfx_v10_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 8975cf41a9..48ad0c04aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, return -EINVAL; vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id); - 
if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) { + if (adev->flags & AMD_IS_APU) { system_mem_needed = size; ttm_mem_needed = size; } @@ -233,7 +233,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, if (adev && xcp_id >= 0) { adev->kfd.vram_used[xcp_id] += vram_needed; adev->kfd.vram_used_aligned[xcp_id] += - (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ? + (adev->flags & AMD_IS_APU) ? vram_needed : ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN); } @@ -261,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, if (adev) { adev->kfd.vram_used[xcp_id] -= size; - if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) { + if (adev->flags & AMD_IS_APU) { adev->kfd.vram_used_aligned[xcp_id] -= size; kfd_mem_limit.system_mem_used -= size; kfd_mem_limit.ttm_mem_used -= size; @@ -890,7 +890,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, * if peer device has large BAR. In contrast, access over xGMI is * allowed for both small and large BAR configurations of peer device */ - if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) && + if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) && ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) || (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) || (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { @@ -1658,7 +1658,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev, - atomic64_read(&adev->vram_pin_size) - reserved_for_pt; - if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) { + if (adev->flags & AMD_IS_APU) { system_mem_available = no_system_mem_limit ? kfd_mem_limit.max_system_mem_limit : kfd_mem_limit.max_system_mem_limit - @@ -1706,7 +1706,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; - if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) { + if (adev->flags & AMD_IS_APU) { domain = AMDGPU_GEM_DOMAIN_GTT; alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; @@ -1953,7 +1953,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( if (size) { if (!is_imported && (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM || - ((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) && + ((adev->flags & AMD_IS_APU) && mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT))) *size = bo_size; else @@ -2376,7 +2376,7 @@ static int import_obj_create(struct amdgpu_device *adev, (*mem)->bo = bo; (*mem)->va = va; (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && - !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ? + !(adev->flags & AMD_IS_APU) ? 
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT; (*mem)->mapped_to_gpu_memory = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 12b4885182..2e13c7c4b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -34,6 +34,7 @@ union firmware_info { struct atom_firmware_info_v3_2 v32; struct atom_firmware_info_v3_3 v33; struct atom_firmware_info_v3_4 v34; + struct atom_firmware_info_v3_5 v35; }; /* @@ -887,6 +888,10 @@ int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev) fw_reserved_fb_size = (firmware_info->v34.fw_reserved_size_in_kb << 10); break; + case 5: + fw_reserved_fb_size = + (firmware_info->v35.fw_reserved_size_in_kb << 10); + break; default: fw_reserved_fb_size = 0; break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index edc6377ec5..199693369c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -39,7 +39,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, for (i = 0; i < n; i++) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence, - false, false, false); + false, false, 0); if (r) goto exit_do_move; r = dma_fence_wait(fence, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c new file mode 100644 index 0000000000..c1cb626836 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c @@ -0,0 +1,360 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include "amdgpu_dev_coredump.h" +#include "atom.h" + +#ifndef CONFIG_DEV_COREDUMP +void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, + struct amdgpu_reset_context *reset_context) +{ +} +#else + +const char *hw_ip_names[MAX_HWIP] = { + [GC_HWIP] = "GC", + [HDP_HWIP] = "HDP", + [SDMA0_HWIP] = "SDMA0", + [SDMA1_HWIP] = "SDMA1", + [SDMA2_HWIP] = "SDMA2", + [SDMA3_HWIP] = "SDMA3", + [SDMA4_HWIP] = "SDMA4", + [SDMA5_HWIP] = "SDMA5", + [SDMA6_HWIP] = "SDMA6", + [SDMA7_HWIP] = "SDMA7", + [LSDMA_HWIP] = "LSDMA", + [MMHUB_HWIP] = "MMHUB", + [ATHUB_HWIP] = "ATHUB", + [NBIO_HWIP] = "NBIO", + [MP0_HWIP] = "MP0", + [MP1_HWIP] = "MP1", + [UVD_HWIP] = "UVD/JPEG/VCN", + [VCN1_HWIP] = "VCN1", + [VCE_HWIP] = "VCE", + [VPE_HWIP] = "VPE", + [DF_HWIP] = "DF", + [DCE_HWIP] = "DCE", + [OSSSYS_HWIP] = "OSSSYS", + [SMUIO_HWIP] = "SMUIO", + [PWR_HWIP] = "PWR", + [NBIF_HWIP] = "NBIF", + [THM_HWIP] = "THM", + [CLK_HWIP] = "CLK", + [UMC_HWIP] = "UMC", + [RSMU_HWIP] = "RSMU", + [XGMI_HWIP] = "XGMI", + [DCI_HWIP] = "DCI", + [PCIE_HWIP] = "PCIE", +}; + +static void amdgpu_devcoredump_fw_info(struct amdgpu_device *adev, + struct drm_printer *p) +{ + uint32_t version; + uint32_t feature; + uint8_t smu_program, smu_major, smu_minor, smu_debug; + struct atom_context *ctx = adev->mode_info.atom_context; + + drm_printf(p, "VCE feature version: %u, fw version: 0x%08x\n", + adev->vce.fb_version, adev->vce.fw_version); + drm_printf(p, "UVD feature version: %u, fw version: 0x%08x\n", 0, + adev->uvd.fw_version); + drm_printf(p, "GMC feature version: %u, fw version: 0x%08x\n", 0, + adev->gmc.fw_version); + drm_printf(p, "ME feature version: %u, fw version: 0x%08x\n", + adev->gfx.me_feature_version, adev->gfx.me_fw_version); + drm_printf(p, "PFP feature version: %u, fw version: 0x%08x\n", + adev->gfx.pfp_feature_version, adev->gfx.pfp_fw_version); + drm_printf(p, "CE feature version: %u, fw version: 0x%08x\n", + adev->gfx.ce_feature_version, adev->gfx.ce_fw_version); + drm_printf(p, "RLC feature version: %u, fw version: 0x%08x\n", + adev->gfx.rlc_feature_version, adev->gfx.rlc_fw_version); + + drm_printf(p, "RLC SRLC feature version: %u, fw version: 0x%08x\n", + adev->gfx.rlc_srlc_feature_version, + adev->gfx.rlc_srlc_fw_version); + drm_printf(p, "RLC SRLG feature version: %u, fw version: 0x%08x\n", + adev->gfx.rlc_srlg_feature_version, + adev->gfx.rlc_srlg_fw_version); + drm_printf(p, "RLC SRLS feature version: %u, fw version: 0x%08x\n", + adev->gfx.rlc_srls_feature_version, + adev->gfx.rlc_srls_fw_version); + drm_printf(p, "RLCP feature version: %u, fw version: 0x%08x\n", + adev->gfx.rlcp_ucode_feature_version, + adev->gfx.rlcp_ucode_version); + drm_printf(p, "RLCV feature version: %u, fw version: 0x%08x\n", + adev->gfx.rlcv_ucode_feature_version, + adev->gfx.rlcv_ucode_version); + drm_printf(p, "MEC feature version: %u, fw version: 0x%08x\n", + adev->gfx.mec_feature_version, adev->gfx.mec_fw_version); + + if (adev->gfx.mec2_fw) + drm_printf(p, "MEC2 feature version: %u, fw version: 0x%08x\n", + adev->gfx.mec2_feature_version, + adev->gfx.mec2_fw_version); + + drm_printf(p, "IMU feature version: %u, fw version: 0x%08x\n", 0, + adev->gfx.imu_fw_version); + drm_printf(p, "PSP SOS feature version: %u, fw version: 0x%08x\n", + adev->psp.sos.feature_version, adev->psp.sos.fw_version); + drm_printf(p, "PSP ASD feature version: %u, fw version: 0x%08x\n", + adev->psp.asd_context.bin_desc.feature_version, + adev->psp.asd_context.bin_desc.fw_version); + + drm_printf(p, "TA XGMI feature version: 
0x%08x, fw version: 0x%08x\n", + adev->psp.xgmi_context.context.bin_desc.feature_version, + adev->psp.xgmi_context.context.bin_desc.fw_version); + drm_printf(p, "TA RAS feature version: 0x%08x, fw version: 0x%08x\n", + adev->psp.ras_context.context.bin_desc.feature_version, + adev->psp.ras_context.context.bin_desc.fw_version); + drm_printf(p, "TA HDCP feature version: 0x%08x, fw version: 0x%08x\n", + adev->psp.hdcp_context.context.bin_desc.feature_version, + adev->psp.hdcp_context.context.bin_desc.fw_version); + drm_printf(p, "TA DTM feature version: 0x%08x, fw version: 0x%08x\n", + adev->psp.dtm_context.context.bin_desc.feature_version, + adev->psp.dtm_context.context.bin_desc.fw_version); + drm_printf(p, "TA RAP feature version: 0x%08x, fw version: 0x%08x\n", + adev->psp.rap_context.context.bin_desc.feature_version, + adev->psp.rap_context.context.bin_desc.fw_version); + drm_printf(p, + "TA SECURE DISPLAY feature version: 0x%08x, fw version: 0x%08x\n", + adev->psp.securedisplay_context.context.bin_desc.feature_version, + adev->psp.securedisplay_context.context.bin_desc.fw_version); + + /* SMC firmware */ + version = adev->pm.fw_version; + + smu_program = (version >> 24) & 0xff; + smu_major = (version >> 16) & 0xff; + smu_minor = (version >> 8) & 0xff; + smu_debug = (version >> 0) & 0xff; + drm_printf(p, + "SMC feature version: %u, program: %d, fw version: 0x%08x (%d.%d.%d)\n", + 0, smu_program, version, smu_major, smu_minor, smu_debug); + + /* SDMA firmware */ + for (int i = 0; i < adev->sdma.num_instances; i++) { + drm_printf(p, + "SDMA%d feature version: %u, firmware version: 0x%08x\n", + i, adev->sdma.instance[i].feature_version, + adev->sdma.instance[i].fw_version); + } + + drm_printf(p, "VCN feature version: %u, fw version: 0x%08x\n", 0, + adev->vcn.fw_version); + drm_printf(p, "DMCU feature version: %u, fw version: 0x%08x\n", 0, + adev->dm.dmcu_fw_version); + drm_printf(p, "DMCUB feature version: %u, fw version: 0x%08x\n", 0, + adev->dm.dmcub_fw_version); + drm_printf(p, "PSP TOC feature version: %u, fw version: 0x%08x\n", + adev->psp.toc.feature_version, adev->psp.toc.fw_version); + + version = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK; + feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK) >> + AMDGPU_MES_FEAT_VERSION_SHIFT; + drm_printf(p, "MES_KIQ feature version: %u, fw version: 0x%08x\n", + feature, version); + + version = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; + feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK) >> + AMDGPU_MES_FEAT_VERSION_SHIFT; + drm_printf(p, "MES feature version: %u, fw version: 0x%08x\n", feature, + version); + + drm_printf(p, "VPE feature version: %u, fw version: 0x%08x\n", + adev->vpe.feature_version, adev->vpe.fw_version); + + drm_printf(p, "\nVBIOS Information\n"); + drm_printf(p, "vbios name : %s\n", ctx->name); + drm_printf(p, "vbios pn : %s\n", ctx->vbios_pn); + drm_printf(p, "vbios version : %d\n", ctx->version); + drm_printf(p, "vbios ver_str : %s\n", ctx->vbios_ver_str); + drm_printf(p, "vbios date : %s\n", ctx->date); +} + +static ssize_t +amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, + void *data, size_t datalen) +{ + struct drm_printer p; + struct amdgpu_coredump_info *coredump = data; + struct drm_print_iterator iter; + struct amdgpu_vm_fault_info *fault_info; + int i, ver; + + iter.data = buffer; + iter.offset = 0; + iter.start = offset; + iter.remain = count; + + p = drm_coredump_printer(&iter); + + drm_printf(&p, "**** AMDGPU Device Coredump ****\n"); + drm_printf(&p, 
"version: " AMDGPU_COREDUMP_VERSION "\n"); + drm_printf(&p, "kernel: " UTS_RELEASE "\n"); + drm_printf(&p, "module: " KBUILD_MODNAME "\n"); + drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec, + coredump->reset_time.tv_nsec); + + if (coredump->reset_task_info.pid) + drm_printf(&p, "process_name: %s PID: %d\n", + coredump->reset_task_info.process_name, + coredump->reset_task_info.pid); + + /* GPU IP's information of the SOC */ + drm_printf(&p, "\nIP Information\n"); + drm_printf(&p, "SOC Family: %d\n", coredump->adev->family); + drm_printf(&p, "SOC Revision id: %d\n", coredump->adev->rev_id); + drm_printf(&p, "SOC External Revision id: %d\n", coredump->adev->external_rev_id); + + for (int i = 1; i < MAX_HWIP; i++) { + for (int j = 0; j < HWIP_MAX_INSTANCE; j++) { + ver = coredump->adev->ip_versions[i][j]; + if (ver) + drm_printf(&p, "HWIP: %s[%d][%d]: v%d.%d.%d.%d.%d\n", + hw_ip_names[i], i, j, + IP_VERSION_MAJ(ver), + IP_VERSION_MIN(ver), + IP_VERSION_REV(ver), + IP_VERSION_VARIANT(ver), + IP_VERSION_SUBREV(ver)); + } + } + + /* IP firmware information */ + drm_printf(&p, "\nIP Firmwares\n"); + amdgpu_devcoredump_fw_info(coredump->adev, &p); + + if (coredump->ring) { + drm_printf(&p, "\nRing timed out details\n"); + drm_printf(&p, "IP Type: %d Ring Name: %s\n", + coredump->ring->funcs->type, + coredump->ring->name); + } + + /* Add page fault information */ + fault_info = &coredump->adev->vm_manager.fault_info; + drm_printf(&p, "\n[%s] Page fault observed\n", + fault_info->vmhub ? "mmhub" : "gfxhub"); + drm_printf(&p, "Faulty page starting at address: 0x%016llx\n", fault_info->addr); + drm_printf(&p, "Protection fault status register: 0x%x\n\n", fault_info->status); + + /* dump the ip state for each ip */ + drm_printf(&p, "IP Dump\n"); + for (int i = 0; i < coredump->adev->num_ip_blocks; i++) { + if (coredump->adev->ip_blocks[i].version->funcs->print_ip_state) { + drm_printf(&p, "IP: %s\n", + coredump->adev->ip_blocks[i] + .version->funcs->name); + coredump->adev->ip_blocks[i] + .version->funcs->print_ip_state( + (void *)coredump->adev, &p); + drm_printf(&p, "\n"); + } + } + + /* Add ring buffer information */ + drm_printf(&p, "Ring buffer information\n"); + for (int i = 0; i < coredump->adev->num_rings; i++) { + int j = 0; + struct amdgpu_ring *ring = coredump->adev->rings[i]; + + drm_printf(&p, "ring name: %s\n", ring->name); + drm_printf(&p, "Rptr: 0x%llx Wptr: 0x%llx RB mask: %x\n", + amdgpu_ring_get_rptr(ring), + amdgpu_ring_get_wptr(ring), + ring->buf_mask); + drm_printf(&p, "Ring size in dwords: %d\n", + ring->ring_size / 4); + drm_printf(&p, "Ring contents\n"); + drm_printf(&p, "Offset \t Value\n"); + + while (j < ring->ring_size) { + drm_printf(&p, "0x%x \t 0x%x\n", j, ring->ring[j / 4]); + j += 4; + } + } + + if (coredump->reset_vram_lost) + drm_printf(&p, "VRAM is lost due to GPU reset!\n"); + if (coredump->adev->reset_info.num_regs) { + drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n"); + + for (i = 0; i < coredump->adev->reset_info.num_regs; i++) + drm_printf(&p, "0x%08x: 0x%08x\n", + coredump->adev->reset_info.reset_dump_reg_list[i], + coredump->adev->reset_info.reset_dump_reg_value[i]); + } + + return count - iter.remain; +} + +static void amdgpu_devcoredump_free(void *data) +{ + kfree(data); +} + +void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_coredump_info *coredump; + struct drm_device *dev = adev_to_drm(adev); + struct amdgpu_job *job = reset_context->job; + struct 
drm_sched_job *s_job; + + coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT); + + if (!coredump) { + DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__); + return; + } + + coredump->reset_vram_lost = vram_lost; + + if (reset_context->job && reset_context->job->vm) { + struct amdgpu_task_info *ti; + struct amdgpu_vm *vm = reset_context->job->vm; + + ti = amdgpu_vm_get_task_info_vm(vm); + if (ti) { + coredump->reset_task_info = *ti; + amdgpu_vm_put_task_info(ti); + } + } + + if (job) { + s_job = &job->base; + coredump->ring = to_amdgpu_ring(s_job->sched); + } + + coredump->adev = adev; + + ktime_get_ts64(&coredump->reset_time); + + dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT, + amdgpu_devcoredump_read, amdgpu_devcoredump_free); +} +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h new file mode 100644 index 0000000000..52459512cb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_DEV_COREDUMP_H__ +#define __AMDGPU_DEV_COREDUMP_H__ + +#include "amdgpu.h" +#include "amdgpu_reset.h" + +#ifdef CONFIG_DEV_COREDUMP + +#define AMDGPU_COREDUMP_VERSION "1" + +struct amdgpu_coredump_info { + struct amdgpu_device *adev; + struct amdgpu_task_info reset_task_info; + struct timespec64 reset_time; + bool reset_vram_lost; + struct amdgpu_ring *ring; +}; +#endif + +void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, + struct amdgpu_reset_context *reset_context); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index eb8af02332..ee7df1d84e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -74,6 +74,7 @@ #include "amdgpu_fru_eeprom.h" #include "amdgpu_reset.h" #include "amdgpu_virt.h" +#include "amdgpu_dev_coredump.h" #include #include @@ -143,6 +144,8 @@ const char *amdgpu_asic_name[] = { "LAST", }; +static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev); + /** * DOC: pcie_replay_count * @@ -335,16 +338,93 @@ bool amdgpu_device_supports_boco(struct drm_device *dev) * * @dev: drm_device pointer * - * Returns true if the device supporte BACO, - * otherwise return false. 
+ * Return: + * 1 if the device supporte BACO; + * 3 if the device support MACO (only works if BACO is supported) + * otherwise return 0. */ -bool amdgpu_device_supports_baco(struct drm_device *dev) +int amdgpu_device_supports_baco(struct drm_device *dev) { struct amdgpu_device *adev = drm_to_adev(dev); return amdgpu_asic_supports_baco(adev); } +void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev) +{ + struct drm_device *dev; + int bamaco_support; + + dev = adev_to_drm(adev); + + adev->pm.rpm_mode = AMDGPU_RUNPM_NONE; + bamaco_support = amdgpu_device_supports_baco(dev); + + switch (amdgpu_runtime_pm) { + case 2: + if (bamaco_support & MACO_SUPPORT) { + adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; + dev_info(adev->dev, "Forcing BAMACO for runtime pm\n"); + } else if (bamaco_support == BACO_SUPPORT) { + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; + dev_info(adev->dev, "Requested mode BAMACO not available,fallback to use BACO\n"); + } + break; + case 1: + if (bamaco_support & BACO_SUPPORT) { + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; + dev_info(adev->dev, "Forcing BACO for runtime pm\n"); + } + break; + case -1: + case -2: + if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */ + adev->pm.rpm_mode = AMDGPU_RUNPM_PX; + dev_info(adev->dev, "Using ATPX for runtime pm\n"); + } else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */ + adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; + dev_info(adev->dev, "Using BOCO for runtime pm\n"); + } else { + if (!bamaco_support) + goto no_runtime_pm; + + switch (adev->asic_type) { + case CHIP_VEGA20: + case CHIP_ARCTURUS: + /* BACO are not supported on vega20 and arctrus */ + break; + case CHIP_VEGA10: + /* enable BACO as runpm mode if noretry=0 */ + if (!adev->gmc.noretry) + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; + break; + default: + /* enable BACO as runpm mode on CI+ */ + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; + break; + } + + if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { + if (bamaco_support & MACO_SUPPORT) { + adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; + dev_info(adev->dev, "Using BAMACO for runtime pm\n"); + } else { + dev_info(adev->dev, "Using BACO for runtime pm\n"); + } + } + } + break; + case 0: + dev_info(adev->dev, "runtime pm is manually disabled\n"); + break; + default: + break; + } + +no_runtime_pm: + if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) + dev_info(adev->dev, "Runtime PM not available\n"); +} /** * amdgpu_device_supports_smart_shift - Is the device dGPU with * smart shift support @@ -1402,13 +1482,17 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev) */ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb) { - unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); + unsigned long flags, offset; + spin_lock_irqsave(&adev->wb.lock, flags); + offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); if (offset < adev->wb.num_wb) { __set_bit(offset, adev->wb.used); + spin_unlock_irqrestore(&adev->wb.lock, flags); *wb = offset << 3; /* convert to dw offset */ return 0; } else { + spin_unlock_irqrestore(&adev->wb.lock, flags); return -EINVAL; } } @@ -1423,9 +1507,13 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb) */ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) { + unsigned long flags; + wb >>= 3; + spin_lock_irqsave(&adev->wb.lock, flags); if (wb < adev->wb.num_wb) __clear_bit(wb, adev->wb.used); + spin_unlock_irqrestore(&adev->wb.lock, flags); } /** @@ -1455,7 +1543,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device 
*adev) /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */ if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR)) - DRM_WARN("System can't access extended configuration space,please check!!\n"); + DRM_WARN("System can't access extended configuration space, please check!!\n"); /* skip if the bios has already enabled large BAR */ if (adev->gmc.real_vram_size && @@ -3981,6 +4069,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->se_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); spin_lock_init(&adev->mm_stats.lock); + spin_lock_init(&adev->wb.lock); INIT_LIST_HEAD(&adev->shadow_list); mutex_init(&adev->shadow_list_lock); @@ -4069,6 +4158,13 @@ int amdgpu_device_init(struct amdgpu_device *adev, /* Enable TMZ based on IP_VERSION */ amdgpu_gmc_tmz_set(adev); + if (amdgpu_sriov_vf(adev) && + amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) + /* VF MMIO access (except mailbox range) from CPU + * will be blocked during sriov runtime + */ + adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT; + amdgpu_gmc_noretry_set(adev); /* Need to get xgmi info early to decide the reset behavior*/ if (adev->gmc.xgmi.supported) { @@ -4974,12 +5070,15 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, retry: amdgpu_amdkfd_pre_reset(adev); + amdgpu_device_stop_pending_resets(adev); + if (from_hypervisor) r = amdgpu_virt_request_full_gpu(adev, true); else r = amdgpu_virt_reset_gpu(adev); if (r) return r; + amdgpu_ras_set_fed(adev, false); amdgpu_irq_gpu_reset_resume_helper(adev); /* some sw clean up VF needs to do before recover */ @@ -5266,11 +5365,21 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, struct amdgpu_device *tmp_adev = NULL; bool need_full_reset, skip_hw_reset, vram_lost = false; int r = 0; + uint32_t i; /* Try reset handler method first */ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, reset_list); - amdgpu_reset_reg_dumps(tmp_adev); + + if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) { + amdgpu_reset_reg_dumps(tmp_adev); + + /* Trigger ip dump before we reset the asic */ + for (i = 0; i < tmp_adev->num_ip_blocks; i++) + if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state) + tmp_adev->ip_blocks[i].version->funcs + ->dump_ip_state((void *)tmp_adev); + } reset_context->reset_device_list = device_list_handle; r = amdgpu_reset_perform_reset(tmp_adev, reset_context); @@ -5343,7 +5452,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, vram_lost = amdgpu_device_check_vram_lost(tmp_adev); - amdgpu_coredump(tmp_adev, vram_lost, reset_context); + if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) + amdgpu_coredump(tmp_adev, vram_lost, reset_context); if (vram_lost) { DRM_INFO("VRAM is lost due to GPU reset!\n"); @@ -5541,6 +5651,23 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev) } +static int amdgpu_device_health_check(struct list_head *device_list_handle) +{ + struct amdgpu_device *tmp_adev; + int ret = 0; + u32 status; + + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status); + if (PCI_POSSIBLE_ERROR(status)) { + dev_err(tmp_adev->dev, "device lost from bus!"); + ret = -ENODEV; + } + } + + return ret; +} + /** * amdgpu_device_gpu_recover - reset the asic and recover scheduler * @@ -5612,6 +5739,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, device_list_handle = &device_list; } + if (!amdgpu_sriov_vf(adev)) { + r = 
amdgpu_device_health_check(device_list_handle); + if (r) + goto end_reset; + } + /* We need to lock reset domain only once both for XGMI and single device */ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, reset_list); @@ -5694,11 +5827,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ tmp_adev->asic_reset_res = r; } - /* - * Drop all pending non scheduler resets. Scheduler resets - * were already dropped during drm_sched_stop - */ - amdgpu_device_stop_pending_resets(tmp_adev); + if (!amdgpu_sriov_vf(tmp_adev)) + /* + * Drop all pending non scheduler resets. Scheduler resets + * were already dropped during drm_sched_stop + */ + amdgpu_device_stop_pending_resets(tmp_adev); } /* Actual ASIC resets if needed.*/ @@ -5777,6 +5911,7 @@ skip_sched_resume: reset_list); amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); +end_reset: if (hive) { mutex_unlock(&hive->hive_lock); amdgpu_put_xgmi_hive(hive); @@ -6038,7 +6173,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev) adev->nbio.funcs->enable_doorbell_interrupt) adev->nbio.funcs->enable_doorbell_interrupt(adev, true); - if (amdgpu_passthrough(adev) && + if (amdgpu_passthrough(adev) && adev->nbio.funcs && adev->nbio.funcs->clear_doorbell_interrupt) adev->nbio.funcs->clear_doorbell_interrupt(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index ac5bf01fe8..0e31bdb4b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -97,6 +97,7 @@ #include "smuio_v13_0.h" #include "smuio_v13_0_3.h" #include "smuio_v13_0_6.h" +#include "smuio_v14_0_2.h" #include "vcn_v5_0_0.h" #include "jpeg_v5_0_0.h" @@ -245,6 +246,9 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, return -ENOENT; } +#define IP_DISCOVERY_V2 2 +#define IP_DISCOVERY_V4 4 + static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, uint8_t *binary) { @@ -259,14 +263,14 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, * wait for this to complete. Once the C2PMSG is updated, we can * continue. 
*/ - if (dev_is_removable(&adev->pdev->dev)) { - for (i = 0; i < 1000; i++) { - msg = RREG32(mmMP0_SMN_C2PMSG_33); - if (msg & 0x80000000) - break; - msleep(1); - } + + for (i = 0; i < 1000; i++) { + msg = RREG32(mmMP0_SMN_C2PMSG_33); + if (msg & 0x80000000) + break; + usleep_range(1000, 1100); } + vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; if (vram_size) { @@ -1897,6 +1901,8 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) break; case IP_VERSION(14, 0, 0): case IP_VERSION(14, 0, 1): + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block); break; default: @@ -2678,6 +2684,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(14, 0, 1): adev->smuio.funcs = &smuio_v13_0_6_funcs; break; + case IP_VERSION(14, 0, 2): + adev->smuio.funcs = &smuio_v14_0_2_funcs; + break; default: break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 055ba2ea4c..662d0f28f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -41,8 +41,6 @@ #include #include #include -#include -#include "amdgpu_trace.h" /** * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation @@ -58,42 +56,11 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, struct drm_gem_object *obj = dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - int r; if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) attach->peer2peer = false; - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - trace_amdgpu_runpm_reference_dumps(1, __func__); - if (r < 0) - goto out; - return 0; - -out: - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - trace_amdgpu_runpm_reference_dumps(0, __func__); - return r; -} - -/** - * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation - * - * @dmabuf: DMA-buf where we remove the attachment from - * @attach: the attachment to remove - * - * Called when an attachment is removed from the DMA-buf. 
- */ -static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf, - struct dma_buf_attachment *attach) -{ - struct drm_gem_object *obj = dmabuf->priv; - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - trace_amdgpu_runpm_reference_dumps(0, __func__); } /** @@ -267,7 +234,6 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, const struct dma_buf_ops amdgpu_dmabuf_ops = { .attach = amdgpu_dma_buf_attach, - .detach = amdgpu_dma_buf_detach, .pin = amdgpu_dma_buf_pin, .unpin = amdgpu_dma_buf_unpin, .map_dma_buf = amdgpu_dma_buf_map, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e4277298cf..ea14f1c8f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -925,7 +925,7 @@ module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444); * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco) */ MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)"); -module_param_named(reset_method, amdgpu_reset_method, int, 0444); +module_param_named(reset_method, amdgpu_reset_method, int, 0644); /** * DOC: bad_page_threshold (int) Bad page threshold is specifies the @@ -2481,6 +2481,7 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) /* Use a common context, just need to make sure full reset is done */ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); + set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); r = amdgpu_do_asic_reset(&device_list, &reset_context); if (r) { @@ -2744,7 +2745,8 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) { /* nothing to do */ - } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { + } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) { amdgpu_device_baco_enter(drm_dev); } @@ -2784,7 +2786,8 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) * PCI core handles it for _PR3. 
*/ pci_set_master(pdev); - } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { + } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) { amdgpu_device_baco_exit(drm_dev); } ret = amdgpu_device_resume(drm_dev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 10832b4704..bc3ac73b6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -181,7 +181,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, flags | AMDGPU_FENCE_FLAG_INT); pm_runtime_get_noresume(adev_to_drm(adev)->dev); - trace_amdgpu_runpm_reference_dumps(1, __func__); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; if (unlikely(rcu_dereference_protected(*ptr, 1))) { struct dma_fence *old; @@ -309,7 +308,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) dma_fence_put(fence); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - trace_amdgpu_runpm_reference_dumps(0, __func__); } while (last_seq != seq); return true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 67c234bcf8..3adaa46701 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -108,6 +108,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, memset(&bp, 0, sizeof(bp)); *obj = NULL; + flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; bp.size = size; bp.byte_align = alignment; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 1d955652f3..e92bdc9a39 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -329,8 +329,9 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id) ring->eop_gpu_addr = kiq->eop_gpu_addr; ring->no_scheduler = true; - snprintf(ring->name, sizeof(ring->name), "kiq_%d.%d.%d.%d", - xcc_id, ring->me, ring->pipe, ring->queue); + snprintf(ring->name, sizeof(ring->name), "kiq_%hhu.%hhu.%hhu.%hhu", + (unsigned char)xcc_id, (unsigned char)ring->me, + (unsigned char)ring->pipe, (unsigned char)ring->queue); r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0, AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 8fcf889ddc..64f197bbc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -259,7 +259,6 @@ struct amdgpu_cu_info { struct amdgpu_gfx_ras { struct amdgpu_ras_block_object ras_block; void (*enable_watchdog_timer)(struct amdgpu_device *adev); - bool (*query_utcl2_poison_status)(struct amdgpu_device *adev); int (*rlc_gc_fed_irq)(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); @@ -434,6 +433,10 @@ struct amdgpu_gfx { uint32_t num_xcc_per_xcp; struct mutex partition_mutex; bool mcbp; /* mid command buffer preemption */ + + /* IP reg dump */ + uint32_t *ip_dump; + uint32_t reg_count; }; struct amdgpu_gfx_ras_reg_entry { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h index c7b44aeb67..103a837ccc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h @@ -38,6 +38,8 @@ struct amdgpu_gfxhub_funcs { 
void (*mode2_save_regs)(struct amdgpu_device *adev); void (*mode2_restore_regs)(struct amdgpu_device *adev); void (*halt)(struct amdgpu_device *adev); + bool (*query_utcl2_poison_status)(struct amdgpu_device *adev, + int xcc_id); }; struct amdgpu_gfxhub { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 08b9dfb653..86b096ad03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -878,7 +878,6 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev) struct amdgpu_gmc *gmc = &adev->gmc; uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) || - gc_ver == IP_VERSION(9, 3, 0) || gc_ver == IP_VERSION(9, 4, 0) || gc_ver == IP_VERSION(9, 4, 1) || gc_ver == IP_VERSION(9, 4, 2) || diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index d79cb13e1a..00d6211e0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -279,7 +279,7 @@ amdgpu_i2c_lookup(struct amdgpu_device *adev, return NULL; } -static void amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus, +static int amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus, u8 slave_addr, u8 addr, u8 *val) @@ -304,16 +304,18 @@ static void amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus, out_buf[0] = addr; out_buf[1] = 0; - if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) { - *val = in_buf[0]; - DRM_DEBUG("val = 0x%02x\n", *val); - } else { - DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n", - addr, *val); + if (i2c_transfer(&i2c_bus->adapter, msgs, 2) != 2) { + DRM_DEBUG("i2c 0x%02x read failed\n", addr); + return -EIO; } + + *val = in_buf[0]; + DRM_DEBUG("val = 0x%02x\n", *val); + + return 0; } -static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus, +static int amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus, u8 slave_addr, u8 addr, u8 val) @@ -329,9 +331,12 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus, out_buf[0] = addr; out_buf[1] = val; - if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) - DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n", - addr, val); + if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) { + DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n", addr, val); + return -EIO; + } + + return 0; } /* ddc router switching */ @@ -346,16 +351,18 @@ amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connecto if (!amdgpu_connector->router_bus) return; - amdgpu_i2c_get_byte(amdgpu_connector->router_bus, + if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus, amdgpu_connector->router.i2c_addr, - 0x3, &val); + 0x3, &val)) + return; val &= ~amdgpu_connector->router.ddc_mux_control_pin; amdgpu_i2c_put_byte(amdgpu_connector->router_bus, amdgpu_connector->router.i2c_addr, 0x3, val); - amdgpu_i2c_get_byte(amdgpu_connector->router_bus, + if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus, amdgpu_connector->router.i2c_addr, - 0x1, &val); + 0x1, &val)) + return; val &= ~amdgpu_connector->router.ddc_mux_control_pin; val |= amdgpu_connector->router.ddc_mux_state; amdgpu_i2c_put_byte(amdgpu_connector->router_bus, @@ -375,16 +382,18 @@ amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *amdgpu_connector if (!amdgpu_connector->router_bus) return; - amdgpu_i2c_get_byte(amdgpu_connector->router_bus, + if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus, amdgpu_connector->router.i2c_addr, - 0x3, &val); + 0x3, &val)) + return; val &= 
~amdgpu_connector->router.cd_mux_control_pin; amdgpu_i2c_put_byte(amdgpu_connector->router_bus, amdgpu_connector->router.i2c_addr, 0x3, val); - amdgpu_i2c_get_byte(amdgpu_connector->router_bus, + if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus, amdgpu_connector->router.i2c_addr, - 0x1, &val); + 0x1, &val)) + return; val &= ~amdgpu_connector->router.cd_mux_control_pin; val |= amdgpu_connector->router.cd_mux_state; amdgpu_i2c_put_byte(amdgpu_connector->router_bus, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 665c63f552..013ff373e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -279,7 +279,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) adev->irq.msi_enabled = false; if (!amdgpu_msi_ok(adev)) - flags = PCI_IRQ_LEGACY; + flags = PCI_IRQ_INTX; else flags = PCI_IRQ_ALL_TYPES; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index a2df3025a7..a0ea6fe8d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -149,38 +149,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) goto out; } - adev->pm.rpm_mode = AMDGPU_RUNPM_NONE; - if (amdgpu_device_supports_px(dev) && - (amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */ - adev->pm.rpm_mode = AMDGPU_RUNPM_PX; - dev_info(adev->dev, "Using ATPX for runtime pm\n"); - } else if (amdgpu_device_supports_boco(dev) && - (amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */ - adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; - dev_info(adev->dev, "Using BOCO for runtime pm\n"); - } else if (amdgpu_device_supports_baco(dev) && - (amdgpu_runtime_pm != 0)) { - switch (adev->asic_type) { - case CHIP_VEGA20: - case CHIP_ARCTURUS: - /* enable BACO as runpm mode if runpm=1 */ - if (amdgpu_runtime_pm > 0) - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; - break; - case CHIP_VEGA10: - /* enable BACO as runpm mode if noretry=0 */ - if (!adev->gmc.noretry) - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; - break; - default: - /* enable BACO as runpm mode on CI+ */ - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; - break; - } - - if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) - dev_info(adev->dev, "Using BACO for runtime pm\n"); - } + amdgpu_device_detect_runtime_pm_mode(adev); /* Call ACPI methods: require modeset init * but failure is not fatal diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 24ad4b9717..0734490347 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -210,22 +210,26 @@ int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) return -EOPNOTSUPP; } -static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry) +static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry, + struct ras_query_context *qctx) { - dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n"); - dev_info(adev->dev, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n", - idx, entry->regs[MCA_REG_IDX_STATUS]); - dev_info(adev->dev, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n", - idx, entry->regs[MCA_REG_IDX_ADDR]); - dev_info(adev->dev, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n", - idx, entry->regs[MCA_REG_IDX_MISC0]); - dev_info(adev->dev, HW_ERR "aca entry[%02d].IPID=0x%016llx\n", - idx, entry->regs[MCA_REG_IDX_IPID]); - dev_info(adev->dev, HW_ERR "aca 
entry[%02d].SYND=0x%016llx\n", - idx, entry->regs[MCA_REG_IDX_SYND]); + u64 event_id = qctx->event_id; + + RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n"); + RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_STATUS]); + RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_ADDR]); + RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_MISC0]); + RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].IPID=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_IPID]); + RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].SYND=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_SYND]); } -int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data) +int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, + struct ras_err_data *err_data, struct ras_query_context *qctx) { struct amdgpu_smuio_mcm_config_info mcm_info; struct ras_err_addr err_addr = {0}; @@ -244,7 +248,7 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo list_for_each_entry(node, &mca_set.list, node) { entry = &node->entry; - amdgpu_mca_smu_mca_bank_dump(adev, i++, entry); + amdgpu_mca_smu_mca_bank_dump(adev, i++, entry, qctx); count = 0; ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h index b964110ed1..e5bf07ce34 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h @@ -169,6 +169,7 @@ void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set); int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry); void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set); -int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data); +int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, + struct ras_err_data *err_data, struct ras_query_context *qctx); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 1569bef030..5ca5c47ab5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -32,6 +32,18 @@ #define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 #define AMDGPU_ONE_DOORBELL_SIZE 8 +signed long amdgpu_mes_fence_wait_polling(u64 *fence, + u64 wait_seq, + signed long timeout) +{ + + while ((s64)(wait_seq - *fence) > 0 && timeout > 0) { + udelay(2); + timeout -= 2; + } + return timeout > 0 ? 
timeout : 0; +} + int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev) { return roundup(AMDGPU_ONE_DOORBELL_SIZE * @@ -40,7 +52,6 @@ int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev) } static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev, - struct amdgpu_mes_process *process, int ip_type, uint64_t *doorbell_index) { unsigned int offset, found; @@ -65,7 +76,6 @@ static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev, } static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev, - struct amdgpu_mes_process *process, uint32_t doorbell_index) { unsigned int old, rel_index; @@ -656,7 +666,7 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id, *queue_id = queue->queue_id = r; /* allocate a doorbell index for the queue */ - r = amdgpu_mes_kernel_doorbell_get(adev, gang->process, + r = amdgpu_mes_kernel_doorbell_get(adev, qprops->queue_type, &qprops->doorbell_off); if (r) @@ -714,8 +724,7 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id, return 0; clean_up_doorbell: - amdgpu_mes_kernel_doorbell_free(adev, gang->process, - qprops->doorbell_off); + amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off); clean_up_queue_id: spin_lock_irqsave(&adev->mes.queue_id_lock, flags); idr_remove(&adev->mes.queue_id_idr, queue->queue_id); @@ -769,8 +778,7 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id) queue_id); list_del(&queue->list); - amdgpu_mes_kernel_doorbell_free(adev, gang->process, - queue->doorbell_off); + amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off); amdgpu_mes_unlock(&adev->mes); amdgpu_mes_queue_free_mqd(queue); @@ -778,6 +786,28 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id) return 0; } +int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + struct mes_map_legacy_queue_input queue_input; + int r; + + memset(&queue_input, 0, sizeof(queue_input)); + + queue_input.queue_type = ring->funcs->type; + queue_input.doorbell_offset = ring->doorbell_index; + queue_input.pipe_id = ring->pipe; + queue_input.queue_id = ring->queue; + queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); + queue_input.wptr_addr = ring->wptr_gpu_addr; + + r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input); + if (r) + DRM_ERROR("failed to map legacy queue\n"); + + return r; +} + int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring, enum amdgpu_unmap_queues_action action, @@ -1475,7 +1505,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) const struct mes_firmware_header_v1_0 *mes_hdr; struct amdgpu_firmware_info *info; char ucode_prefix[30]; - char fw_name[40]; + char fw_name[50]; bool need_retry = false; int r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 4c8fc3117e..df9f0404d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -141,6 +141,12 @@ struct amdgpu_mes { /* ip specific functions */ const struct amdgpu_mes_funcs *funcs; + + /* mes resource_1 bo*/ + struct amdgpu_bo *resource_1; + uint64_t resource_1_gpu_addr; + void *resource_1_addr; + }; struct amdgpu_mes_process { @@ -242,6 +248,15 @@ struct mes_remove_queue_input { uint64_t gang_context_addr; }; +struct mes_map_legacy_queue_input { + uint32_t queue_type; + uint32_t doorbell_offset; + uint32_t pipe_id; + uint32_t queue_id; + uint64_t mqd_addr; + uint64_t wptr_addr; +}; 
+ struct mes_unmap_legacy_queue_input { enum amdgpu_unmap_queues_action action; uint32_t queue_type; @@ -318,6 +333,9 @@ struct amdgpu_mes_funcs { int (*remove_hw_queue)(struct amdgpu_mes *mes, struct mes_remove_queue_input *input); + int (*map_legacy_queue)(struct amdgpu_mes *mes, + struct mes_map_legacy_queue_input *input); + int (*unmap_legacy_queue)(struct amdgpu_mes *mes, struct mes_unmap_legacy_queue_input *input); @@ -334,6 +352,10 @@ struct amdgpu_mes_funcs { #define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev)) #define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev)) +signed long amdgpu_mes_fence_wait_polling(u64 *fence, + u64 wait_seq, + signed long timeout); + int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs); int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe); @@ -357,6 +379,8 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id, int *queue_id); int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id); +int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev, + struct amdgpu_ring *ring); int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring, enum amdgpu_unmap_queues_action action, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 1ca9d4ed80..95d676ee20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -63,6 +63,8 @@ struct amdgpu_mmhub_funcs { uint64_t page_table_base); void (*update_power_gating)(struct amdgpu_device *adev, bool enable); + bool (*query_utcl2_poison_status)(struct amdgpu_device *adev, + int hub_inst); }; struct amdgpu_mmhub { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index f6d503432a..c556c8b653 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -39,6 +39,7 @@ #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_vram_mgr.h" /** * DOC: amdgpu_object @@ -153,8 +154,10 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) else places[c].flags |= TTM_PL_FLAG_TOPDOWN; - if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) + if (abo->tbo.type == ttm_bo_type_kernel && + flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) places[c].flags |= TTM_PL_FLAG_CONTIGUOUS; + c++; } @@ -173,6 +176,12 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ? AMDGPU_PL_PREEMPT : TTM_PL_TT; places[c].flags = 0; + /* + * When GTT is just an alternative to VRAM make sure that we + * only use it as fallback and still try to fill up VRAM first. 
+ */ + if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) + places[c].flags |= TTM_PL_FLAG_FALLBACK; c++; } @@ -595,9 +604,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev, if (!amdgpu_bo_support_uswc(bo->flags)) bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; - if (adev->ras_enabled) - bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; - bo->tbo.bdev = &adev->mman.bdev; if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA | AMDGPU_GEM_DOMAIN_GDS)) @@ -629,7 +635,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, bo->tbo.resource->mem_type == TTM_PL_VRAM) { struct dma_fence *fence; - r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true); + r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence); if (unlikely(r)) goto fail_unreserve; @@ -759,7 +765,7 @@ int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence) return amdgpu_copy_buffer(ring, shadow_addr, parent_addr, amdgpu_bo_size(shadow), NULL, fence, - true, false, false); + true, false, 0); } /** @@ -961,6 +967,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, if (!bo->placements[i].lpfn || (lpfn && lpfn < bo->placements[i].lpfn)) bo->placements[i].lpfn = lpfn; + + if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && + bo->placements[i].mem_type == TTM_PL_VRAM) + bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS; } r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); @@ -1366,8 +1376,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv))) return; - r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true); + r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true); if (!WARN_ON(r)) { + amdgpu_vram_mgr_set_cleared(bo->resource); amdgpu_bo_fence(abo, fence, false); dma_fence_put(fence); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 94b310fdb7..cef9dd0a01 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -640,6 +640,20 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) } } +static bool psp_err_warn(struct psp_context *psp) +{ + struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem; + + /* This response indicates reg list is already loaded */ + if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) && + cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW && + cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST && + cmd->resp.status == TEE_ERROR_CANCEL) + return false; + + return true; +} + static int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, @@ -699,10 +713,13 @@ psp_cmd_submit_buf(struct psp_context *psp, dev_warn(psp->adev->dev, "failed to load ucode %s(0x%X) ", amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); - dev_warn(psp->adev->dev, - "psp gfx command %s(0x%X) failed and response status is (0x%X)\n", - psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id, - psp->cmd_buf_mem->resp.status); + if (psp_err_warn(psp)) + dev_warn( + psp->adev->dev, + "psp gfx command %s(0x%X) failed and response status is (0x%X)\n", + psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), + psp->cmd_buf_mem->cmd_id, + psp->cmd_buf_mem->resp.status); /* If any firmware (including CAP) load fails under SRIOV, it should * return failure to stop the VF from initializing. 
* Also return failure in case of timeout @@ -1053,6 +1070,11 @@ static int psp_asd_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes) return 0; + /* bypass asd if display hardware is not available */ + if (!amdgpu_device_has_display_hardware(psp->adev) && + amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10)) + return 0; + psp->asd_context.mem_context.shared_mc_addr = 0; psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE; psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD; @@ -2260,6 +2282,15 @@ static int psp_hw_start(struct psp_context *psp) } } + if ((is_psp_fw_valid(psp->ipkeymgr_drv)) && + (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) { + ret = psp_bootloader_load_ipkeymgr_drv(psp); + if (ret) { + dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n"); + return ret; + } + } + if ((is_psp_fw_valid(psp->sos)) && (psp->funcs->bootloader_load_sos != NULL)) { ret = psp_bootloader_load_sos(psp); @@ -2617,7 +2648,8 @@ static int psp_load_p2s_table(struct psp_context *psp) struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; - if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)) + if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) return 0; if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) { @@ -2647,7 +2679,8 @@ static int psp_load_smu_fw(struct psp_context *psp) * Skip SMU FW reloading in case of using BACO for runpm only, * as SMU is always alive. */ - if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)) + if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) return 0; if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) @@ -3273,6 +3306,12 @@ static int parse_sos_bin_descriptor(struct psp_context *psp, psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); psp->ras_drv.start_addr = ucode_start_addr; break; + case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: + psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); + psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); + psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); + psp->ipkeymgr_drv.start_addr = ucode_start_addr; + break; default: dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index ee16f134ae..3635303e65 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -73,8 +73,10 @@ enum psp_bootloader_cmd { PSP_BL__LOAD_KEY_DATABASE = 0x80000, PSP_BL__LOAD_SOCDRV = 0xB0000, PSP_BL__LOAD_DBGDRV = 0xC0000, + PSP_BL__LOAD_HADDRV = PSP_BL__LOAD_DBGDRV, PSP_BL__LOAD_INTFDRV = 0xD0000, - PSP_BL__LOAD_RASDRV = 0xE0000, + PSP_BL__LOAD_RASDRV = 0xE0000, + PSP_BL__LOAD_IPKEYMGRDRV = 0xF0000, PSP_BL__DRAM_LONG_TRAIN = 0x100000, PSP_BL__DRAM_SHORT_TRAIN = 0x200000, PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000, @@ -117,6 +119,7 @@ struct psp_funcs { int (*bootloader_load_intf_drv)(struct psp_context *psp); int (*bootloader_load_dbg_drv)(struct psp_context *psp); int (*bootloader_load_ras_drv)(struct psp_context *psp); + int (*bootloader_load_ipkeymgr_drv)(struct psp_context *psp); int (*bootloader_load_sos)(struct psp_context *psp); int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type); @@ -336,6 +339,7 @@ struct psp_context { struct psp_bin_desc intf_drv; struct 
psp_bin_desc dbg_drv; struct psp_bin_desc ras_drv; + struct psp_bin_desc ipkeymgr_drv; /* tmr buffer */ struct amdgpu_bo *tmr_bo; @@ -424,6 +428,9 @@ struct amdgpu_psp_funcs { #define psp_bootloader_load_ras_drv(psp) \ ((psp)->funcs->bootloader_load_ras_drv ? \ (psp)->funcs->bootloader_load_ras_drv((psp)) : 0) +#define psp_bootloader_load_ipkeymgr_drv(psp) \ + ((psp)->funcs->bootloader_load_ipkeymgr_drv ? \ + (psp)->funcs->bootloader_load_ipkeymgr_drv((psp)) : 0) #define psp_bootloader_load_sos(psp) \ ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0) #define psp_smu_reload_quirk(psp) \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 8ebab6f22e..1adc81a557 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -122,6 +122,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block) #define MAX_UMC_POISON_POLLING_TIME_ASYNC 100 //ms +#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms + enum amdgpu_ras_retire_page_reservation { AMDGPU_RAS_RETIRE_PAGE_RESERVED, AMDGPU_RAS_RETIRE_PAGE_PENDING, @@ -1045,6 +1047,7 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, struct ras_manager *ras_mgr, struct ras_err_data *err_data, + struct ras_query_context *qctx, const char *blk_name, bool is_ue, bool is_de) @@ -1052,27 +1055,28 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, struct amdgpu_smuio_mcm_config_info *mcm_info; struct ras_err_node *err_node; struct ras_err_info *err_info; + u64 event_id = qctx->event_id; if (is_ue) { for_each_ras_error(err_node, err_data) { err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; if (err_info->ue_count) { - dev_info(adev->dev, "socket: %d, die: %d, " - "%lld new uncorrectable hardware errors detected in %s block\n", - mcm_info->socket_id, - mcm_info->die_id, - err_info->ue_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " + "%lld new uncorrectable hardware errors detected in %s block\n", + mcm_info->socket_id, + mcm_info->die_id, + err_info->ue_count, + blk_name); } } for_each_ras_error(err_node, &ras_mgr->err_data) { err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; - dev_info(adev->dev, "socket: %d, die: %d, " - "%lld uncorrectable hardware errors detected in total in %s block\n", - mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " + "%lld uncorrectable hardware errors detected in total in %s block\n", + mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name); } } else { @@ -1081,44 +1085,44 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; if (err_info->de_count) { - dev_info(adev->dev, "socket: %d, die: %d, " - "%lld new deferred hardware errors detected in %s block\n", - mcm_info->socket_id, - mcm_info->die_id, - err_info->de_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " + "%lld new deferred hardware errors detected in %s block\n", + mcm_info->socket_id, + mcm_info->die_id, + err_info->de_count, + blk_name); } } for_each_ras_error(err_node, &ras_mgr->err_data) { err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; - dev_info(adev->dev, "socket: %d, die: %d, " - "%lld deferred hardware errors detected in total in %s 
block\n", - mcm_info->socket_id, mcm_info->die_id, - err_info->de_count, blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " + "%lld deferred hardware errors detected in total in %s block\n", + mcm_info->socket_id, mcm_info->die_id, + err_info->de_count, blk_name); } } else { for_each_ras_error(err_node, err_data) { err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; if (err_info->ce_count) { - dev_info(adev->dev, "socket: %d, die: %d, " - "%lld new correctable hardware errors detected in %s block\n", - mcm_info->socket_id, - mcm_info->die_id, - err_info->ce_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " + "%lld new correctable hardware errors detected in %s block\n", + mcm_info->socket_id, + mcm_info->die_id, + err_info->ce_count, + blk_name); } } for_each_ras_error(err_node, &ras_mgr->err_data) { err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; - dev_info(adev->dev, "socket: %d, die: %d, " - "%lld correctable hardware errors detected in total in %s block\n", - mcm_info->socket_id, mcm_info->die_id, - err_info->ce_count, blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " + "%lld correctable hardware errors detected in total in %s block\n", + mcm_info->socket_id, mcm_info->die_id, + err_info->ce_count, blk_name); } } } @@ -1131,77 +1135,79 @@ static inline bool err_data_has_source_info(struct ras_err_data *data) static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, struct ras_query_if *query_if, - struct ras_err_data *err_data) + struct ras_err_data *err_data, + struct ras_query_context *qctx) { struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head); const char *blk_name = get_ras_block_str(&query_if->head); + u64 event_id = qctx->event_id; if (err_data->ce_count) { if (err_data_has_source_info(err_data)) { - amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, + amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx, blk_name, false, false); } else if (!adev->aid_mask && adev->smuio.funcs && adev->smuio.funcs->get_socket_id && adev->smuio.funcs->get_die_id) { - dev_info(adev->dev, "socket: %d, die: %d " - "%ld correctable hardware errors " - "detected in %s block\n", - adev->smuio.funcs->get_socket_id(adev), - adev->smuio.funcs->get_die_id(adev), - ras_mgr->err_data.ce_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d " + "%ld correctable hardware errors " + "detected in %s block\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), + ras_mgr->err_data.ce_count, + blk_name); } else { - dev_info(adev->dev, "%ld correctable hardware errors " - "detected in %s block\n", - ras_mgr->err_data.ce_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors " + "detected in %s block\n", + ras_mgr->err_data.ce_count, + blk_name); } } if (err_data->ue_count) { if (err_data_has_source_info(err_data)) { - amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, + amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx, blk_name, true, false); } else if (!adev->aid_mask && adev->smuio.funcs && adev->smuio.funcs->get_socket_id && adev->smuio.funcs->get_die_id) { - dev_info(adev->dev, "socket: %d, die: %d " - "%ld uncorrectable hardware errors " - "detected in %s block\n", - adev->smuio.funcs->get_socket_id(adev), - adev->smuio.funcs->get_die_id(adev), - ras_mgr->err_data.ue_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d " + "%ld uncorrectable hardware 
errors " + "detected in %s block\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), + ras_mgr->err_data.ue_count, + blk_name); } else { - dev_info(adev->dev, "%ld uncorrectable hardware errors " - "detected in %s block\n", - ras_mgr->err_data.ue_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors " + "detected in %s block\n", + ras_mgr->err_data.ue_count, + blk_name); } } if (err_data->de_count) { if (err_data_has_source_info(err_data)) { - amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, + amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx, blk_name, false, true); } else if (!adev->aid_mask && adev->smuio.funcs && adev->smuio.funcs->get_socket_id && adev->smuio.funcs->get_die_id) { - dev_info(adev->dev, "socket: %d, die: %d " - "%ld deferred hardware errors " - "detected in %s block\n", - adev->smuio.funcs->get_socket_id(adev), - adev->smuio.funcs->get_die_id(adev), - ras_mgr->err_data.de_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d " + "%ld deferred hardware errors " + "detected in %s block\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), + ras_mgr->err_data.de_count, + blk_name); } else { - dev_info(adev->dev, "%ld deferred hardware errors " - "detected in %s block\n", - ras_mgr->err_data.de_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors " + "detected in %s block\n", + ras_mgr->err_data.de_count, + blk_name); } } } @@ -1244,6 +1250,10 @@ int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk, { struct ras_manager *obj; + /* in resume phase, no need to create aca fs node */ + if (adev->in_suspend || amdgpu_in_reset(adev)) + return 0; + obj = get_ras_manager(adev, blk); if (!obj) return -EINVAL; @@ -1265,7 +1275,8 @@ int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk) } static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk, - enum aca_error_type type, struct ras_err_data *err_data) + enum aca_error_type type, struct ras_err_data *err_data, + struct ras_query_context *qctx) { struct ras_manager *obj; @@ -1273,7 +1284,7 @@ static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu if (!obj) return -EINVAL; - return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data); + return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx); } ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr, @@ -1287,13 +1298,14 @@ ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *a if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; - return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, - "ce", info.ce_count); + return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count, + "ce", info.ce_count, "de", info.ue_count); } static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, struct ras_query_if *info, struct ras_err_data *err_data, + struct ras_query_context *qctx, unsigned int error_query_mode) { enum amdgpu_ras_block blk = info ? 
info->head.block : AMDGPU_RAS_BLOCK_COUNT; @@ -1329,17 +1341,21 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, } } else { if (amdgpu_aca_is_enabled(adev)) { - ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data); + ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx); + if (ret) + return ret; + + ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx); if (ret) return ret; - ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data); + ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx); if (ret) return ret; } else { /* FIXME: add code to check return value later */ - amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data); - amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data); + amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx); + amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx); } } @@ -1351,6 +1367,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i { struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); struct ras_err_data err_data; + struct ras_query_context qctx; unsigned int error_query_mode; int ret; @@ -1364,8 +1381,12 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) return -EINVAL; + memset(&qctx, 0, sizeof(qctx)); + qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ? + RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID); ret = amdgpu_ras_query_error_status_helper(adev, info, &err_data, + &qctx, error_query_mode); if (ret) goto out_fini_err_data; @@ -1376,7 +1397,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i info->ce_count = obj->err_data.ce_count; info->de_count = obj->err_data.de_count; - amdgpu_ras_error_generate_report(adev, info, &err_data); + amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx); out_fini_err_data: amdgpu_ras_error_data_fini(&err_data); @@ -2041,7 +2062,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager * } } - amdgpu_umc_poison_handler(adev, obj->head.block, false); + amdgpu_umc_poison_handler(adev, obj->head.block, 0); if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption) poison_stat = block_obj->hw_ops->handle_poison_consumption(adev); @@ -2061,6 +2082,17 @@ static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj { dev_info(obj->adev->dev, "Poison is created\n"); + + if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) { + struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev); + + amdgpu_ras_put_poison_req(obj->adev, + AMDGPU_RAS_BLOCK__UMC, 0, NULL, NULL, false); + + atomic_inc(&con->page_retirement_req_cnt); + + wake_up(&con->page_retirement_wq); + } } static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj, @@ -2371,7 +2403,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, }; status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, - data->bps[i].retired_page); + data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT); if (status == -EBUSY) (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; else if (status == -ENOENT) @@ -2384,6 +2416,19 @@ out: return ret; } +static void amdgpu_ras_set_fed_all(struct 
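The query_page_status() fix above converts a retired page frame number into the byte offset the VRAM manager expects. A standalone sketch of that conversion plus the query-before-reserve idea used by the new amdgpu_ras_reserve_page() later in the patch; the tiny in-memory "manager" and all names are invented:

/* Illustrative only: pfn -> byte-offset conversion and an idempotent
 * "query first, reserve only if unknown" flow. */
#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT 12
#define MAX_RESERVED   8

static uint64_t reserved[MAX_RESERVED];
static int nr_reserved;

/* 0 if already reserved, -2 (stand-in for -ENOENT) if unknown. */
static int query_page_status(uint64_t start)
{
        for (int i = 0; i < nr_reserved; i++)
                if (reserved[i] == start)
                        return 0;
        return -2;
}

static int reserve_page(uint64_t pfn)
{
        uint64_t start = pfn << GPU_PAGE_SHIFT;  /* records hold pfns, manager wants bytes */
        int ret = query_page_status(start);

        if (ret == -2 && nr_reserved < MAX_RESERVED) {
                reserved[nr_reserved++] = start;
                ret = 0;
        }
        return ret;
}

int main(void)
{
        printf("%d reserved=%d\n", reserve_page(0x1234), nr_reserved);  /* reserves once   */
        printf("%d reserved=%d\n", reserve_page(0x1234), nr_reserved);  /* no double book  */
        return 0;
}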
amdgpu_device *adev, + struct amdgpu_hive_info *hive, bool status) +{ + struct amdgpu_device *tmp_adev; + + if (hive) { + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) + amdgpu_ras_set_fed(tmp_adev, status); + } else { + amdgpu_ras_set_fed(adev, status); + } +} + static void amdgpu_ras_do_recovery(struct work_struct *work) { struct amdgpu_ras *ras = @@ -2393,8 +2438,21 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) struct list_head device_list, *device_list_handle = NULL; struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); - if (hive) + if (hive) { atomic_set(&hive->ras_recovery, 1); + + /* If any device which is part of the hive received RAS fatal + * error interrupt, set fatal error status on all. This + * condition will need a recovery, and flag will be cleared + * as part of recovery. + */ + list_for_each_entry(remote_adev, &hive->device_list, + gmc.xgmi.head) + if (amdgpu_ras_get_fed_status(remote_adev)) { + amdgpu_ras_set_fed_all(adev, hive, true); + break; + } + } if (!ras->disable_ras_err_cnt_harvest) { /* Build list of devices to query RAS related errors */ @@ -2439,18 +2497,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET; set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - /* For any RAS error that needs a full reset to - * recover, set the fatal error status - */ - if (hive) { - list_for_each_entry(remote_adev, - &hive->device_list, - gmc.xgmi.head) - amdgpu_ras_set_fed(remote_adev, - true); - } else { - amdgpu_ras_set_fed(adev, true); - } psp_fatal_error_recovery_quirk(&adev->psp); } } @@ -2516,9 +2562,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, goto out; } - amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr, - bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT, - AMDGPU_GPU_PAGE_SIZE); + amdgpu_ras_reserve_page(adev, bps[i].retired_page); memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps)); data->count++; @@ -2674,10 +2718,167 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev, } } +int amdgpu_ras_put_poison_req(struct amdgpu_device *adev, + enum amdgpu_ras_block block, uint16_t pasid, + pasid_notify pasid_fn, void *data, uint32_t reset) +{ + int ret = 0; + struct ras_poison_msg poison_msg; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + memset(&poison_msg, 0, sizeof(poison_msg)); + poison_msg.block = block; + poison_msg.pasid = pasid; + poison_msg.reset = reset; + poison_msg.pasid_fn = pasid_fn; + poison_msg.data = data; + + ret = kfifo_put(&con->poison_fifo, poison_msg); + if (!ret) { + dev_err(adev->dev, "Poison message fifo is full!\n"); + return -ENOSPC; + } + + return 0; +} + +static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev, + struct ras_poison_msg *poison_msg) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + return kfifo_get(&con->poison_fifo, poison_msg); +} + +static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log) +{ + mutex_init(&ecc_log->lock); + + /* Set any value as siphash key */ + memset(&ecc_log->ecc_key, 0xad, sizeof(ecc_log->ecc_key)); + + INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL); + ecc_log->de_updated = false; +} + +static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log) +{ + struct radix_tree_iter iter; + void __rcu **slot; + struct ras_ecc_err *ecc_err; + + mutex_lock(&ecc_log->lock); + radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) { + ecc_err = radix_tree_deref_slot(slot); + 
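The put/get helpers above rely on kfifo's convention of returning 1 on success and 0 when the fifo is full or empty. A minimal userspace ring with the same convention (fixed capacity, no locking, invented struct layout):

/* Illustrative only: a single-producer/single-consumer ring mirroring the
 * kfifo_put()/kfifo_get() return convention used by the poison queue. */
#include <stdio.h>

#define FIFO_CAP 4   /* must be a power of two for the masking below */

struct poison_msg { int block; unsigned short pasid; unsigned int reset; };

struct fifo {
        struct poison_msg buf[FIFO_CAP];
        unsigned int head, tail;          /* head == tail means empty */
};

static int fifo_put(struct fifo *f, struct poison_msg m)
{
        if (f->head - f->tail == FIFO_CAP)
                return 0;                 /* full, caller reports -ENOSPC */
        f->buf[f->head++ & (FIFO_CAP - 1)] = m;
        return 1;
}

static int fifo_get(struct fifo *f, struct poison_msg *m)
{
        if (f->head == f->tail)
                return 0;                 /* empty, consumer just waits again */
        *m = f->buf[f->tail++ & (FIFO_CAP - 1)];
        return 1;
}

int main(void)
{
        struct fifo f = {0};
        struct poison_msg m = { .block = 0, .pasid = 42, .reset = 0 };
        struct poison_msg out;

        printf("put: %d\n", fifo_put(&f, m));
        printf("get: %d block=%d pasid=%d\n", fifo_get(&f, &out), out.block, out.pasid);
        return 0;
}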
kfree(ecc_err->err_pages.pfn); + kfree(ecc_err); + radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot); + } + mutex_unlock(&ecc_log->lock); + + mutex_destroy(&ecc_log->lock); + ecc_log->de_updated = false; +} + +static void amdgpu_ras_do_page_retirement(struct work_struct *work) +{ + struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, + page_retirement_dwork.work); + struct amdgpu_device *adev = con->adev; + struct ras_err_data err_data; + + if (amdgpu_in_reset(adev) || atomic_read(&con->in_recovery)) + return; + + amdgpu_ras_error_data_init(&err_data); + + amdgpu_umc_handle_bad_pages(adev, &err_data); + + amdgpu_ras_error_data_fini(&err_data); + + mutex_lock(&con->umc_ecc_log.lock); + if (radix_tree_tagged(&con->umc_ecc_log.de_page_tree, + UMC_ECC_NEW_DETECTED_TAG)) + schedule_delayed_work(&con->page_retirement_dwork, + msecs_to_jiffies(AMDGPU_RAS_RETIRE_PAGE_INTERVAL)); + mutex_unlock(&con->umc_ecc_log.lock); +} + +static int amdgpu_ras_query_ecc_status(struct amdgpu_device *adev, + enum amdgpu_ras_block ras_block, uint32_t timeout_ms) +{ + int ret = 0; + struct ras_ecc_log_info *ecc_log; + struct ras_query_if info; + uint32_t timeout = timeout_ms; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + memset(&info, 0, sizeof(info)); + info.head.block = ras_block; + + ecc_log = &ras->umc_ecc_log; + ecc_log->de_updated = false; + do { + ret = amdgpu_ras_query_error_status(adev, &info); + if (ret) { + dev_err(adev->dev, "Failed to query ras error! ret:%d\n", ret); + return ret; + } + + if (timeout && !ecc_log->de_updated) { + msleep(1); + timeout--; + } + } while (timeout && !ecc_log->de_updated); + + if (timeout_ms && !timeout) { + dev_warn(adev->dev, "Can't find deferred error\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev, + uint32_t timeout) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int ret; + + ret = amdgpu_ras_query_ecc_status(adev, AMDGPU_RAS_BLOCK__UMC, timeout); + if (!ret) + schedule_delayed_work(&con->page_retirement_dwork, 0); +} + +static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev, + struct ras_poison_msg *poison_msg) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + uint32_t reset = poison_msg->reset; + uint16_t pasid = poison_msg->pasid; + + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + + if (poison_msg->pasid_fn) + poison_msg->pasid_fn(adev, pasid, poison_msg->data); + + if (reset) { + flush_delayed_work(&con->page_retirement_dwork); + + con->gpu_reset_flags |= reset; + amdgpu_ras_reset_gpu(adev); + } + + return 0; +} + static int amdgpu_ras_page_retirement_thread(void *param) { struct amdgpu_device *adev = (struct amdgpu_device *)param; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_poison_msg poison_msg; + enum amdgpu_ras_block ras_block; + bool poison_creation_is_handled = false; while (!kthread_should_stop()) { @@ -2688,13 +2889,34 @@ static int amdgpu_ras_page_retirement_thread(void *param) if (kthread_should_stop()) break; - dev_info(adev->dev, "Start processing page retirement. 
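amdgpu_ras_query_ecc_status() above polls the RAS query path roughly once per millisecond until the deferred-error log is updated or the timeout lapses. The same loop shape in standalone C, with an invented check_updated() and nanosleep() standing in for the query and msleep(1):

/* Illustrative only: poll until a flag is set or timeout_ms expires;
 * -1 stands in for -ETIMEDOUT. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int polls_left = 3;

static bool check_updated(void)   /* pretend the update lands on the 3rd poll */
{
        return --polls_left <= 0;
}

static int poll_for_update(unsigned int timeout_ms)
{
        unsigned int timeout = timeout_ms;
        bool updated = false;

        do {
                updated = check_updated();
                if (timeout && !updated) {
                        struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 }; /* ~1 ms */
                        nanosleep(&ts, NULL);
                        timeout--;
                }
        } while (timeout && !updated);

        if (timeout_ms && !timeout && !updated)
                return -1;                /* timed out without seeing the update */
        return 0;
}

int main(void)
{
        printf("%d\n", poll_for_update(100));   /* 0: update observed in time */
        return 0;
}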
request:%d\n", - atomic_read(&con->page_retirement_req_cnt)); - atomic_dec(&con->page_retirement_req_cnt); - amdgpu_umc_bad_page_polling_timeout(adev, - false, MAX_UMC_POISON_POLLING_TIME_ASYNC); + if (!amdgpu_ras_get_poison_req(adev, &poison_msg)) + continue; + + ras_block = poison_msg.block; + + dev_info(adev->dev, "Start processing ras block %s(%d)\n", + ras_block_str(ras_block), ras_block); + + if (ras_block == AMDGPU_RAS_BLOCK__UMC) { + amdgpu_ras_poison_creation_handler(adev, + MAX_UMC_POISON_POLLING_TIME_ASYNC); + poison_creation_is_handled = true; + } else { + /* poison_creation_is_handled: + * false: no poison creation interrupt, but it has poison + * consumption interrupt. + * true: It has poison creation interrupt at the beginning, + * but it has no poison creation interrupt later. + */ + amdgpu_ras_poison_creation_handler(adev, + poison_creation_is_handled ? + 0 : MAX_UMC_POISON_POLLING_TIME_ASYNC); + + amdgpu_ras_poison_consumption_handler(adev, &poison_msg); + poison_creation_is_handled = false; + } } return 0; @@ -2763,6 +2985,8 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) } } + mutex_init(&con->page_rsv_lock); + INIT_KFIFO(con->poison_fifo); mutex_init(&con->page_retirement_lock); init_waitqueue_head(&con->page_retirement_wq); atomic_set(&con->page_retirement_req_cnt, 0); @@ -2773,6 +2997,8 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n"); } + INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement); + amdgpu_ras_ecc_log_init(&con->umc_ecc_log); #ifdef CONFIG_X86_MCE_AMD if ((adev->asic_type == CHIP_ALDEBARAN) && (adev->gmc.xgmi.connected_to_cpu)) @@ -2813,8 +3039,14 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) atomic_set(&con->page_retirement_req_cnt, 0); + mutex_destroy(&con->page_rsv_lock); + cancel_work_sync(&con->recovery_work); + cancel_delayed_work_sync(&con->page_retirement_dwork); + + amdgpu_ras_ecc_log_fini(&con->umc_ecc_log); + mutex_lock(&con->recovery_lock); con->eh_data = NULL; kfree(data->bps); @@ -3036,6 +3268,35 @@ static int amdgpu_get_ras_schema(struct amdgpu_device *adev) AMDGPU_RAS_ERROR__PARITY; } +static void ras_event_mgr_init(struct ras_event_manager *mgr) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mgr->seqnos); i++) + atomic64_set(&mgr->seqnos[i], 0); +} + +static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + struct amdgpu_hive_info *hive; + + if (!ras) + return; + + hive = amdgpu_get_xgmi_hive(adev); + ras->event_mgr = hive ? 
&hive->event_mgr : &ras->__event_mgr; + + /* init event manager with node 0 on xgmi system */ + if (!amdgpu_in_reset(adev)) { + if (!hive || adev->gmc.xgmi.node_id == 0) + ras_event_mgr_init(ras->event_mgr); + } + + if (hive) + amdgpu_put_xgmi_hive(hive); +} + int amdgpu_ras_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -3356,6 +3617,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return 0; + amdgpu_ras_event_mgr_init(adev); + if (amdgpu_aca_is_enabled(adev)) { if (amdgpu_in_reset(adev)) r = amdgpu_aca_reset(adev); @@ -3472,14 +3735,39 @@ void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status) atomic_set(&ras->fed, !!status); } +bool amdgpu_ras_event_id_is_valid(struct amdgpu_device *adev, u64 id) +{ + return !(id & BIT_ULL(63)); +} + +u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + u64 id; + + switch (type) { + case RAS_EVENT_TYPE_ISR: + id = (u64)atomic64_read(&ras->event_mgr->seqnos[type]); + break; + case RAS_EVENT_TYPE_INVALID: + default: + id = BIT_ULL(63) | 0ULL; + break; + } + + return id; +} + void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) { if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + u64 event_id = (u64)atomic64_inc_return(&ras->event_mgr->seqnos[RAS_EVENT_TYPE_ISR]); - dev_info(adev->dev, "uncorrectable hardware error" - "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); + RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error" + "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); + amdgpu_ras_set_fed(adev, true); ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; amdgpu_ras_reset_gpu(adev); } @@ -3998,6 +4286,8 @@ void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_a { struct ras_err_addr *mca_err_addr; + /* This function will be retired. */ + return; mca_err_addr = kzalloc(sizeof(*mca_err_addr), GFP_KERNEL); if (!mca_err_addr) return; @@ -4195,3 +4485,19 @@ void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances) amdgpu_ras_boot_time_error_reporting(adev, i, boot_error); } } + +int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; + uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT; + int ret = 0; + + mutex_lock(&con->page_rsv_lock); + ret = amdgpu_vram_mgr_query_page_status(mgr, start); + if (ret == -ENOENT) + ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE); + mutex_unlock(&con->page_rsv_lock); + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index db9cb2b4e9..7021c4a66f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -26,6 +26,9 @@ #include #include +#include +#include +#include #include "ta_ras_if.h" #include "amdgpu_ras_eeprom.h" #include "amdgpu_smuio.h" @@ -64,6 +67,14 @@ struct amdgpu_iv_entry; /* The high three bits indicates socketid */ #define AMDGPU_RAS_GET_FEATURES(val) ((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK) +#define RAS_EVENT_LOG(_adev, _id, _fmt, ...) 
\ +do { \ + if (amdgpu_ras_event_id_is_valid((_adev), (_id))) \ + dev_info((_adev)->dev, "{%llu}" _fmt, (_id), ##__VA_ARGS__); \ + else \ + dev_info((_adev)->dev, _fmt, ##__VA_ARGS__); \ +} while (0) + enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__UMC = 0, AMDGPU_RAS_BLOCK__SDMA, @@ -419,6 +430,52 @@ struct umc_ecc_info { int record_ce_addr_supported; }; +enum ras_event_type { + RAS_EVENT_TYPE_INVALID = -1, + RAS_EVENT_TYPE_ISR = 0, + RAS_EVENT_TYPE_COUNT, +}; + +struct ras_event_manager { + atomic64_t seqnos[RAS_EVENT_TYPE_COUNT]; +}; + +struct ras_query_context { + enum ras_event_type type; + u64 event_id; +}; + +typedef int (*pasid_notify)(struct amdgpu_device *adev, + uint16_t pasid, void *data); + +struct ras_poison_msg { + enum amdgpu_ras_block block; + uint16_t pasid; + uint32_t reset; + pasid_notify pasid_fn; + void *data; +}; + +struct ras_err_pages { + uint32_t count; + uint64_t *pfn; +}; + +struct ras_ecc_err { + u64 hash_index; + uint64_t status; + uint64_t ipid; + uint64_t addr; + struct ras_err_pages err_pages; +}; + +struct ras_ecc_log_info { + struct mutex lock; + siphash_key_t ecc_key; + struct radix_tree_root de_page_tree; + bool de_updated; +}; + struct amdgpu_ras { /* ras infrastructure */ /* for ras itself. */ @@ -477,8 +534,18 @@ struct amdgpu_ras { wait_queue_head_t page_retirement_wq; struct mutex page_retirement_lock; atomic_t page_retirement_req_cnt; + struct mutex page_rsv_lock; + DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128); + struct ras_ecc_log_info umc_ecc_log; + struct delayed_work page_retirement_dwork; + /* Fatal error detected flag */ atomic_t fed; + + /* RAS event manager */ + struct ras_event_manager __event_mgr; + struct ras_event_manager *event_mgr; + }; struct ras_fs_data { @@ -512,6 +579,7 @@ struct ras_err_data { unsigned long de_count; unsigned long err_addr_cnt; struct eeprom_table_record *err_addr; + unsigned long err_addr_len; u32 err_list_count; struct list_head err_node_list; }; @@ -879,4 +947,13 @@ void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status); bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev); +bool amdgpu_ras_event_id_is_valid(struct amdgpu_device *adev, u64 id); +u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type); + +int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn); + +int amdgpu_ras_put_poison_req(struct amdgpu_device *adev, + enum amdgpu_ras_block block, uint16_t pasid, + pasid_notify pasid_fn, void *data, uint32_t reset); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index b12808c0c3..06a62a8a99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -404,6 +404,22 @@ static int amdgpu_ras_eeprom_correct_header_tag( return res; } +static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; + + switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { + case IP_VERSION(8, 10, 0): + case IP_VERSION(12, 0, 0): + hdr->version = RAS_TABLE_VER_V2_1; + return; + default: + hdr->version = RAS_TABLE_VER_V1; + return; + } +} + /** * amdgpu_ras_eeprom_reset_table -- Reset the RAS EEPROM table * @control: pointer to control structure @@ -423,11 +439,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control 
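The seqnos[] array and query context above, together with the acquire/validity helpers added to amdgpu_ras.c earlier in this patch, implement per-type event ids where bit 63 marks "no valid event". A userspace model using C11 atomics; the names are invented and the bit-63 encoding is the only detail carried over:

/* Illustrative only: one monotonically increasing sequence number per event
 * type, with the top bit reserved for "invalid". */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum event_type { EVENT_TYPE_ISR = 0, EVENT_TYPE_COUNT };

#define EVENT_ID_INVALID (1ULL << 63)

static atomic_uint_least64_t seqnos[EVENT_TYPE_COUNT];

static bool event_id_is_valid(uint64_t id)
{
        return !(id & EVENT_ID_INVALID);
}

/* Interrupt path: allocate a fresh id.  Query path: read the current one so
 * follow-up messages share the tag of the interrupt that triggered them. */
static uint64_t event_id_alloc(enum event_type type)
{
        return atomic_fetch_add(&seqnos[type], 1) + 1;
}

static uint64_t event_id_current(enum event_type type)
{
        return atomic_load(&seqnos[type]);
}

int main(void)
{
        uint64_t isr_id = event_id_alloc(EVENT_TYPE_ISR);
        uint64_t query_id = event_id_current(EVENT_TYPE_ISR);

        printf("isr=%" PRIu64 " query=%" PRIu64 " valid=%d invalid_valid=%d\n",
               isr_id, query_id, event_id_is_valid(query_id),
               event_id_is_valid(EVENT_ID_INVALID));
        return 0;
}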
*control) mutex_lock(&control->ras_tbl_mutex); hdr->header = RAS_TABLE_HDR_VAL; - if (adev->umc.ras && - adev->umc.ras->set_eeprom_table_version) - adev->umc.ras->set_eeprom_table_version(hdr); - else - hdr->version = RAS_TABLE_VER_V1; + amdgpu_ras_set_eeprom_table_version(control); if (hdr->version == RAS_TABLE_VER_V2_1) { hdr->first_rec_offset = RAS_RECORD_START_V2_1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h index 381101d2bf..50fcd86e10 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -164,4 +164,29 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) } } +/** + * amdgpu_res_cleared - check if blocks are cleared + * + * @cur: the cursor to extract the block + * + * Check if the @cur block is cleared + */ +static inline bool amdgpu_res_cleared(struct amdgpu_res_cursor *cur) +{ + struct drm_buddy_block *block; + + switch (cur->mem_type) { + case TTM_PL_VRAM: + block = cur->node; + + if (!amdgpu_vram_mgr_is_cleared(block)) + return false; + break; + default: + return false; + } + + return true; +} + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 147100c27c..ea4873f6cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -21,9 +21,6 @@ * */ -#include -#include - #include "amdgpu_reset.h" #include "aldebaran.h" #include "sienna_cichlid.h" @@ -161,105 +158,3 @@ void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain) atomic_set(&reset_domain->in_gpu_reset, 0); up_write(&reset_domain->sem); } - -#ifndef CONFIG_DEV_COREDUMP -void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, - struct amdgpu_reset_context *reset_context) -{ -} -#else -static ssize_t -amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, - void *data, size_t datalen) -{ - struct drm_printer p; - struct amdgpu_coredump_info *coredump = data; - struct drm_print_iterator iter; - int i; - - iter.data = buffer; - iter.offset = 0; - iter.start = offset; - iter.remain = count; - - p = drm_coredump_printer(&iter); - - drm_printf(&p, "**** AMDGPU Device Coredump ****\n"); - drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n"); - drm_printf(&p, "kernel: " UTS_RELEASE "\n"); - drm_printf(&p, "module: " KBUILD_MODNAME "\n"); - drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec, - coredump->reset_time.tv_nsec); - - if (coredump->reset_task_info.pid) - drm_printf(&p, "process_name: %s PID: %d\n", - coredump->reset_task_info.process_name, - coredump->reset_task_info.pid); - - if (coredump->ring) { - drm_printf(&p, "\nRing timed out details\n"); - drm_printf(&p, "IP Type: %d Ring Name: %s\n", - coredump->ring->funcs->type, - coredump->ring->name); - } - - if (coredump->reset_vram_lost) - drm_printf(&p, "VRAM is lost due to GPU reset!\n"); - if (coredump->adev->reset_info.num_regs) { - drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n"); - - for (i = 0; i < coredump->adev->reset_info.num_regs; i++) - drm_printf(&p, "0x%08x: 0x%08x\n", - coredump->adev->reset_info.reset_dump_reg_list[i], - coredump->adev->reset_info.reset_dump_reg_value[i]); - } - - return count - iter.remain; -} - -static void amdgpu_devcoredump_free(void *data) -{ - kfree(data); -} - -void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, - struct amdgpu_reset_context *reset_context) -{ - struct amdgpu_coredump_info 
*coredump; - struct drm_device *dev = adev_to_drm(adev); - struct amdgpu_job *job = reset_context->job; - struct drm_sched_job *s_job; - - coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT); - - if (!coredump) { - DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__); - return; - } - - coredump->reset_vram_lost = vram_lost; - - if (reset_context->job && reset_context->job->vm) { - struct amdgpu_task_info *ti; - struct amdgpu_vm *vm = reset_context->job->vm; - - ti = amdgpu_vm_get_task_info_vm(vm); - if (ti) { - coredump->reset_task_info = *ti; - amdgpu_vm_put_task_info(ti); - } - } - - if (job) { - s_job = &job->base; - coredump->ring = to_amdgpu_ring(s_job->sched); - } - - coredump->adev = adev; - - ktime_get_ts64(&coredump->reset_time); - - dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT, - amdgpu_devcoredump_read, amdgpu_devcoredump_free); -} -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index 60522963aa..b11d190ece 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -32,6 +32,7 @@ enum AMDGPU_RESET_FLAGS { AMDGPU_NEED_FULL_RESET = 0, AMDGPU_SKIP_HW_RESET = 1, + AMDGPU_SKIP_COREDUMP = 2, }; struct amdgpu_reset_context { @@ -88,19 +89,6 @@ struct amdgpu_reset_domain { atomic_t reset_res; }; -#ifdef CONFIG_DEV_COREDUMP - -#define AMDGPU_COREDUMP_VERSION "1" - -struct amdgpu_coredump_info { - struct amdgpu_device *adev; - struct amdgpu_task_info reset_task_info; - struct timespec64 reset_time; - bool reset_vram_lost; - struct amdgpu_ring *ring; -}; -#endif - int amdgpu_reset_init(struct amdgpu_device *adev); int amdgpu_reset_fini(struct amdgpu_device *adev); @@ -141,9 +129,6 @@ void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain); void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain); -void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, - struct amdgpu_reset_context *reset_context); - #define for_each_handler(i, handler, reset_ctl) \ for (i = 0; (i < AMDGPU_RESET_MAX_HANDLERS) && \ (handler = (*reset_ctl->reset_handlers)[i]); \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 173a2a3080..b51a82e711 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -132,7 +132,7 @@ struct amdgpu_buffer_funcs { uint64_t dst_offset, /* number of byte to transfer */ uint32_t byte_count, - bool tmz); + uint32_t copy_flags); /* maximum bytes in a single operation */ uint32_t fill_max_bytes; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h index ff44351810..ec9d12f85f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h @@ -44,6 +44,7 @@ struct amdgpu_smuio_funcs { u32 (*get_socket_id)(struct amdgpu_device *adev); enum amdgpu_pkg_type (*get_pkg_type)(struct amdgpu_device *adev); bool (*is_host_gpu_xgmi_supported)(struct amdgpu_device *adev); + u64 (*get_gpu_clock_counter)(struct amdgpu_device *adev); }; struct amdgpu_smuio { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index f539b1d002..383fce40d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -178,10 +178,10 @@ TRACE_EVENT(amdgpu_cs_ioctl, TP_fast_assign( __entry->sched_job_id = job->base.id; - __assign_str(timeline, 
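Switching the copy callback above from a bool tmz argument to a u32 copy_flags word keeps the SDMA copy path extensible. A small sketch of the calling convention; COPY_FLAG_TMZ mirrors the AMDGPU_COPY_FLAGS_TMZ bit defined later in this patch, while COPY_FLAG_READONLY is purely hypothetical and shown only to illustrate the extensibility argument:

/* Illustrative only: a flags word instead of a lone boolean. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define COPY_FLAG_TMZ      (1u << 0)
#define COPY_FLAG_READONLY (1u << 1)   /* hypothetical future bit */

static void emit_copy(uint64_t src, uint64_t dst, uint32_t bytes, uint32_t flags)
{
        bool tmz = flags & COPY_FLAG_TMZ;

        printf("copy %u bytes 0x%llx -> 0x%llx%s\n",
               (unsigned)bytes, (unsigned long long)src, (unsigned long long)dst,
               tmz ? " [TMZ]" : "");
}

int main(void)
{
        emit_copy(0x1000, 0x2000, 256, 0);              /* old "tmz = false" call */
        emit_copy(0x1000, 0x2000, 256, COPY_FLAG_TMZ);  /* old "tmz = true" call  */
        return 0;
}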
AMDGPU_JOB_GET_TIMELINE_NAME(job)); + __assign_str(timeline); __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __assign_str(ring, to_amdgpu_ring(job->base.sched)->name); + __assign_str(ring); __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", @@ -203,10 +203,10 @@ TRACE_EVENT(amdgpu_sched_run_job, TP_fast_assign( __entry->sched_job_id = job->base.id; - __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)); + __assign_str(timeline); __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __assign_str(ring, to_amdgpu_ring(job->base.sched)->name); + __assign_str(ring); __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", @@ -231,7 +231,7 @@ TRACE_EVENT(amdgpu_vm_grab_id, TP_fast_assign( __entry->pasid = vm->pasid; - __assign_str(ring, ring->name); + __assign_str(ring); __entry->vmid = job->vmid; __entry->vm_hub = ring->vm_hub, __entry->pd_addr = job->vm_pd_addr; @@ -425,7 +425,7 @@ TRACE_EVENT(amdgpu_vm_flush, ), TP_fast_assign( - __assign_str(ring, ring->name); + __assign_str(ring); __entry->vmid = vmid; __entry->vm_hub = ring->vm_hub; __entry->pd_addr = pd_addr; @@ -526,7 +526,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync, ), TP_fast_assign( - __assign_str(ring, sched_job->base.sched->name); + __assign_str(ring); __entry->id = sched_job->base.id; __entry->fence = fence; __entry->ctx = fence->context; @@ -554,21 +554,6 @@ TRACE_EVENT(amdgpu_reset_reg_dumps, __entry->value) ); -TRACE_EVENT(amdgpu_runpm_reference_dumps, - TP_PROTO(uint32_t index, const char *func), - TP_ARGS(index, func), - TP_STRUCT__entry( - __field(uint32_t, index) - __string(func, func) - ), - TP_fast_assign( - __entry->index = index; - __assign_str(func, func); - ), - TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n", - __entry->index, - __get_str(func)) -); #undef AMDGPU_JOB_GET_TIMELINE_NAME #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 29c197c000..e785f12841 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -236,7 +236,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo); dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8; amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, - dst_addr, num_bytes, false); + dst_addr, num_bytes, 0); amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > num_dw); @@ -296,6 +296,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, struct dma_fence *fence = NULL; int r = 0; + uint32_t copy_flags = 0; + if (!adev->mman.buffer_funcs_enabled) { DRM_ERROR("Trying to move memory with ring turned off.\n"); return -EINVAL; @@ -323,8 +325,11 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, if (r) goto error; - r = amdgpu_copy_buffer(ring, from, to, cur_size, - resv, &next, false, true, tmz); + if (tmz) + copy_flags |= AMDGPU_COPY_FLAGS_TMZ; + + r = amdgpu_copy_buffer(ring, from, to, cur_size, resv, + &next, false, true, copy_flags); if (r) goto error; @@ -378,11 +383,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) { struct dma_fence *wipe_fence = NULL; - r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence, - false); + r = amdgpu_fill_buffer(abo, 0, 
NULL, &wipe_fence, + false); if (r) { goto error; } else if (wipe_fence) { + amdgpu_vram_mgr_set_cleared(bo->resource); dma_fence_put(fence); fence = wipe_fence; } @@ -1492,7 +1498,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, swap(src_addr, dst_addr); amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, - PAGE_SIZE, false); + PAGE_SIZE, 0); amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > num_dw); @@ -2143,7 +2149,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, struct dma_resv *resv, struct dma_fence **fence, bool direct_submit, - bool vm_needs_flush, bool tmz) + bool vm_needs_flush, uint32_t copy_flags) { struct amdgpu_device *adev = ring->adev; unsigned int num_loops, num_dw; @@ -2169,8 +2175,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, uint32_t cur_size_in_bytes = min(byte_count, max_bytes); amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset, - dst_offset, cur_size_in_bytes, tmz); - + dst_offset, cur_size_in_bytes, copy_flags); src_offset += cur_size_in_bytes; dst_offset += cur_size_in_bytes; byte_count -= cur_size_in_bytes; @@ -2230,6 +2235,71 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, return 0; } +/** + * amdgpu_ttm_clear_buffer - clear memory buffers + * @bo: amdgpu buffer object + * @resv: reservation object + * @fence: dma_fence associated with the operation + * + * Clear the memory buffer resource. + * + * Returns: + * 0 for success or a negative error code on failure. + */ +int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo, + struct dma_resv *resv, + struct dma_fence **fence) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; + struct amdgpu_res_cursor cursor; + u64 addr; + int r; + + if (!adev->mman.buffer_funcs_enabled) + return -EINVAL; + + if (!fence) + return -EINVAL; + + *fence = dma_fence_get_stub(); + + amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor); + + mutex_lock(&adev->mman.gtt_window_lock); + while (cursor.remaining) { + struct dma_fence *next = NULL; + u64 size; + + if (amdgpu_res_cleared(&cursor)) { + amdgpu_res_next(&cursor, cursor.size); + continue; + } + + /* Never clear more than 256MiB at once to avoid timeouts */ + size = min(cursor.size, 256ULL << 20); + + r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor, + 1, ring, false, &size, &addr); + if (r) + goto err; + + r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv, + &next, true, true); + if (r) + goto err; + + dma_fence_put(*fence); + *fence = next; + + amdgpu_res_next(&cursor, size); + } +err: + mutex_unlock(&adev->mman.gtt_window_lock); + + return r; +} + int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct dma_resv *resv, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 32cf6b6f6e..b6f53129de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -38,8 +38,6 @@ #define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 -#define AMDGPU_POISON 0xd0bed0be - extern const struct attribute_group amdgpu_vram_mgr_attr_group; extern const struct attribute_group amdgpu_gtt_mgr_attr_group; @@ -111,6 +109,8 @@ struct amdgpu_copy_mem { unsigned long offset; }; +#define AMDGPU_COPY_FLAGS_TMZ (1 << 0) + int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, 
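amdgpu_ttm_clear_buffer() above walks the resource, skips blocks already marked cleared, and clamps each fill to 256 MiB. The same control flow over a plain array of invented "blocks":

/* Illustrative only: chunked clearing with a skip for already-cleared spans. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CLEAR_BYTES (256ULL << 20)   /* never clear more than 256 MiB at once */

struct block { uint64_t size; bool cleared; };

static void clear_resource(const struct block *blocks, int count)
{
        for (int i = 0; i < count; i++) {
                uint64_t remaining = blocks[i].size;

                if (blocks[i].cleared)
                        continue;                        /* skip blocks already wiped */

                while (remaining) {
                        uint64_t chunk = remaining < MAX_CLEAR_BYTES ?
                                         remaining : MAX_CLEAR_BYTES;

                        printf("fill block %d: %llu bytes\n", i,
                               (unsigned long long)chunk);
                        remaining -= chunk;
                }
        }
}

int main(void)
{
        struct block blocks[] = {
                { .size = 300ULL << 20, .cleared = false },  /* split into 256M + 44M */
                { .size = 64ULL << 20,  .cleared = true  },  /* skipped entirely      */
        };

        clear_resource(blocks, 2);
        return 0;
}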
uint64_t gtt_size); void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev); int amdgpu_preempt_mgr_init(struct amdgpu_device *adev); @@ -151,13 +151,16 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, struct dma_resv *resv, struct dma_fence **fence, bool direct_submit, - bool vm_needs_flush, bool tmz); + bool vm_needs_flush, uint32_t copy_flags); int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, const struct amdgpu_copy_mem *src, const struct amdgpu_copy_mem *dst, uint64_t size, bool tmz, struct dma_resv *resv, struct dma_fence **f); +int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo, + struct dma_resv *resv, + struct dma_fence **fence); int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct dma_resv *resv, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 6194457600..105d4de061 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -125,6 +125,7 @@ enum psp_fw_type { PSP_FW_TYPE_PSP_INTF_DRV, PSP_FW_TYPE_PSP_DBG_DRV, PSP_FW_TYPE_PSP_RAS_DRV, + PSP_FW_TYPE_PSP_IPKEYMGR_DRV, PSP_FW_TYPE_MAX_INDEX, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 6f7451e3ee..540e0f066b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -21,10 +21,13 @@ * */ +#include #include "amdgpu.h" #include "umc_v6_7.h" #define MAX_UMC_POISON_POLLING_TIME_SYNC 20 //ms +#define MAX_UMC_HASH_STRING_SIZE 256 + static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev, struct ras_err_data *err_data, uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst) @@ -63,6 +66,8 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev, goto out_fini_err_data; } + err_data.err_addr_len = adev->umc.max_ras_err_cnt_per_query; + /* * Translate UMC channel address to Physical address */ @@ -86,7 +91,7 @@ out_fini_err_data: return ret; } -static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev, +void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; @@ -118,6 +123,8 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev, if(!err_data->err_addr) dev_warn(adev->dev, "Failed to alloc memory for " "umc error address record!\n"); + else + err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query; /* umc query_ras_error_address is also responsible for clearing * error status @@ -143,6 +150,8 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev, if(!err_data->err_addr) dev_warn(adev->dev, "Failed to alloc memory for " "umc error address record!\n"); + else + err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query; /* umc query_ras_error_address is also responsible for clearing * error status @@ -178,7 +187,7 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev, static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, void *ras_error_status, struct amdgpu_iv_entry *entry, - bool reset) + uint32_t reset) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -187,9 +196,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, amdgpu_umc_handle_bad_pages(adev, ras_error_status); if (err_data->ue_count && reset) { - /* use mode-2 reset for poison consumption */ 
- if (!entry) - con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET; + con->gpu_reset_flags |= reset; amdgpu_ras_reset_gpu(adev); } @@ -197,7 +204,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, } int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev, - bool reset, uint32_t timeout_ms) + uint32_t reset, uint32_t timeout_ms) { struct ras_err_data err_data; struct ras_common_if head = { @@ -239,16 +246,16 @@ int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev, if (reset) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - /* use mode-2 reset for poison consumption */ - con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET; + con->gpu_reset_flags |= reset; amdgpu_ras_reset_gpu(adev); } return 0; } -int amdgpu_umc_poison_handler(struct amdgpu_device *adev, - enum amdgpu_ras_block block, bool reset) +int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev, + enum amdgpu_ras_block block, uint16_t pasid, + pasid_notify pasid_fn, void *data, uint32_t reset) { int ret = AMDGPU_RAS_SUCCESS; @@ -286,16 +293,14 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, amdgpu_ras_error_data_fini(&err_data); } else { - if (reset) { - amdgpu_umc_bad_page_polling_timeout(adev, - reset, MAX_UMC_POISON_POLLING_TIME_SYNC); - } else { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + amdgpu_ras_put_poison_req(adev, + block, pasid, pasid_fn, data, reset); + atomic_inc(&con->page_retirement_req_cnt); wake_up(&con->page_retirement_wq); - } } } else { if (adev->virt.ops && adev->virt.ops->ras_poison_handler) @@ -308,11 +313,19 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, return ret; } +int amdgpu_umc_poison_handler(struct amdgpu_device *adev, + enum amdgpu_ras_block block, uint32_t reset) +{ + return amdgpu_umc_pasid_poison_handler(adev, + block, 0, NULL, NULL, reset); +} + int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, void *ras_error_status, struct amdgpu_iv_entry *entry) { - return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true); + return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, + AMDGPU_RAS_GPU_RESET_MODE1_RESET); } int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev) @@ -389,14 +402,20 @@ int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, return 0; } -void amdgpu_umc_fill_error_record(struct ras_err_data *err_data, +int amdgpu_umc_fill_error_record(struct ras_err_data *err_data, uint64_t err_addr, uint64_t retired_page, uint32_t channel_index, uint32_t umc_inst) { - struct eeprom_table_record *err_rec = - &err_data->err_addr[err_data->err_addr_cnt]; + struct eeprom_table_record *err_rec; + + if (!err_data || + !err_data->err_addr || + (err_data->err_addr_cnt >= err_data->err_addr_len)) + return -EINVAL; + + err_rec = &err_data->err_addr[err_data->err_addr_cnt]; err_rec->address = err_addr; /* page frame address is saved */ @@ -408,6 +427,8 @@ void amdgpu_umc_fill_error_record(struct ras_err_data *err_data, err_rec->mcumc_id = umc_inst; err_data->err_addr_cnt++; + + return 0; } int amdgpu_umc_loop_channels(struct amdgpu_device *adev, @@ -440,3 +461,76 @@ int amdgpu_umc_loop_channels(struct amdgpu_device *adev, return 0; } + +int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev, + uint64_t status, uint64_t ipid, uint64_t addr) +{ + if (adev->umc.ras->update_ecc_status) + return adev->umc.ras->update_ecc_status(adev, + status, ipid, addr); + return 0; +} + +static int amdgpu_umc_uint64_cmp(const void *a, const void *b) +{ + uint64_t *addr_a 
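amdgpu_umc_fill_error_record() above now refuses to write past err_addr_len instead of silently overflowing the record array. The same bounds-checked append as a generic standalone helper (record layout and the error value are stand-ins):

/* Illustrative only: append into a fixed-capacity array with a length check;
 * -22 stands in for -EINVAL. */
#include <stdint.h>
#include <stdio.h>

struct record { uint64_t address; uint64_t retired_page; };

struct record_buf {
        struct record *recs;
        unsigned long count;    /* records filled so far */
        unsigned long len;      /* capacity of recs[]    */
};

static int fill_record(struct record_buf *buf, uint64_t addr, uint64_t page)
{
        if (!buf || !buf->recs || buf->count >= buf->len)
                return -22;

        buf->recs[buf->count].address = addr;
        buf->recs[buf->count].retired_page = page;
        buf->count++;
        return 0;
}

int main(void)
{
        struct record storage[1];
        struct record_buf buf = { .recs = storage, .count = 0, .len = 1 };

        printf("%d\n", fill_record(&buf, 0x1000, 0x1));  /* 0: fits    */
        printf("%d\n", fill_record(&buf, 0x2000, 0x2));  /* -22: full  */
        return 0;
}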
= (uint64_t *)a; + uint64_t *addr_b = (uint64_t *)b; + + if (*addr_a > *addr_b) + return 1; + else if (*addr_a < *addr_b) + return -1; + else + return 0; +} + +/* Use string hash to avoid logging the same bad pages repeatedly */ +int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev, + uint64_t *pfns, int len, uint64_t *val) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + char buf[MAX_UMC_HASH_STRING_SIZE] = {0}; + int offset = 0, i = 0; + uint64_t hash_val; + + if (!pfns || !len) + return -EINVAL; + + sort(pfns, len, sizeof(uint64_t), amdgpu_umc_uint64_cmp, NULL); + + for (i = 0; i < len; i++) + offset += snprintf(&buf[offset], sizeof(buf) - offset, "%llx", pfns[i]); + + hash_val = siphash(buf, offset, &con->umc_ecc_log.ecc_key); + + *val = hash_val; + + return 0; +} + +int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev, + struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_ecc_log_info *ecc_log; + int ret; + + ecc_log = &con->umc_ecc_log; + + mutex_lock(&ecc_log->lock); + ret = radix_tree_insert(ecc_tree, ecc_err->hash_index, ecc_err); + if (!ret) { + struct ras_err_pages *err_pages = &ecc_err->err_pages; + int i; + + /* Reserve memory */ + for (i = 0; i < err_pages->count; i++) + amdgpu_ras_reserve_page(adev, err_pages->pfn[i]); + + radix_tree_tag_set(ecc_tree, + ecc_err->hash_index, UMC_ECC_NEW_DETECTED_TAG); + } + mutex_unlock(&ecc_log->lock); + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 26d2ae498d..5f50c69c3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -52,6 +52,8 @@ #define LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) \ LOOP_UMC_NODE_INST((node_inst)) LOOP_UMC_INST_AND_CH((umc_inst), (ch_inst)) +/* Page retirement tag */ +#define UMC_ECC_NEW_DETECTED_TAG 0x1 typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst, void *data); @@ -66,8 +68,8 @@ struct amdgpu_umc_ras { void *ras_error_status); bool (*check_ecc_err_status)(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, void *ras_error_status); - /* support different eeprom table version for different asic */ - void (*set_eeprom_table_version)(struct amdgpu_ras_eeprom_table_header *hdr); + int (*update_ecc_status)(struct amdgpu_device *adev, + uint64_t status, uint64_t ipid, uint64_t addr); }; struct amdgpu_umc_funcs { @@ -103,11 +105,14 @@ struct amdgpu_umc { int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev); int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); int amdgpu_umc_poison_handler(struct amdgpu_device *adev, - enum amdgpu_ras_block block, bool reset); + enum amdgpu_ras_block block, uint32_t reset); +int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev, + enum amdgpu_ras_block block, uint16_t pasid, + pasid_notify pasid_fn, void *data, uint32_t reset); int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); -void amdgpu_umc_fill_error_record(struct ras_err_data *err_data, +int amdgpu_umc_fill_error_record(struct ras_err_data *err_data, uint64_t err_addr, uint64_t retired_page, uint32_t channel_index, @@ -123,5 +128,15 @@ int amdgpu_umc_loop_channels(struct amdgpu_device *adev, umc_func func, void *data); int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev, - bool reset, uint32_t 
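amdgpu_umc_build_pages_hash() above sorts the page list, serializes it to a string and hashes it with a keyed siphash so identical bad-page sets can be recognized without logging them twice. A userspace model of that order-independent key; FNV-1a is only a stand-in for siphash and the buffer size is arbitrary:

/* Illustrative only: sort, serialize, hash -- same pages in any order give
 * the same key. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp_u64(const void *a, const void *b)
{
        uint64_t x = *(const uint64_t *)a, y = *(const uint64_t *)b;

        return (x > y) - (x < y);
}

static uint64_t fnv1a(const char *s, size_t len)
{
        uint64_t h = 0xcbf29ce484222325ULL;

        for (size_t i = 0; i < len; i++) {
                h ^= (unsigned char)s[i];
                h *= 0x100000001b3ULL;
        }
        return h;
}

static uint64_t pages_hash(uint64_t *pfns, int len)
{
        char buf[256] = {0};
        int offset = 0;

        qsort(pfns, len, sizeof(*pfns), cmp_u64);
        for (int i = 0; i < len && offset < (int)sizeof(buf); i++)
                offset += snprintf(buf + offset, sizeof(buf) - offset,
                                   "%" PRIx64, pfns[i]);
        return fnv1a(buf, strlen(buf));
}

int main(void)
{
        uint64_t a[] = { 0x30, 0x10, 0x20 };
        uint64_t b[] = { 0x10, 0x20, 0x30 };

        printf("%d\n", pages_hash(a, 3) == pages_hash(b, 3));   /* 1 */
        return 0;
}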
timeout_ms); + uint32_t reset, uint32_t timeout_ms); + +int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev, + uint64_t status, uint64_t ipid, uint64_t addr); +int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev, + uint64_t *pfns, int len, uint64_t *val); +int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev, + struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err); + +void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev, + void *ras_error_status); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c index f7c73533e3..e01c1c8e64 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c @@ -878,6 +878,8 @@ static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = { .hw_fini = umsch_mm_hw_fini, .suspend = umsch_mm_suspend, .resume = umsch_mm_resume, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 9c514a606a..677eb14155 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -93,7 +93,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work); int amdgpu_vcn_early_init(struct amdgpu_device *adev) { - char ucode_prefix[30]; + char ucode_prefix[25]; char fw_name[40]; int r, i; @@ -185,7 +185,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); - if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) { + if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(5, 0, 0)) { + fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)); + log_offset = offsetof(struct amdgpu_vcn5_fw_shared, fw_log); + } else if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) { fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)); log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index a418393d89..9f06def236 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -454,6 +454,16 @@ struct amdgpu_vcn_rb_metadata { uint8_t pad[26]; }; +struct amdgpu_vcn5_fw_shared { + uint32_t present_flag_0; + uint8_t pad[12]; + struct amdgpu_fw_shared_unified_queue_struct sq; + uint8_t pad1[8]; + struct amdgpu_fw_shared_fw_logging fw_log; + struct amdgpu_fw_shared_rb_setup rb_setup; + uint8_t pad2[4]; +}; + #define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80 #define VCN_BLOCK_DECODE_DISABLE_MASK 0x40 #define VCN_BLOCK_QUEUE_DISABLE_MASK 0xC0 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 7a4eae3677..54ab51a4ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -32,6 +32,7 @@ #include "amdgpu.h" #include "amdgpu_ras.h" +#include "amdgpu_reset.h" #include "vi.h" #include "soc15.h" #include "nv.h" @@ -424,7 +425,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) return -EINVAL; if (pf2vf_info->size > 1024) { - DRM_ERROR("invalid pf2vf message size\n"); + dev_err(adev->dev, "invalid pf2vf message size: 0x%x\n", pf2vf_info->size); return -EINVAL; } @@ -435,7 +436,9 @@ static int 
amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size, adev->virt.fw_reserve.checksum_key, checksum); if (checksum != checkval) { - DRM_ERROR("invalid pf2vf message\n"); + dev_err(adev->dev, + "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n", + checksum, checkval); return -EINVAL; } @@ -449,7 +452,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size, 0, checksum); if (checksum != checkval) { - DRM_ERROR("invalid pf2vf message\n"); + dev_err(adev->dev, + "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n", + checksum, checkval); return -EINVAL; } @@ -485,7 +490,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid; break; default: - DRM_ERROR("invalid pf2vf version\n"); + dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version); return -EINVAL; } @@ -571,6 +576,11 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) vf2pf_info->decode_usage = 0; vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr; + vf2pf_info->mes_info_addr = (uint64_t)adev->mes.resource_1_gpu_addr; + + if (adev->mes.resource_1) { + vf2pf_info->mes_info_size = adev->mes.resource_1->tbo.base.size; + } vf2pf_info->checksum = amd_sriov_msg_checksum( vf2pf_info, vf2pf_info->header.size, 0, 0); @@ -584,8 +594,22 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work) int ret; ret = amdgpu_virt_read_pf2vf_data(adev); - if (ret) + if (ret) { + adev->virt.vf2pf_update_retry_cnt++; + if ((adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) && + amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) { + amdgpu_ras_set_fed(adev, true); + if (amdgpu_reset_domain_schedule(adev->reset_domain, + &adev->virt.flr_work)) + return; + else + dev_err(adev->dev, "Failed to queue work! 
at %s", __func__); + } + goto out; + } + + adev->virt.vf2pf_update_retry_cnt = 0; amdgpu_virt_write_vf2pf_data(adev); out: @@ -606,6 +630,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) adev->virt.fw_reserve.p_pf2vf = NULL; adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0; + adev->virt.vf2pf_update_retry_cnt = 0; if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) { DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!"); @@ -705,12 +730,6 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } - if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) - /* VF MMIO access (except mailbox range) from CPU - * will be blocked during sriov runtime - */ - adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT; - /* we have the ability to check now */ if (amdgpu_sriov_vf(adev)) { switch (adev->asic_type) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 3f59b7b552..642f1fd287 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -52,6 +52,8 @@ /* tonga/fiji use this offset */ #define mmBIF_IOV_FUNC_IDENTIFIER 0x1503 +#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 5 + enum amdgpu_sriov_vf_mode { SRIOV_VF_MODE_BARE_METAL = 0, SRIOV_VF_MODE_ONE_VF, @@ -130,6 +132,8 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6), /* VCN RB decouple */ AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7), + /* MES info */ + AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8), }; enum AMDGIM_REG_ACCESS_FLAG { @@ -257,6 +261,7 @@ struct amdgpu_virt { /* vf2pf message */ struct delayed_work vf2pf_work; uint32_t vf2pf_update_interval_ms; + int vf2pf_update_retry_cnt; /* multimedia bandwidth config */ bool is_mm_bw_enabled; @@ -332,6 +337,8 @@ static inline bool is_virtual_machine(void) ((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT) #define amdgpu_sriov_is_vcn_rb_decouple(adev) \ ((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE) +#define amdgpu_sriov_is_mes_info_enable(adev) \ + ((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index ba6d1876ce..fde66225c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -672,6 +672,8 @@ static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = { .soft_reset = amdgpu_vkms_soft_reset, .set_clockgating_state = amdgpu_vkms_set_clockgating_state, .set_powergating_state = amdgpu_vkms_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 94089069c9..0f71060664 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -434,7 +434,7 @@ uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm) if (!vm) return result; - result += vm->generation; + result += lower_32_bits(vm->generation); /* Add one if the page tables will be re-generated on next CS */ if (drm_sched_entity_error(&vm->delayed)) ++result; @@ -463,13 +463,14 @@ int 
amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*validate)(void *p, struct amdgpu_bo *bo), void *param) { + uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm); struct amdgpu_vm_bo_base *bo_base; struct amdgpu_bo *shadow; struct amdgpu_bo *bo; int r; - if (drm_sched_entity_error(&vm->delayed)) { - ++vm->generation; + if (vm->generation != new_vm_generation) { + vm->generation = new_vm_generation; amdgpu_vm_bo_reset_state_machine(vm); amdgpu_vm_fini_entities(vm); r = amdgpu_vm_init_entities(adev, vm); @@ -885,6 +886,44 @@ static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence, kfree(tlb_cb); } +/** + * amdgpu_vm_tlb_flush - prepare TLB flush + * + * @params: parameters for update + * @fence: input fence to sync TLB flush with + * @tlb_cb: the callback structure + * + * Increments the tlb sequence to make sure that future CS execute a VM flush. + */ +static void +amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params, + struct dma_fence **fence, + struct amdgpu_vm_tlb_seq_struct *tlb_cb) +{ + struct amdgpu_vm *vm = params->vm; + + if (!fence || !*fence) + return; + + tlb_cb->vm = vm; + if (!dma_fence_add_callback(*fence, &tlb_cb->cb, + amdgpu_vm_tlb_seq_cb)) { + dma_fence_put(vm->last_tlb_flush); + vm->last_tlb_flush = dma_fence_get(*fence); + } else { + amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb); + } + + /* Prepare a TLB flush fence to be attached to PTs */ + if (!params->unlocked && vm->is_compute_context) { + amdgpu_vm_tlb_fence_create(params->adev, vm, fence); + + /* Makes sure no PD/PT is freed before the flush */ + dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence, + DMA_RESV_USAGE_BOOKKEEP); + } +} + /** * amdgpu_vm_update_range - update a range in the vm page table * @@ -916,8 +955,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct ttm_resource *res, dma_addr_t *pages_addr, struct dma_fence **fence) { - struct amdgpu_vm_update_params params; struct amdgpu_vm_tlb_seq_struct *tlb_cb; + struct amdgpu_vm_update_params params; struct amdgpu_res_cursor cursor; enum amdgpu_sync_mode sync_mode; int r, idx; @@ -927,8 +966,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL); if (!tlb_cb) { - r = -ENOMEM; - goto error_unlock; + drm_dev_exit(idx); + return -ENOMEM; } /* Vega20+XGMI where PTEs get inadvertently cached in L2 texture cache, @@ -948,7 +987,9 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, params.immediate = immediate; params.pages_addr = pages_addr; params.unlocked = unlocked; + params.needs_flush = flush_tlb; params.allow_override = allow_override; + INIT_LIST_HEAD(¶ms.tlb_flush_waitlist); /* Implicitly sync to command submissions in the same VM before * unmapping. Sync to moving fences before mapping. 
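The hunk above introduces amdgpu_vm_tlb_flush(), which defers bumping the VM's TLB bookkeeping until the fence for the page-table update signals, and falls back to doing it immediately when that fence has already signalled. The following is only a minimal, hypothetical C sketch of that dma-fence callback pattern, not code from this patch; the my_* names are illustrative, and the locking, compute-context fence creation and error handling of the real helper are omitted.

#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>

struct my_seq_cb {
	struct dma_fence_cb cb;
	atomic64_t *seq;	/* sequence to bump once the fence signals */
};

static void my_seq_cb_func(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct my_seq_cb *scb = container_of(cb, struct my_seq_cb, cb);

	/* Future submissions that compare against *seq will now flush. */
	atomic64_inc(scb->seq);
	kfree(scb);
}

static void my_bump_seq_after(struct dma_fence *fence, atomic64_t *seq)
{
	struct my_seq_cb *scb;

	scb = kmalloc(sizeof(*scb), GFP_KERNEL);
	if (!scb) {
		/* No memory: bump right away rather than lose the flush. */
		atomic64_inc(seq);
		return;
	}

	scb->seq = seq;
	/*
	 * dma_fence_add_callback() returns 0 when the callback was armed
	 * and -ENOENT when the fence has already signalled; in the latter
	 * case run the callback by hand, as the amdgpu helper does.
	 */
	if (dma_fence_add_callback(fence, &scb->cb, my_seq_cb_func))
		my_seq_cb_func(fence, &scb->cb);
}

Running the callback inline for an already-signalled fence keeps the sequence bump ordered after the page-table update in both paths, which is the property the new helper relies on.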
@@ -1031,24 +1072,18 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, } r = vm->update_funcs->commit(¶ms, fence); + if (r) + goto error_free; - if (flush_tlb || params.table_freed) { - tlb_cb->vm = vm; - if (fence && *fence && - !dma_fence_add_callback(*fence, &tlb_cb->cb, - amdgpu_vm_tlb_seq_cb)) { - dma_fence_put(vm->last_tlb_flush); - vm->last_tlb_flush = dma_fence_get(*fence); - } else { - amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb); - } + if (params.needs_flush) { + amdgpu_vm_tlb_flush(¶ms, fence, tlb_cb); tlb_cb = NULL; } + amdgpu_vm_pt_free_list(adev, ¶ms); + error_free: kfree(tlb_cb); - -error_unlock: amdgpu_vm_eviction_unlock(vm); drm_dev_exit(idx); return r; @@ -2407,10 +2442,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->last_update = dma_fence_get_stub(); vm->last_unlocked = dma_fence_get_stub(); vm->last_tlb_flush = dma_fence_get_stub(); - vm->generation = 0; + vm->generation = amdgpu_vm_generation(adev, NULL); mutex_init(&vm->eviction_lock); vm->evicting = false; + vm->tlb_fence_context = dma_fence_context_alloc(1); r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, false, &root, xcp_id); @@ -2944,6 +2980,14 @@ void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev, if (vm && status) { vm->fault_info.addr = addr; vm->fault_info.status = status; + /* + * Update the fault information globally for later usage + * when vm could be stale or freed. + */ + adev->vm_manager.fault_info.addr = addr; + adev->vm_manager.fault_info.vmhub = vmhub; + adev->vm_manager.fault_info.status = status; + if (AMDGPU_IS_GFXHUB(vmhub)) { vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX; vm->fault_info.vmhub |= diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 047ec1930d..54d7da396d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -257,15 +257,20 @@ struct amdgpu_vm_update_params { unsigned int num_dw_left; /** - * @table_freed: return true if page table is freed when updating + * @needs_flush: true whenever we need to invalidate the TLB */ - bool table_freed; + bool needs_flush; /** * @allow_override: true for memory that is not uncached: allows MTYPE * to be overridden for NUMA local memory. 
*/ bool allow_override; + + /** + * @tlb_flush_waitlist: temporary storage for BOs until tlb_flush + */ + struct list_head tlb_flush_waitlist; }; struct amdgpu_vm_update_funcs { @@ -342,6 +347,7 @@ struct amdgpu_vm { atomic64_t tlb_seq; struct dma_fence *last_tlb_flush; atomic64_t kfd_last_flushed_seq; + uint64_t tlb_fence_context; /* How many times we had to re-generate the page tables */ uint64_t generation; @@ -422,6 +428,8 @@ struct amdgpu_vm_manager { * look up VM of a page fault */ struct xarray pasids; + /* Global registration of recent page fault information */ + struct amdgpu_vm_fault_info fault_info; }; struct amdgpu_bo_va_mapping; @@ -544,6 +552,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params, uint64_t start, uint64_t end, uint64_t dst, uint64_t flags); void amdgpu_vm_pt_free_work(struct work_struct *work); +void amdgpu_vm_pt_free_list(struct amdgpu_device *adev, + struct amdgpu_vm_update_params *params); #if defined(CONFIG_DEBUG_FS) void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m); @@ -609,5 +619,8 @@ void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev, uint64_t addr, uint32_t status, unsigned int vmhub); +void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct dma_fence **fence); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index 6e31621452..3895bd7d17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -108,7 +108,9 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p, static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p, struct dma_fence **fence) { - /* Flush HDP */ + if (p->needs_flush) + atomic64_inc(&p->vm->tlb_seq); + mb(); amdgpu_device_flush_hdp(p->adev, NULL); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 124389a6bf..f07647a9a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -622,40 +622,58 @@ void amdgpu_vm_pt_free_work(struct work_struct *work) } /** - * amdgpu_vm_pt_free_dfs - free PD/PT levels + * amdgpu_vm_pt_free_list - free PD/PT levels * * @adev: amdgpu device structure - * @vm: amdgpu vm structure - * @start: optional cursor where to start freeing PDs/PTs - * @unlocked: vm resv unlock status + * @params: see amdgpu_vm_update_params definition * - * Free the page directory or page table level and all sub levels. 
+ * Free the page directory objects saved in the flush list */ -static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_vm_pt_cursor *start, - bool unlocked) +void amdgpu_vm_pt_free_list(struct amdgpu_device *adev, + struct amdgpu_vm_update_params *params) { - struct amdgpu_vm_pt_cursor cursor; - struct amdgpu_vm_bo_base *entry; + struct amdgpu_vm_bo_base *entry, *next; + struct amdgpu_vm *vm = params->vm; + bool unlocked = params->unlocked; + + if (list_empty(¶ms->tlb_flush_waitlist)) + return; if (unlocked) { spin_lock(&vm->status_lock); - for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) - list_move(&entry->vm_status, &vm->pt_freed); - - if (start) - list_move(&start->entry->vm_status, &vm->pt_freed); + list_splice_init(¶ms->tlb_flush_waitlist, &vm->pt_freed); spin_unlock(&vm->status_lock); schedule_work(&vm->pt_free_work); return; } - for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) + list_for_each_entry_safe(entry, next, ¶ms->tlb_flush_waitlist, vm_status) amdgpu_vm_pt_free(entry); +} - if (start) - amdgpu_vm_pt_free(start->entry); +/** + * amdgpu_vm_pt_add_list - add PD/PT level to the flush list + * + * @params: parameters for the update + * @cursor: first PT entry to start DF search from, non NULL + * + * This list will be freed after TLB flush. + */ +static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params, + struct amdgpu_vm_pt_cursor *cursor) +{ + struct amdgpu_vm_pt_cursor seek; + struct amdgpu_vm_bo_base *entry; + + spin_lock(¶ms->vm->status_lock); + for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) { + if (entry && entry->bo) + list_move(&entry->vm_status, ¶ms->tlb_flush_waitlist); + } + + /* enter start node now */ + list_move(&cursor->entry->vm_status, ¶ms->tlb_flush_waitlist); + spin_unlock(¶ms->vm->status_lock); } /** @@ -667,7 +685,13 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev, */ void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - amdgpu_vm_pt_free_dfs(adev, vm, NULL, false); + struct amdgpu_vm_pt_cursor cursor; + struct amdgpu_vm_bo_base *entry; + + for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) { + if (entry) + amdgpu_vm_pt_free(entry); + } } /** @@ -682,11 +706,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params, struct amdgpu_vm_bo_base *entry) { struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry); - struct amdgpu_bo *bo = parent->bo, *pbo; + struct amdgpu_bo *bo, *pbo; struct amdgpu_vm *vm = params->vm; uint64_t pde, pt, flags; unsigned int level; + if (WARN_ON(!parent)) + return -EINVAL; + + bo = parent->bo; for (level = 0, pbo = bo->parent; pbo; ++level) pbo = pbo->parent; @@ -972,10 +1000,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params, while (cursor.pfn < frag_start) { /* Make sure previous mapping is freed */ if (cursor.entry->bo) { - params->table_freed = true; - amdgpu_vm_pt_free_dfs(adev, params->vm, - &cursor, - params->unlocked); + params->needs_flush = true; + amdgpu_vm_pt_add_list(params, &cursor); } amdgpu_vm_pt_next(adev, &cursor); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 349416e176..66e8a01612 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -126,6 +126,10 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, WARN_ON(ib->length_dw == 0); amdgpu_ring_pad_ib(ring, ib); + + if 
(p->needs_flush) + atomic64_inc(&p->vm->tlb_seq); + WARN_ON(ib->length_dw > p->num_dw_left); f = amdgpu_job_submit(p->job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c new file mode 100644 index 0000000000..51cddfa3f1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include + +#include "amdgpu.h" +#include "amdgpu_vm.h" +#include "amdgpu_gmc.h" + +struct amdgpu_tlb_fence { + struct dma_fence base; + struct amdgpu_device *adev; + struct dma_fence *dependency; + struct work_struct work; + spinlock_t lock; + uint16_t pasid; + +}; + +static const char *amdgpu_tlb_fence_get_driver_name(struct dma_fence *fence) +{ + return "amdgpu tlb fence"; +} + +static const char *amdgpu_tlb_fence_get_timeline_name(struct dma_fence *f) +{ + return "amdgpu tlb timeline"; +} + +static void amdgpu_tlb_fence_work(struct work_struct *work) +{ + struct amdgpu_tlb_fence *f = container_of(work, typeof(*f), work); + int r; + + if (f->dependency) { + dma_fence_wait(f->dependency, false); + dma_fence_put(f->dependency); + f->dependency = NULL; + } + + r = amdgpu_gmc_flush_gpu_tlb_pasid(f->adev, f->pasid, 2, true, 0); + if (r) { + dev_err(f->adev->dev, "TLB flush failed for PASID %d.\n", + f->pasid); + dma_fence_set_error(&f->base, r); + } + + dma_fence_signal(&f->base); + dma_fence_put(&f->base); +} + +static const struct dma_fence_ops amdgpu_tlb_fence_ops = { + .use_64bit_seqno = true, + .get_driver_name = amdgpu_tlb_fence_get_driver_name, + .get_timeline_name = amdgpu_tlb_fence_get_timeline_name +}; + +void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct dma_fence **fence) +{ + struct amdgpu_tlb_fence *f; + + f = kmalloc(sizeof(*f), GFP_KERNEL); + if (!f) { + /* + * We can't fail since the PDEs and PTEs are already updated, so + * just block for the dependency and execute the TLB flush + */ + if (*fence) + dma_fence_wait(*fence, false); + + amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, 2, true, 0); + *fence = dma_fence_get_stub(); + return; + } + + f->adev = adev; + f->dependency = *fence; + f->pasid = vm->pasid; + INIT_WORK(&f->work, amdgpu_tlb_fence_work); + spin_lock_init(&f->lock); + + dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock, + vm->tlb_fence_context, 
atomic64_read(&vm->tlb_seq)); + + /* TODO: We probably need a separate wq here */ + dma_fence_get(&f->base); + schedule_work(&f->work); + + *fence = &f->base; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 8db8802443..6c30eceec8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -450,6 +450,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); u64 vis_usage = 0, max_bytes, min_block_size; struct amdgpu_vram_mgr_resource *vres; u64 size, remaining_size, lpfn, fpfn; @@ -468,7 +469,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, if (tbo->type != ttm_bo_type_kernel) max_bytes -= AMDGPU_VM_RESERVED_VRAM; - if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { + if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) { pages_per_block = ~0ul; } else { #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -477,7 +478,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, /* default to 2MB */ pages_per_block = 2UL << (20UL - PAGE_SHIFT); #endif - pages_per_block = max_t(uint32_t, pages_per_block, + pages_per_block = max_t(u32, pages_per_block, tbo->page_alignment); } @@ -498,9 +499,12 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, if (place->flags & TTM_PL_FLAG_TOPDOWN) vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; - if (place->flags & TTM_PL_FLAG_CONTIGUOUS) + if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION; + if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED) + vres->flags |= DRM_BUDDY_CLEAR_ALLOCATION; + if (fpfn || lpfn != mgr->mm.size) /* Allocate blocks in desired range */ vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; @@ -514,21 +518,31 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, else min_block_size = mgr->default_page_size; - BUG_ON(min_block_size < mm->chunk_size); - /* Limit maximum size to 2GiB due to SG table limitations */ size = min(remaining_size, 2ULL << 30); if ((size >= (u64)pages_per_block << PAGE_SHIFT) && - !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1))) + !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1))) min_block_size = (u64)pages_per_block << PAGE_SHIFT; + BUG_ON(min_block_size < mm->chunk_size); + r = drm_buddy_alloc_blocks(mm, fpfn, lpfn, size, min_block_size, &vres->blocks, vres->flags); + + if (unlikely(r == -ENOSPC) && pages_per_block == ~0ul && + !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) { + vres->flags &= ~DRM_BUDDY_CONTIGUOUS_ALLOCATION; + pages_per_block = max_t(u32, 2UL << (20UL - PAGE_SHIFT), + tbo->page_alignment); + + continue; + } + if (unlikely(r)) goto error_free_blocks; @@ -571,7 +585,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, return 0; error_free_blocks: - drm_buddy_free_list(mm, &vres->blocks); + drm_buddy_free_list(mm, &vres->blocks, 0); mutex_unlock(&mgr->lock); error_fini: ttm_resource_fini(man, &vres->base); @@ -604,7 +618,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, amdgpu_vram_mgr_do_reserve(man); - drm_buddy_free_list(mm, &vres->blocks); + drm_buddy_free_list(mm, &vres->blocks, vres->flags); mutex_unlock(&mgr->lock); atomic64_sub(vis_usage, &mgr->vis_usage); @@ -912,7 +926,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) kfree(rsv); list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, 
blocks) { - drm_buddy_free_list(&mgr->mm, &rsv->allocated); + drm_buddy_free_list(&mgr->mm, &rsv->allocated, 0); kfree(rsv); } if (!adev->gmc.is_app_apu) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h index 0e04e42cf8..b256cbc2bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h @@ -53,10 +53,20 @@ static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block) return (u64)PAGE_SIZE << drm_buddy_block_order(block); } +static inline bool amdgpu_vram_mgr_is_cleared(struct drm_buddy_block *block) +{ + return drm_buddy_block_is_clear(block); +} + static inline struct amdgpu_vram_mgr_resource * to_amdgpu_vram_mgr_resource(struct ttm_resource *res) { return container_of(res, struct amdgpu_vram_mgr_resource, base); } +static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res) +{ + to_amdgpu_vram_mgr_resource(res)->flags |= DRM_BUDDY_CLEARED; +} + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 20d51f6c9b..dd2ec48cf5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -1035,15 +1035,16 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev) return 0; } -static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, - struct aca_bank_report *report, void *data) +static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) { struct amdgpu_device *adev = handle->adev; + struct aca_bank_info info; const char *error_str; - u64 status; + u64 status, count; int ret, ext_error_code; - ret = aca_bank_info_decode(bank, &report->info); + ret = aca_bank_info_decode(bank, &info); if (ret) return ret; @@ -1055,15 +1056,28 @@ static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struc if (error_str) dev_info(adev->dev, "%s detected\n", error_str); - if ((type == ACA_ERROR_TYPE_UE && ext_error_code == 0) || - (type == ACA_ERROR_TYPE_CE && ext_error_code == 6)) - report->count[type] = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]); + count = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]); - return 0; + switch (type) { + case ACA_SMU_TYPE_UE: + if (ext_error_code != 0 && ext_error_code != 9) + count = 0ULL; + + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, count); + break; + case ACA_SMU_TYPE_CE: + count = ext_error_code == 6 ? 
count : 0ULL; + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE, count); + break; + default: + return -EINVAL; + } + + return ret; } static const struct aca_bank_ops xgmi_v6_4_0_aca_bank_ops = { - .aca_bank_generate_report = xgmi_v6_4_0_aca_bank_generate_report, + .aca_bank_parser = xgmi_v6_4_0_aca_bank_parser, }; static const struct aca_info xgmi_v6_4_0_aca_info = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index 1592c63b30..a3bfc16de6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -44,6 +44,7 @@ struct amdgpu_hive_info { struct amdgpu_reset_domain *reset_domain; atomic_t ras_recovery; + struct ras_event_manager event_mgr; }; struct amdgpu_pcs_ras_field { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 51a14f6d93..fb2b394bb9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -94,7 +94,8 @@ union amd_sriov_msg_feature_flags { uint32_t reg_indirect_acc : 1; uint32_t av1_support : 1; uint32_t vcn_rb_decouple : 1; - uint32_t reserved : 24; + uint32_t mes_info_enable : 1; + uint32_t reserved : 23; } flags; uint32_t all; }; @@ -157,7 +158,7 @@ struct amd_sriov_msg_pf2vf_info_header { uint32_t reserved[2]; }; -#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (48) +#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (49) struct amd_sriov_msg_pf2vf_info { /* header contains size and version */ struct amd_sriov_msg_pf2vf_info_header header; @@ -208,6 +209,8 @@ struct amd_sriov_msg_pf2vf_info { struct amd_sriov_msg_uuid_info uuid_info; /* PCIE atomic ops support flag */ uint32_t pcie_atomic_ops_support_flags; + /* Portion of GPU memory occupied by VF. MAX value is 65535, but set to uint32_t to maintain alignment with reserved size */ + uint32_t gpu_capacity; /* reserved */ uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE]; }; @@ -221,7 +224,7 @@ struct amd_sriov_msg_vf2pf_info_header { uint32_t reserved[2]; }; -#define AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE (70) +#define AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE (73) struct amd_sriov_msg_vf2pf_info { /* header contains size and version */ struct amd_sriov_msg_vf2pf_info_header header; @@ -265,7 +268,9 @@ struct amd_sriov_msg_vf2pf_info { uint32_t version; } ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE]; uint64_t dummy_page_addr; - + /* FB allocated for guest MES to record UQ info */ + uint64_t mes_info_addr; + uint32_t mes_info_size; /* reserved */ uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE]; }; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index fbb43ae762..d4e2aed2ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -422,7 +422,7 @@ __aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr) if (adev->gmc.num_mem_partitions == num_xcc / 2) return (adev->flags & AMD_IS_APU) ? 
AMDGPU_TPX_PARTITION_MODE : - AMDGPU_QPX_PARTITION_MODE; + AMDGPU_CPX_PARTITION_MODE; if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU)) return AMDGPU_DPX_PARTITION_MODE; @@ -630,7 +630,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) { - u32 mask, inst_mask = adev->sdma.sdma_mask; + u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask; int ret, i; /* generally 1 AID supports 4 instances */ @@ -642,7 +642,9 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask; inst_mask >>= adev->sdma.num_inst_per_aid, ++i) { - if ((inst_mask & mask) == mask) + avail_inst = inst_mask & mask; + if (avail_inst == mask || avail_inst == 0x3 || + avail_inst == 0xc) adev->aid_mask |= (1 << i); } diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 72362df352..d552e01335 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -1243,6 +1243,7 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, ectx.ps_size = params_size; ectx.abort = false; ectx.last_jump = 0; + ectx.last_jump_jiffies = 0; if (ws) { ectx.ws = kcalloc(4, ws, GFP_KERNEL); ectx.ws_size = ws; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index a3a643254d..cf1d5d462b 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1375,14 +1375,14 @@ static int cik_asic_pci_config_reset(struct amdgpu_device *adev) return r; } -static bool cik_asic_supports_baco(struct amdgpu_device *adev) +static int cik_asic_supports_baco(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_BONAIRE: case CHIP_HAWAII: return amdgpu_dpm_is_baco_supported(adev); default: - return false; + return 0; } } @@ -2210,6 +2210,8 @@ static const struct amd_ip_funcs cik_common_ip_funcs = { .soft_reset = cik_common_soft_reset, .set_clockgating_state = cik_common_set_clockgating_state, .set_powergating_state = cik_common_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ip_block_version cik_common_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index f24e34dc33..576baa9dbb 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -435,6 +435,8 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = { .soft_reset = cik_ih_soft_reset, .set_clockgating_state = cik_ih_set_clockgating_state, .set_powergating_state = cik_ih_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs cik_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index a3fccc4c1f..6948ebda0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -1228,6 +1228,8 @@ static const struct amd_ip_funcs cik_sdma_ip_funcs = { .soft_reset = cik_sdma_soft_reset, .set_clockgating_state = cik_sdma_set_clockgating_state, .set_powergating_state = cik_sdma_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { @@ -1290,7 +1292,7 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev) * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer - * @tmz: is this a secure 
operation + * @copy_flags: unused * * Copy GPU buffers using the DMA engine (CIK). * Used by the amdgpu ttm implementation to move pages if @@ -1300,7 +1302,7 @@ static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, - bool tmz) + uint32_t copy_flags) { ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0); ib->ptr[ib->length_dw++] = byte_count; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index c19681492e..0726437873 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -433,6 +433,8 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = { .soft_reset = cz_ih_soft_reset, .set_clockgating_state = cz_ih_set_clockgating_state, .set_powergating_state = cz_ih_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs cz_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 221af054d8..b44fce44c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -3333,6 +3333,8 @@ static const struct amd_ip_funcs dce_v10_0_ip_funcs = { .soft_reset = dce_v10_0_soft_reset, .set_clockgating_state = dce_v10_0_set_clockgating_state, .set_powergating_state = dce_v10_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 69e8b0db6c..80b2e7f79a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -3464,6 +3464,8 @@ static const struct amd_ip_funcs dce_v11_0_ip_funcs = { .soft_reset = dce_v11_0_soft_reset, .set_clockgating_state = dce_v11_0_set_clockgating_state, .set_powergating_state = dce_v11_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 60d40201fd..db20012600 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -3154,6 +3154,8 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = { .soft_reset = dce_v6_0_soft_reset, .set_clockgating_state = dce_v6_0_set_clockgating_state, .set_powergating_state = dce_v6_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 5a5fcc45e4..5b56100ec9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -3242,6 +3242,8 @@ static const struct amd_ip_funcs dce_v8_0_ip_funcs = { .soft_reset = dce_v8_0_soft_reset, .set_clockgating_state = dce_v8_0_set_clockgating_state, .set_powergating_state = dce_v8_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static void diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 701146d649..536287ddd2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -276,6 +276,99 @@ MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec2.bin"); MODULE_FIRMWARE("amdgpu/gc_10_3_7_rlc.bin"); +static const struct amdgpu_hwip_reg_entry gc_reg_list_10_1[] = { + SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2), + 
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS3), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT1), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT2), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STALLED_STAT1), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STALLED_STAT1), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_BUSY_STAT), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_BUSY_STAT), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_BUSY_STAT), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_BUSY_STAT2), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_BUSY_STAT2), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_ERROR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_HPD_STATUS0), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_BASE), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_BASE), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_BASE), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_RPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_BASE), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_WPTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_CMD_BUFSZ), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_CMD_BUFSZ), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_CMD_BUFSZ), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_CMD_BUFSZ), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BUFSZ), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BUFSZ), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BUFSZ), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_LO), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_HI), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BUFSZ), + SOC15_REG_ENTRY_STR(GC, 0, mmCPF_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmCPC_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmCPG_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmGDS_PROTECTION_FAULT), + SOC15_REG_ENTRY_STR(GC, 0, mmGDS_VM_PROTECTION_FAULT), + SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_STATUS_2), + SOC15_REG_ENTRY_STR(GC, 0, mmPA_CL_CNTL_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmRMI_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmSQC_DCACHE_UTCL0_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmSQC_ICACHE_UTCL0_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmSQG_UTCL0_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmTCP_UTCL0_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmWD_UTCL1_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmGCVM_L2_PROTECTION_FAULT_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_DEBUG), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_CNTL), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_INSTR_PNTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC1_INSTR_PNTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC2_INSTR_PNTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_DEBUG_INTERRUPT_INSTR_PNTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_INSTR_PNTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_INSTR_PNTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_INSTR_PNTR), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_STAT), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_COMMAND), + SOC15_REG_ENTRY_STR(GC, 0, 
mmRLC_SMU_MESSAGE), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_1), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_2), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_3), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_4), + SOC15_REG_ENTRY_STR(GC, 0, mmSMU_RLC_RESPONSE), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SAFE_MODE), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_RLCS_GPM_STAT_2), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SPP_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_RLCS_BOOTLOAD_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_A), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_B), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_ADDR), + SOC15_REG_ENTRY_STR(GC, 0, mmRLC_LX6_CORE_PDEBUG_INST) +}; + static const struct soc15_reg_golden golden_settings_gc_10_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100), @@ -3964,7 +4057,7 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) { - char fw_name[40]; + char fw_name[53]; char ucode_prefix[30]; const char *wks = ""; int err; @@ -4490,6 +4583,22 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, hw_prio, NULL); } +static void gfx_v10_0_alloc_dump_mem(struct amdgpu_device *adev) +{ + uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1); + uint32_t *ptr; + + ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL); + if (ptr == NULL) { + DRM_ERROR("Failed to allocate memory for IP Dump\n"); + adev->gfx.ip_dump = NULL; + adev->gfx.reg_count = 0; + } else { + adev->gfx.ip_dump = ptr; + adev->gfx.reg_count = reg_count; + } +} + static int gfx_v10_0_sw_init(void *handle) { int i, j, k, r, ring_id = 0; @@ -4518,7 +4627,7 @@ static int gfx_v10_0_sw_init(void *handle) case IP_VERSION(10, 3, 3): case IP_VERSION(10, 3, 7): adev->gfx.me.num_me = 1; - adev->gfx.me.num_pipe_per_me = 1; + adev->gfx.me.num_pipe_per_me = 2; adev->gfx.me.num_queue_per_pipe = 1; adev->gfx.mec.num_mec = 2; adev->gfx.mec.num_pipe_per_mec = 4; @@ -4642,6 +4751,8 @@ static int gfx_v10_0_sw_init(void *handle) gfx_v10_0_gpu_early_init(adev); + gfx_v10_0_alloc_dump_mem(adev); + return 0; } @@ -4694,6 +4805,8 @@ static int gfx_v10_0_sw_fini(void *handle) gfx_v10_0_free_microcode(adev); + kfree(adev->gfx.ip_dump); + return 0; } @@ -8317,7 +8430,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) } reg_mem_engine = 0; } else { - ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; + ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe; reg_mem_engine = 1; /* pfp */ } @@ -9154,6 +9267,36 @@ static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ } +static void gfx_v10_ip_print(void *handle, struct drm_printer *p) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t i; + uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1); + + if (!adev->gfx.ip_dump) + return; + + for (i = 0; i < reg_count; i++) + drm_printf(p, "%-50s \t 0x%08x\n", + gc_reg_list_10_1[i].reg_name, + adev->gfx.ip_dump[i]); +} + +static void gfx_v10_ip_dump(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uint32_t i; + uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1); + + if (!adev->gfx.ip_dump) + return; + + amdgpu_gfx_off_ctrl(adev, 
false); + for (i = 0; i < reg_count; i++) + adev->gfx.ip_dump[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_10_1[i])); + amdgpu_gfx_off_ctrl(adev, true); +} + static const struct amd_ip_funcs gfx_v10_0_ip_funcs = { .name = "gfx_v10_0", .early_init = gfx_v10_0_early_init, @@ -9170,6 +9313,8 @@ static const struct amd_ip_funcs gfx_v10_0_ip_funcs = { .set_clockgating_state = gfx_v10_0_set_clockgating_state, .set_powergating_state = gfx_v10_0_set_powergating_state, .get_clockgating_state = gfx_v10_0_get_clockgating_state, + .dump_ip_state = gfx_v10_ip_dump, + .print_ip_state = gfx_v10_ip_print, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index f00e05aba4..ad6431013c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -510,7 +510,7 @@ static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev) static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) { char fw_name[40]; - char ucode_prefix[30]; + char ucode_prefix[25]; int err; const struct rlc_firmware_header_v2_0 *rlc_hdr; uint16_t version_major; @@ -4506,14 +4506,11 @@ static int gfx_v11_0_soft_reset(void *handle) gfx_v11_0_set_safe_mode(adev, 0); + mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k); - WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); + soc21_grbm_select(adev, i, k, j, 0); WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2); WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1); @@ -4523,16 +4520,14 @@ static int gfx_v11_0_soft_reset(void *handle) for (i = 0; i < adev->gfx.me.num_me; ++i) { for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { - tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k); - WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); + soc21_grbm_select(adev, i, k, j, 0); WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1); } } } + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); /* Try to acquire the gfx mutex before access to CP_VMID_RESET */ r = gfx_v11_0_request_gfx_index_mutex(adev, 1); @@ -6174,6 +6169,8 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = { .set_clockgating_state = gfx_v11_0_set_clockgating_state, .set_powergating_state = gfx_v11_0_set_powergating_state, .get_clockgating_state = gfx_v11_0_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 34f9211b26..d0992ce9fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -3457,6 +3457,8 @@ static const struct amd_ip_funcs gfx_v6_0_ip_funcs = { .soft_reset = gfx_v6_0_soft_reset, .set_clockgating_state = gfx_v6_0_set_clockgating_state, .set_powergating_state = gfx_v6_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct 
amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 86a4865b1a..541dbd70d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4977,6 +4977,8 @@ static const struct amd_ip_funcs gfx_v7_0_ip_funcs = { .soft_reset = gfx_v7_0_soft_reset, .set_clockgating_state = gfx_v7_0_set_clockgating_state, .set_powergating_state = gfx_v7_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 202ddda57f..2f0e72caee 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6878,6 +6878,8 @@ static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .set_clockgating_state = gfx_v8_0_set_clockgating_state, .set_powergating_state = gfx_v8_0_set_powergating_state, .get_clockgating_state = gfx_v8_0_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 99dbd23411..3c8c5abf35 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1249,7 +1249,7 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev, char *chip_name) { - char fw_name[30]; + char fw_name[50]; int err; snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); @@ -1282,7 +1282,7 @@ out: static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev, char *chip_name) { - char fw_name[30]; + char fw_name[53]; int err; const struct rlc_firmware_header_v2_0 *rlc_hdr; uint16_t version_major; @@ -1337,7 +1337,7 @@ static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev) static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, char *chip_name) { - char fw_name[30]; + char fw_name[50]; int err; if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN)) @@ -6856,6 +6856,8 @@ static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { .set_clockgating_state = gfx_v9_0_set_clockgating_state, .set_powergating_state = gfx_v9_0_set_powergating_state, .get_clockgating_state = gfx_v9_0_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 065b2bd5f5..3f4fd2f081 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1909,18 +1909,7 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev) mutex_unlock(&adev->grbm_idx_mutex); } -static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev) -{ - u32 status = 0; - struct amdgpu_vmhub *hub; - - hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; - status = RREG32(hub->vm_l2_pro_fault_status); - /* reset page fault status */ - WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); - return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); -} struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = { .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count, @@ -1934,5 +1923,4 @@ struct amdgpu_gfx_ras gfx_v9_4_2_ras = { .hw_ops = &gfx_v9_4_2_ras_ops, }, 
.enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer, - .query_utcl2_poison_status = gfx_v9_4_2_query_uctl2_poison_status, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index b10fdd8b54..f5b9f443cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -680,38 +680,44 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst, }; -static int gfx_v9_4_3_aca_bank_generate_report(struct aca_handle *handle, - struct aca_bank *bank, enum aca_error_type type, - struct aca_bank_report *report, void *data) +static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle, + struct aca_bank *bank, enum aca_smu_type type, + void *data) { - u64 status, misc0; + struct aca_bank_info info; + u64 misc0; u32 instlo; int ret; - status = bank->regs[ACA_REG_IDX_STATUS]; - if ((type == ACA_ERROR_TYPE_UE && - ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) || - (type == ACA_ERROR_TYPE_CE && - ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) { + ret = aca_bank_info_decode(bank, &info); + if (ret) + return ret; - ret = aca_bank_info_decode(bank, &report->info); - if (ret) - return ret; + /* NOTE: overwrite info.die_id with xcd id for gfx */ + instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); + instlo &= GENMASK(31, 1); + info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1; - /* NOTE: overwrite info.die_id with xcd id for gfx */ - instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); - instlo &= GENMASK(31, 1); - report->info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1; + misc0 = bank->regs[ACA_REG_IDX_MISC0]; - misc0 = bank->regs[ACA_REG_IDX_MISC0]; - report->count[type] = ACA_REG__MISC0__ERRCNT(misc0); + switch (type) { + case ACA_SMU_TYPE_UE: + ret = aca_error_cache_log_bank_error(handle, &info, + ACA_ERROR_TYPE_UE, 1ULL); + break; + case ACA_SMU_TYPE_CE: + ret = aca_error_cache_log_bank_error(handle, &info, + ACA_ERROR_TYPE_CE, ACA_REG__MISC0__ERRCNT(misc0)); + break; + default: + return -EINVAL; } - return 0; + return ret; } static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, - enum aca_error_type type, void *data) + enum aca_smu_type type, void *data) { u32 instlo; @@ -730,7 +736,7 @@ static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_b } static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = { - .aca_bank_generate_report = gfx_v9_4_3_aca_bank_generate_report, + .aca_bank_parser = gfx_v9_4_3_aca_bank_parser, .aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid, }; @@ -2398,10 +2404,10 @@ gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev, if (def != data) WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); - /* enable cgcg FSM(0x0000363F) */ + /* CGCG Hysteresis: 400us */ def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); - data = (0x36 + data = (0x2710 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) @@ -2410,10 +2416,10 @@ gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev, if (def != data) WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data); - /* set IDLE_POLL_COUNT(0x00900100) */ + /* set IDLE_POLL_COUNT(0x33450100)*/ def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL); data = (0x0100 << 
CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | - (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); + (0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); if (def != data) WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data); } else { @@ -4010,6 +4016,8 @@ static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = { .set_clockgating_state = gfx_v9_4_3_set_clockgating_state, .set_powergating_state = gfx_v9_4_3_set_powergating_state, .get_clockgating_state = gfx_v9_4_3_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 22175da0e1..d200310d17 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -443,6 +443,22 @@ static void gfxhub_v1_0_init(struct amdgpu_device *adev) mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; } +static bool gfxhub_v1_0_query_utcl2_poison_status(struct amdgpu_device *adev, + int xcc_id) +{ + u32 status = 0; + struct amdgpu_vmhub *hub; + + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) + return false; + + hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + status = RREG32(hub->vm_l2_pro_fault_status); + /* reset page fault status */ + WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); + + return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); +} const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = { .get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset, @@ -452,4 +468,5 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = { .set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default, .init = gfxhub_v1_0_init, .get_xgmi_info = gfxhub_v1_1_get_xgmi_info, + .query_utcl2_poison_status = gfxhub_v1_0_query_utcl2_poison_status, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 49aecdcee0..77df8c9cba 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -620,6 +620,20 @@ static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev) return 0; } +static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev, + int xcc_id) +{ + u32 fed, status; + + status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVM_L2_PROTECTION_FAULT_STATUS); + fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); + /* reset page fault status */ + WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), + regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1); + + return fed; +} + const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = { .get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset, .setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs, @@ -628,6 +642,7 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = { .set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default, .init = gfxhub_v1_2_init, .get_xgmi_info = gfxhub_v1_2_get_xgmi_info, + .query_utcl2_poison_status = gfxhub_v1_2_query_utcl2_poison_status, }; static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 23b4786399..3e38d8bfcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -1115,6 +1115,8 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = { .soft_reset = gmc_v6_0_soft_reset, .set_clockgating_state = gmc_v6_0_set_clockgating_state, .set_powergating_state = gmc_v6_0_set_powergating_state, + 
.dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 3da7b6a2b0..85df8fc810 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1354,6 +1354,8 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = { .soft_reset = gmc_v7_0_soft_reset, .set_clockgating_state = gmc_v7_0_set_clockgating_state, .set_powergating_state = gmc_v7_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index d20e5f20ee..fc97757e33 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1717,6 +1717,8 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = { .set_clockgating_state = gmc_v8_0_set_clockgating_state, .set_powergating_state = gmc_v8_0_set_powergating_state, .get_clockgating_state = gmc_v8_0_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 47b63a4ce6..f7f4924751 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -548,7 +548,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, { bool retry_fault = !!(entry->src_data[1] & 0x80); bool write_fault = !!(entry->src_data[1] & 0x20); - uint32_t status = 0, cid = 0, rw = 0; + uint32_t status = 0, cid = 0, rw = 0, fed = 0; struct amdgpu_task_info *task_info; struct amdgpu_vmhub *hub; const char *mmhub_cid; @@ -664,6 +664,13 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, status = RREG32(hub->vm_l2_pro_fault_status); cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID); rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW); + fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); + + /* for fed error, kfd will handle it, return directly */ + if (fed && amdgpu_ras_is_poison_mode_supported(adev) && + (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) + return 0; + WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub); @@ -1450,7 +1457,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET; adev->umc.active_mask = adev->aid_mask; adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; - adev->umc.channel_idx_tbl = &umc_v12_0_channel_idx_tbl[0][0][0]; if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) adev->umc.ras = &umc_v12_0_ras; break; @@ -1904,7 +1910,7 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, break; } - size = adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT; + size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT; size /= adev->gmc.num_mem_partitions; for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 2c02ae6988..07984f7c3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -425,6 +425,8 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = { .soft_reset = iceland_ih_soft_reset, .set_clockgating_state = 
iceland_ih_set_clockgating_state, .set_powergating_state = iceland_ih_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs iceland_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index ad4ad39f12..3cb64c8f71 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -346,6 +346,21 @@ static int ih_v6_0_irq_init(struct amdgpu_device *adev) DELAY, 3); WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp); + /* Redirect the interrupts to IH RB1 for dGPU */ + if (adev->irq.ih1.ring_size) { + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0); + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp); + + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, + SOURCE_ID_MATCH_ENABLE, 0x1); + + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp); + } + pci_set_master(adev->pdev); /* enable interrupts */ @@ -549,8 +564,15 @@ static int ih_v6_0_sw_init(void *handle) adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; - adev->irq.ih1.ring_size = 0; - adev->irq.ih2.ring_size = 0; + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE, + use_bus_addr); + if (r) + return r; + + adev->irq.ih1.use_doorbell = true; + adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; + } /* initialize ih control register offset */ ih_v6_0_init_register_offset(adev); @@ -748,6 +770,8 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = { .set_clockgating_state = ih_v6_0_set_clockgating_state, .set_powergating_state = ih_v6_0_set_powergating_state, .get_clockgating_state = ih_v6_0_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs ih_v6_0_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c index b8da0fc293..0fbf5fa7b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c @@ -346,6 +346,21 @@ static int ih_v6_1_irq_init(struct amdgpu_device *adev) DELAY, 3); WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp); + /* Redirect the interrupts to IH RB1 for dGPU */ + if (adev->irq.ih1.ring_size) { + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0); + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp); + + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, + SOURCE_ID_MATCH_ENABLE, 0x1); + + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp); + } + pci_set_master(adev->pdev); /* enable interrupts */ @@ -550,8 +565,15 @@ static int ih_v6_1_sw_init(void *handle) adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; - adev->irq.ih1.ring_size = 0; - adev->irq.ih2.ring_size = 0; + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE, + use_bus_addr); + if (r) + return r; + + 
adev->irq.ih1.use_doorbell = true; + adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; + } /* initialize ih control register offset */ ih_v6_1_init_register_offset(adev); @@ -753,6 +775,8 @@ static const struct amd_ip_funcs ih_v6_1_ip_funcs = { .set_clockgating_state = ih_v6_1_set_clockgating_state, .set_powergating_state = ih_v6_1_set_powergating_state, .get_clockgating_state = ih_v6_1_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs ih_v6_1_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c index 7aed96fa10..aa6235dd4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c @@ -749,6 +749,8 @@ static const struct amd_ip_funcs ih_v7_0_ip_funcs = { .set_clockgating_state = ih_v7_0_set_clockgating_state, .set_powergating_state = ih_v7_0_set_powergating_state, .get_clockgating_state = ih_v7_0_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs ih_v7_0_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 1c8116d75f..ef3e42f6b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -759,6 +759,8 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = jpeg_v2_0_set_clockgating_state, .set_powergating_state = jpeg_v2_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index 99cd49ee8e..afeaf3c64e 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -632,6 +632,8 @@ static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = jpeg_v2_5_set_clockgating_state, .set_powergating_state = jpeg_v2_5_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = { @@ -652,6 +654,8 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = jpeg_v2_5_set_clockgating_state, .set_powergating_state = jpeg_v2_5_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index a92481da60..1c7cf4800b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -557,6 +557,8 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = jpeg_v3_0_set_clockgating_state, .set_powergating_state = jpeg_v3_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 88ea58d5c4..237fe5df5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -719,6 +719,8 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = jpeg_v4_0_set_clockgating_state, .set_powergating_state = jpeg_v4_0_set_powergating_state, + 
.dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 32caeb37ce..d66af11aa6 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -1053,6 +1053,8 @@ static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = jpeg_v4_0_3_set_clockgating_state, .set_powergating_state = jpeg_v4_0_3_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index edf5bcdd2b..da6bb9022b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -762,6 +762,8 @@ static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = jpeg_v4_0_5_set_clockgating_state, .set_powergating_state = jpeg_v4_0_5_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c index e70200f975..64c856bfe0 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c @@ -513,6 +513,8 @@ static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = jpeg_v5_0_0_set_clockgating_state, .set_powergating_state = jpeg_v5_0_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index 1e5ad1e08d..a626bf9049 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -1176,6 +1176,8 @@ static const struct amd_ip_funcs mes_v10_1_ip_funcs = { .hw_fini = mes_v10_1_hw_fini, .suspend = mes_v10_1_suspend, .resume = mes_v10_1_resume, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version mes_v10_1_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 63f281a998..32d4519541 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -100,18 +100,76 @@ static const struct amdgpu_ring_funcs mes_v11_0_ring_funcs = { .insert_nop = amdgpu_ring_insert_nop, }; +static const char *mes_v11_0_opcodes[] = { + "SET_HW_RSRC", + "SET_SCHEDULING_CONFIG", + "ADD_QUEUE", + "REMOVE_QUEUE", + "PERFORM_YIELD", + "SET_GANG_PRIORITY_LEVEL", + "SUSPEND", + "RESUME", + "RESET", + "SET_LOG_BUFFER", + "CHANGE_GANG_PRORITY", + "QUERY_SCHEDULER_STATUS", + "PROGRAM_GDS", + "SET_DEBUG_VMID", + "MISC", + "UPDATE_ROOT_PAGE_TABLE", + "AMD_LOG", +}; + +static const char *mes_v11_0_misc_opcodes[] = { + "WRITE_REG", + "INV_GART", + "QUERY_STATUS", + "READ_REG", + "WAIT_REG_MEM", + "SET_SHADER_DEBUGGER", +}; + +static const char *mes_v11_0_get_op_string(union MESAPI__MISC *x_pkt) +{ + const char *op_str = NULL; + + if (x_pkt->header.opcode < ARRAY_SIZE(mes_v11_0_opcodes)) + op_str = mes_v11_0_opcodes[x_pkt->header.opcode]; + + return op_str; +} + +static const char *mes_v11_0_get_misc_op_string(union MESAPI__MISC *x_pkt) +{ + const 
char *op_str = NULL; + + if ((x_pkt->header.opcode == MES_SCH_API_MISC) && + (x_pkt->opcode < ARRAY_SIZE(mes_v11_0_misc_opcodes))) + op_str = mes_v11_0_misc_opcodes[x_pkt->opcode]; + + return op_str; +} + static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, void *pkt, int size, int api_status_off) { - int ndw = size / 4; - signed long r; - union MESAPI__ADD_QUEUE *x_pkt = pkt; - struct MES_API_STATUS *api_status; + union MESAPI__QUERY_MES_STATUS mes_status_pkt; + signed long timeout = 3000000; /* 3000 ms */ struct amdgpu_device *adev = mes->adev; struct amdgpu_ring *ring = &mes->ring; + struct MES_API_STATUS *api_status; + union MESAPI__MISC *x_pkt = pkt; + const char *op_str, *misc_op_str; unsigned long flags; - signed long timeout = adev->usec_timeout; + u64 status_gpu_addr; + u32 status_offset; + u64 *status_ptr; + signed long r; + int ret; + + if (x_pkt->header.opcode >= MES_SCH_API_MAX) + return -EINVAL; if (amdgpu_emu_mode) { timeout *= 100; @@ -119,37 +177,82 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, /* Worst case in sriov where all other 15 VF timeout, each VF needs about 600ms */ timeout = 15 * 600 * 1000; } - BUG_ON(size % 4 != 0); + + ret = amdgpu_device_wb_get(adev, &status_offset); + if (ret) + return ret; + + status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4); + status_ptr = (u64 *)&adev->wb.wb[status_offset]; + *status_ptr = 0; spin_lock_irqsave(&mes->ring_lock, flags); - if (amdgpu_ring_alloc(ring, ndw)) { - spin_unlock_irqrestore(&mes->ring_lock, flags); - return -ENOMEM; - } + r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4); + if (r) + goto error_unlock_free; api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off); - api_status->api_completion_fence_addr = mes->ring.fence_drv.gpu_addr; - api_status->api_completion_fence_value = ++mes->ring.fence_drv.sync_seq; + api_status->api_completion_fence_addr = status_gpu_addr; + api_status->api_completion_fence_value = 1; + + amdgpu_ring_write_multiple(ring, pkt, size / 4); + + memset(&mes_status_pkt, 0, sizeof(mes_status_pkt)); + mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS; + mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + mes_status_pkt.api_status.api_completion_fence_addr = + ring->fence_drv.gpu_addr; + mes_status_pkt.api_status.api_completion_fence_value = + ++ring->fence_drv.sync_seq; + + amdgpu_ring_write_multiple(ring, &mes_status_pkt, + sizeof(mes_status_pkt) / 4); - amdgpu_ring_write_multiple(ring, pkt, ndw); amdgpu_ring_commit(ring); spin_unlock_irqrestore(&mes->ring_lock, flags); - DRM_DEBUG("MES msg=%d was emitted\n", x_pkt->header.opcode); + op_str = mes_v11_0_get_op_string(x_pkt); + misc_op_str = mes_v11_0_get_misc_op_string(x_pkt); - r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, - timeout); - if (r < 1) { - DRM_ERROR("MES failed to response msg=%d\n", - x_pkt->header.opcode); + if (misc_op_str) + dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str, + misc_op_str); + else if (op_str) + dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str); + else + dev_dbg(adev->dev, "MES msg=%d was emitted\n", + x_pkt->header.opcode); + + r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout); + if (r < 1 || !*status_ptr) { + + if (misc_op_str) + dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n", + op_str, misc_op_str); + else if (op_str) + dev_err(adev->dev, "MES failed to respond to msg=%s\n", + 
op_str); + else + dev_err(adev->dev, "MES failed to respond to msg=%d\n", + x_pkt->header.opcode); while (halt_if_hws_hang) schedule(); - return -ETIMEDOUT; + r = -ETIMEDOUT; + goto error_wb_free; } + amdgpu_device_wb_free(adev, status_offset); return 0; + +error_unlock_free: + spin_unlock_irqrestore(&mes->ring_lock, flags); + +error_wb_free: + amdgpu_device_wb_free(adev, status_offset); + return r; } static int convert_to_mes_queue_type(int queue_type) @@ -422,6 +525,36 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes) offsetof(union MESAPI_SET_HW_RESOURCES, api_status)); } +static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes) +{ + int size = 128 * PAGE_SIZE; + int ret = 0; + struct amdgpu_device *adev = mes->adev; + union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_pkt; + memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt)); + + mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1; + mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + mes_set_hw_res_pkt.enable_mes_info_ctx = 1; + + ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &mes->resource_1, + &mes->resource_1_gpu_addr, + &mes->resource_1_addr); + if (ret) { + dev_err(adev->dev, "(%d) failed to create mes resource_1 bo\n", ret); + return ret; + } + + mes_set_hw_res_pkt.mes_info_ctx_mc_addr = mes->resource_1_gpu_addr; + mes_set_hw_res_pkt.mes_info_ctx_size = mes->resource_1->tbo.base.size; + return mes_v11_0_submit_pkt_and_poll_completion(mes, + &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), + offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status)); +} + static const struct amdgpu_mes_funcs mes_v11_0_funcs = { .add_hw_queue = mes_v11_0_add_hw_queue, .remove_hw_queue = mes_v11_0_remove_hw_queue, @@ -1203,6 +1336,14 @@ static int mes_v11_0_hw_init(void *handle) if (r) goto failure; + if (amdgpu_sriov_is_mes_info_enable(adev)) { + r = mes_v11_0_set_hw_resources_1(&adev->mes); + if (r) { + DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r); + goto failure; + } + } + r = mes_v11_0_query_sched_status(&adev->mes); if (r) { DRM_ERROR("MES is busy\n"); @@ -1226,6 +1367,11 @@ failure: static int mes_v11_0_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (amdgpu_sriov_is_mes_info_enable(adev)) { + amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr, + &adev->mes.resource_1_addr); + } return 0; } @@ -1291,6 +1437,8 @@ static const struct amd_ip_funcs mes_v11_0_ip_funcs = { .hw_fini = mes_v11_0_hw_fini, .suspend = mes_v11_0_suspend, .resume = mes_v11_0_resume, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version mes_v11_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index c0fc44cdd6..7a1ff29841 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -559,6 +559,20 @@ static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags) } +static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev, + int hub_inst) +{ + u32 fed, status; + + status = RREG32_SOC15(MMHUB, hub_inst, regVM_L2_PROTECTION_FAULT_STATUS); + fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); + /* reset page fault status */ + WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst, + regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1); + + return fed; +} + const struct amdgpu_mmhub_funcs 
mmhub_v1_8_funcs = { .get_fb_location = mmhub_v1_8_get_fb_location, .init = mmhub_v1_8_init, @@ -568,6 +582,7 @@ const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = { .setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs, .set_clockgating = mmhub_v1_8_set_clockgating, .get_clockgating = mmhub_v1_8_get_clockgating, + .query_utcl2_poison_status = mmhub_v1_8_query_utcl2_poison_status, }; static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = { @@ -706,28 +721,32 @@ static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = { .reset_ras_error_count = mmhub_v1_8_reset_ras_error_count, }; -static int mmhub_v1_8_aca_bank_generate_report(struct aca_handle *handle, - struct aca_bank *bank, enum aca_error_type type, - struct aca_bank_report *report, void *data) +static int mmhub_v1_8_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) { - u64 status, misc0; + struct aca_bank_info info; + u64 misc0; int ret; - status = bank->regs[ACA_REG_IDX_STATUS]; - if ((type == ACA_ERROR_TYPE_UE && - ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) || - (type == ACA_ERROR_TYPE_CE && - ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) { - - ret = aca_bank_info_decode(bank, &report->info); - if (ret) - return ret; - - misc0 = bank->regs[ACA_REG_IDX_MISC0]; - report->count[type] = ACA_REG__MISC0__ERRCNT(misc0); + ret = aca_bank_info_decode(bank, &info); + if (ret) + return ret; + + misc0 = bank->regs[ACA_REG_IDX_MISC0]; + switch (type) { + case ACA_SMU_TYPE_UE: + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, + 1ULL); + break; + case ACA_SMU_TYPE_CE: + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE, + ACA_REG__MISC0__ERRCNT(misc0)); + break; + default: + return -EINVAL; } - return 0; + return ret; } /* reference to smu driver if header file */ @@ -741,7 +760,7 @@ static int mmhub_v1_8_err_codes[] = { }; static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, - enum aca_error_type type, void *data) + enum aca_smu_type type, void *data) { u32 instlo; @@ -760,7 +779,7 @@ static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_b } static const struct aca_bank_ops mmhub_v1_8_aca_bank_ops = { - .aca_bank_generate_report = mmhub_v1_8_aca_bank_generate_report, + .aca_bank_parser = mmhub_v1_8_aca_bank_parser, .aca_bank_is_valid = mmhub_v1_8_aca_bank_is_valid, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index a2bd2c3b1e..0c7275bca8 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -276,6 +276,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) timeout -= 10; } while (timeout > 1); + dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n"); + flr_done: atomic_set(&adev->reset_domain->in_gpu_reset, 0); up_write(&adev->reset_domain->sem); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 77f5b55dec..aba00d9616 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -309,6 +309,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) timeout -= 10; } while (timeout > 1); + dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n"); + flr_done: atomic_set(&adev->reset_domain->in_gpu_reset, 0); up_write(&adev->reset_domain->sem); @@ -444,7 +446,6 @@ static void xgpu_nv_ras_poison_handler(struct 
amdgpu_device *adev, amdgpu_virt_fini_data_exchange(adev); xgpu_nv_send_access_requests_with_param(adev, IDH_RAS_POISON, block, 0, 0); - amdgpu_virt_init_data_exchange(adev); } } diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 4178f4e5da..b281462093 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -713,6 +713,8 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = { .set_clockgating_state = navi10_ih_set_clockgating_state, .set_powergating_state = navi10_ih_set_powergating_state, .get_clockgating_state = navi10_ih_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs navi10_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 4d7976b777..12e54047bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -110,7 +110,7 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; @@ -121,7 +121,7 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, }; @@ -199,7 +199,7 @@ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; @@ -1131,4 +1131,6 @@ static const struct amd_ip_funcs nv_common_ip_funcs = { .set_clockgating_state = nv_common_set_clockgating_state, .set_powergating_state = nv_common_set_powergating_state, .get_clockgating_state = nv_common_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 7566973ed8..37b5ddd6f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -464,8 +464,9 @@ struct psp_gfx_rb_frame #define PSP_ERR_UNKNOWN_COMMAND 0x00000100 enum tee_error_code { - TEE_SUCCESS = 0x00000000, - TEE_ERROR_NOT_SUPPORTED = 0xFFFF000A, + TEE_SUCCESS = 0x00000000, + TEE_ERROR_CANCEL = 0xFFFF0002, + TEE_ERROR_NOT_SUPPORTED = 0xFFFF000A, }; #endif /* _PSP_TEE_GFX_IF_H_ */ diff --git 
a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c index 238abd9807..40b28298af 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c @@ -174,7 +174,8 @@ static int psp_v14_0_bootloader_load_intf_drv(struct psp_context *psp) static int psp_v14_0_bootloader_load_dbg_drv(struct psp_context *psp) { - return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV); + /* dbg_drv was renamed to had_drv in psp v14 */ + return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_HADDRV); } static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp) @@ -182,6 +183,10 @@ static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp) return psp_v14_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV); } +static int psp_v14_0_bootloader_load_ipkeymgr_drv(struct psp_context *psp) +{ + return psp_v14_0_bootloader_load_component(psp, &psp->ipkeymgr_drv, PSP_BL__LOAD_IPKEYMGRDRV); +} static int psp_v14_0_bootloader_load_sos(struct psp_context *psp) { @@ -658,6 +663,7 @@ static const struct psp_funcs psp_v14_0_funcs = { .bootloader_load_intf_drv = psp_v14_0_bootloader_load_intf_drv, .bootloader_load_dbg_drv = psp_v14_0_bootloader_load_dbg_drv, .bootloader_load_ras_drv = psp_v14_0_bootloader_load_ras_drv, + .bootloader_load_ipkeymgr_drv = psp_v14_0_bootloader_load_ipkeymgr_drv, .bootloader_load_sos = psp_v14_0_bootloader_load_sos, .ring_create = psp_v14_0_ring_create, .ring_stop = psp_v14_0_ring_stop, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 07e19caf2b..ac8a9b9b3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -1113,6 +1113,8 @@ static const struct amd_ip_funcs sdma_v2_4_ip_funcs = { .soft_reset = sdma_v2_4_soft_reset, .set_clockgating_state = sdma_v2_4_set_clockgating_state, .set_powergating_state = sdma_v2_4_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { @@ -1176,7 +1178,7 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev) * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer - * @tmz: unused + * @copy_flags: unused * * Copy GPU buffers using the DMA engine (VI). 
* Used by the amdgpu ttm implementation to move pages if @@ -1186,7 +1188,7 @@ static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, - bool tmz) + uint32_t copy_flags) { ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 2ad615be4b..b8ebdc4ae6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1553,6 +1553,8 @@ static const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .set_clockgating_state = sdma_v3_0_set_clockgating_state, .set_powergating_state = sdma_v3_0_set_powergating_state, .get_clockgating_state = sdma_v3_0_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { @@ -1616,7 +1618,7 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev) * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer - * @tmz: unused + * @copy_flags: unused * * Copy GPU buffers using the DMA engine (VI). * Used by the amdgpu ttm implementation to move pages if @@ -1626,7 +1628,7 @@ static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, - bool tmz) + uint32_t copy_flags) { ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 3fac292334..772604feb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2451,7 +2451,7 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev) * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer - * @tmz: if a secure copy should be used + * @copy_flags: copy flags for the buffers * * Copy GPU buffers using the DMA engine (VEGA10/12). * Used by the amdgpu ttm implementation to move pages if @@ -2461,11 +2461,11 @@ static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, - bool tmz) + uint32_t copy_flags) { ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | - SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0); + SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0); ib->ptr[ib->length_dw++] = byte_count - 1; ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index e708468ac5..341b24d832 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1945,7 +1945,7 @@ static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev) * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer - * @tmz: if a secure copy should be used + * @copy_flags: copy flags for the buffers * * Copy GPU buffers using the DMA engine. 
* Used by the amdgpu ttm implementation to move pages if @@ -1955,11 +1955,11 @@ static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, - bool tmz) + uint32_t copy_flags) { ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | - SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0); + SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0); ib->ptr[ib->length_dw++] = byte_count - 1; ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); @@ -2180,35 +2180,39 @@ static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = { .reset_ras_error_count = sdma_v4_4_2_reset_ras_error_count, }; -static int sdma_v4_4_2_aca_bank_generate_report(struct aca_handle *handle, - struct aca_bank *bank, enum aca_error_type type, - struct aca_bank_report *report, void *data) +static int sdma_v4_4_2_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) { - u64 status, misc0; + struct aca_bank_info info; + u64 misc0; int ret; - status = bank->regs[ACA_REG_IDX_STATUS]; - if ((type == ACA_ERROR_TYPE_UE && - ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) || - (type == ACA_ERROR_TYPE_CE && - ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) { + ret = aca_bank_info_decode(bank, &info); + if (ret) + return ret; - ret = aca_bank_info_decode(bank, &report->info); - if (ret) - return ret; - - misc0 = bank->regs[ACA_REG_IDX_MISC0]; - report->count[type] = ACA_REG__MISC0__ERRCNT(misc0); + misc0 = bank->regs[ACA_REG_IDX_MISC0]; + switch (type) { + case ACA_SMU_TYPE_UE: + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, + 1ULL); + break; + case ACA_SMU_TYPE_CE: + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE, + ACA_REG__MISC0__ERRCNT(misc0)); + break; + default: + return -EINVAL; } - return 0; + return ret; } /* CODE_SDMA0 - CODE_SDMA4, reference to smu driver if header file */ static int sdma_v4_4_2_err_codes[] = { 33, 34, 35, 36 }; static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, - enum aca_error_type type, void *data) + enum aca_smu_type type, void *data) { u32 instlo; @@ -2227,7 +2231,7 @@ static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_ } static const struct aca_bank_ops sdma_v4_4_2_aca_bank_ops = { - .aca_bank_generate_report = sdma_v4_4_2_aca_bank_generate_report, + .aca_bank_parser = sdma_v4_4_2_aca_bank_parser, .aca_bank_is_valid = sdma_v4_4_2_aca_bank_is_valid, }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 883e8a1b8a..b7d33d78bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -999,7 +999,8 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring) r = amdgpu_ring_alloc(ring, 20); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_device_wb_free(adev, index); + if (!ring->is_mes_queue) + amdgpu_device_wb_free(adev, index); return r; } @@ -1805,7 +1806,7 @@ static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev) * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer - * @tmz: if a secure copy should be used + * @copy_flags: copy flags for the buffers * * Copy GPU buffers using the DMA engine (NAVI10). 
* Used by the amdgpu ttm implementation to move pages if @@ -1815,11 +1816,11 @@ static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, - bool tmz) + uint32_t copy_flags) { ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | - SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0); + SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0); ib->ptr[ib->length_dw++] = byte_count - 1; ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index da01b524b9..af1e90159c 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -176,6 +176,14 @@ static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring) DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", ring->doorbell_index, ring->wptr << 2); WDOORBELL64(ring->doorbell_index, ring->wptr << 2); + /* SDMA seems to miss doorbells sometimes when powergating kicks in. + * Updating the wptr directly will wake it. This is only safe because + * we disallow gfxoff in begin_use() and then allow it again in end_use(). + */ + WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), + lower_32_bits(ring->wptr << 2)); + WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), + upper_32_bits(ring->wptr << 2)); } else { DRM_DEBUG("Not using doorbell -- " "mmSDMA%i_GFX_RB_WPTR == 0x%08x " @@ -839,7 +847,8 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring) r = amdgpu_ring_alloc(ring, 20); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_device_wb_free(adev, index); + if (!ring->is_mes_queue) + amdgpu_device_wb_free(adev, index); return r; } @@ -1646,6 +1655,10 @@ static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring) * but it shouldn't hurt for other parts since * this GFXOFF will be disallowed anyway when SDMA is * active, this just makes it explicit. + * sdma_v5_2_ring_set_wptr() takes advantage of this + * to update the wptr because sometimes SDMA seems to miss + * doorbells when entering PG. If you remove this, update + * sdma_v5_2_ring_set_wptr() as well! */ amdgpu_gfx_off_ctrl(adev, false); } @@ -1751,7 +1764,7 @@ static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev) * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer - * @tmz: if a secure copy should be used + * @copy_flags: copy flags for the buffers * * Copy GPU buffers using the DMA engine. * Used by the amdgpu ttm implementation to move pages if @@ -1761,11 +1774,11 @@ static void sdma_v5_2_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, - bool tmz) + uint32_t copy_flags) { ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | - SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0); + SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 
1 : 0); ib->ptr[ib->length_dw++] = byte_count - 1; ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 361835a61f..c833b6b837 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -507,6 +507,13 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) /* set minor_ptr_update to 0 after wptr programed */ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0); + /* Set up sdma hang watchdog */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL)); + /* 100ms per unit */ + temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT, + max(adev->usec_timeout/100000, 1)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp); + /* Set up RESP_MODE to non-copy addresses */ temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL)); temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); @@ -854,7 +861,8 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring) r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_device_wb_free(adev, index); + if (!ring->is_mes_queue) + amdgpu_device_wb_free(adev, index); return r; } @@ -1567,7 +1575,7 @@ static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev) * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer - * @tmz: if a secure copy should be used + * @copy_flags: copy flags for the buffers * * Copy GPU buffers using the DMA engine. * Used by the amdgpu ttm implementation to move pages if @@ -1577,11 +1585,11 @@ static void sdma_v6_0_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, - bool tmz) + uint32_t copy_flags) { ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) | SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | - SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0); + SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 
1 : 0); ib->ptr[ib->length_dw++] = byte_count - 1; ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 23e4ef4fff..85235470e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1409,9 +1409,9 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev) return r; } -static bool si_asic_supports_baco(struct amdgpu_device *adev) +static int si_asic_supports_baco(struct amdgpu_device *adev) { - return false; + return 0; } static enum amd_reset_method @@ -2706,6 +2706,8 @@ static const struct amd_ip_funcs si_common_ip_funcs = { .soft_reset = si_common_soft_reset, .set_clockgating_state = si_common_set_clockgating_state, .set_powergating_state = si_common_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ip_block_version si_common_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 9aa0e11ee6..11db5b7558 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -708,6 +708,8 @@ static const struct amd_ip_funcs si_dma_ip_funcs = { .soft_reset = si_dma_soft_reset, .set_clockgating_state = si_dma_set_clockgating_state, .set_powergating_state = si_dma_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs si_dma_ring_funcs = { @@ -761,7 +763,7 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev) * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer - * @tmz: is this a secure operation + * @copy_flags: unused * * Copy GPU buffers using the DMA engine (VI). * Used by the amdgpu ttm implementation to move pages if @@ -771,7 +773,7 @@ static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, - bool tmz) + uint32_t copy_flags) { ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, byte_count); diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index cada9f300a..5237395e4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -296,6 +296,8 @@ static const struct amd_ip_funcs si_ih_ip_funcs = { .soft_reset = si_ih_soft_reset, .set_clockgating_state = si_ih_set_clockgating_state, .set_powergating_state = si_ih_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs si_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c index 04c797d545..0af648931d 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c +++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c @@ -91,7 +91,7 @@ static int smu_v13_0_10_mode2_suspend_ip(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = false; } - return r; + return 0; } static int diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c new file mode 100644 index 0000000000..2a51a70d48 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c @@ -0,0 +1,62 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "smuio_v14_0_2.h" +#include "smuio/smuio_14_0_2_offset.h" +#include "smuio/smuio_14_0_2_sh_mask.h" +#include <linux/preempt.h> + +static u32 smuio_v14_0_2_get_rom_index_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(SMUIO, 0, regROM_INDEX); +} + +static u32 smuio_v14_0_2_get_rom_data_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(SMUIO, 0, regROM_DATA); +} + +static u64 smuio_v14_0_2_get_gpu_clock_counter(struct amdgpu_device *adev) +{ + u64 clock; + u64 clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after; + + preempt_disable(); + clock_counter_hi_pre = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); + clock_counter_lo = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); + /* the clock counter may be udpated during polling the counters */ + clock_counter_hi_after = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); + if (clock_counter_hi_pre != clock_counter_hi_after) + clock_counter_lo = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); + preempt_enable(); + + clock = clock_counter_lo | (clock_counter_hi_after << 32ULL); + + return clock; +} + +const struct amdgpu_smuio_funcs smuio_v14_0_2_funcs = { + .get_rom_index_offset = smuio_v14_0_2_get_rom_index_offset, + .get_rom_data_offset = smuio_v14_0_2_get_rom_data_offset, + .get_gpu_clock_counter = smuio_v14_0_2_get_gpu_clock_counter, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h new file mode 100644 index 0000000000..6e617f832d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h @@ -0,0 +1,30 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SMUIO_V14_0_2_H__ +#define __SMUIO_V14_0_2_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_smuio_funcs smuio_v14_0_2_funcs; + +#endif /* __SMUIO_V14_0_2_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index dec81ccf62..170f02e967 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -143,7 +143,7 @@ static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] = {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, }; @@ -156,7 +156,7 @@ static const struct amdgpu_video_codecs rn_video_codecs_decode = static const struct amdgpu_video_codec_info vcn_4_0_3_video_codecs_decode_array[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; @@ -502,7 +502,7 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev) static enum amd_reset_method soc15_asic_reset_method(struct amdgpu_device *adev) { - bool baco_reset = false; + int baco_reset = 0; bool connected_to_cpu = false; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); @@ -540,7 +540,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev) */ if (ras && adev->ras_enabled && adev->pm.fw_version <= 0x283400) - baco_reset = false; + baco_reset = 0; } else { baco_reset = amdgpu_dpm_is_baco_supported(adev); } @@ -620,7 +620,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev) } } -static bool soc15_supports_baco(struct amdgpu_device *adev) +static int soc15_supports_baco(struct amdgpu_device *adev) { switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(9, 0, 0): @@ -628,13 +628,13 @@ static bool soc15_supports_baco(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA20) { if (adev->psp.sos.fw_version >= 0x80067) return amdgpu_dpm_is_baco_supported(adev); - return false; + return 0; } else { return amdgpu_dpm_is_baco_supported(adev); } break; default: - return false; + return 0; } } @@ -1501,4 +1501,6 @@ static const struct amd_ip_funcs soc15_common_ip_funcs = { .set_clockgating_state = soc15_common_set_clockgating_state, .set_powergating_state = soc15_common_set_powergating_state, .get_clockgating_state= soc15_common_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; diff 
--git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index 1444b7765e..282584a48b 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -88,6 +88,8 @@ struct soc15_ras_field_entry { }; #define SOC15_REG_ENTRY(ip, inst, reg) ip##_HWIP, inst, reg##_BASE_IDX, reg +#define SOC15_REG_ENTRY_STR(ip, inst, reg) \ + { ip##_HWIP, inst, reg##_BASE_IDX, reg, #reg } #define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset) diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 43ca63fe85..fb67974675 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -72,7 +72,7 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 = { static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; @@ -80,7 +80,7 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, }; @@ -985,4 +985,6 @@ static const struct amd_ip_funcs soc21_common_ip_funcs = { .set_clockgating_state = soc21_common_set_clockgating_state, .set_powergating_state = soc21_common_set_powergating_state, .get_clockgating_state = soc21_common_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index 056d4df8fa..3ac56a9645 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -146,6 +146,7 @@ struct ta_ras_mca_addr { uint32_t ch_inst; uint32_t umc_inst; uint32_t node_inst; + uint32_t socket_id; }; struct ta_ras_phy_addr { diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 450b6e8315..24d49d8136 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -486,6 +486,8 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = { .post_soft_reset = tonga_ih_post_soft_reset, .set_clockgating_state = tonga_ih_set_clockgating_state, .set_powergating_state = tonga_ih_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ih_funcs tonga_ih_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 77af4e25ff..bfe61d86ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -28,27 +28,7 @@ #include "umc/umc_12_0_0_sh_mask.h" #include "mp/mp_13_0_6_sh_mask.h" -const uint32_t - 
umc_v12_0_channel_idx_tbl[] - [UMC_V12_0_UMC_INSTANCE_NUM] - [UMC_V12_0_CHANNEL_INSTANCE_NUM] = { - {{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12}, - {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}}, - {{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32}, - {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}}, - {{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64}, - {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}}, - {{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108}, - {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}} - }; - -/* mapping of MCA error address to normalized address */ -static const uint32_t umc_v12_0_ma2na_mapping[] = { - 0, 5, 6, 8, 9, 14, 12, 13, - 10, 11, 15, 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, - 24, 7, 29, 30, -}; +#define MAX_ECC_NUM_PER_RETIREMENT 32 static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev, uint32_t node_inst, @@ -192,99 +172,74 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev, umc_v12_0_reset_error_count(adev); } -static bool umc_v12_0_bit_wise_xor(uint32_t val) +static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, + struct ras_err_data *err_data, + struct ta_ras_query_address_input *addr_in) { - bool result = 0; - int i; + uint32_t col, row, row_xor, bank, channel_index; + uint64_t soc_pa, retired_page, column, err_addr; + struct ta_ras_query_address_output addr_out; - for (i = 0; i < 32; i++) - result = result ^ ((val >> i) & 0x1); + err_addr = addr_in->ma.err_addr; + addr_in->addr_type = TA_RAS_MCA_TO_PA; + if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) { + dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx", + err_addr); - return result; -} + return; + } + + soc_pa = addr_out.pa.pa; + bank = addr_out.pa.bank; + channel_index = addr_out.pa.channel_idx; -static void umc_v12_0_mca_addr_to_pa(struct amdgpu_device *adev, - uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst, - uint32_t node_inst, - struct ta_ras_query_address_output *addr_out) -{ - uint32_t channel_index, i; - uint64_t na, soc_pa; - uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row; - uint32_t bank0, bank1, bank2, bank3, bank; - - bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL; - bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL; - bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL; - bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL; col = (err_addr >> 1) & 0x1fULL; row = (err_addr >> 10) & 0x3fffULL; + row_xor = row ^ (0x1ULL << 13); + /* clear [C3 C2] in soc physical address */ + soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT); + /* clear [C4] in soc physical address */ + soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT); + + /* loop for all possibilities of [C4 C3 C2] */ + for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) { + retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT); + retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT); + /* include column bit 0 and 1 */ + col &= 0x3; + col |= (column << 2); + dev_info(adev->dev, + "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", + retired_page, row, col, bank, channel_index); + amdgpu_umc_fill_error_record(err_data, err_addr, + retired_page, channel_index, addr_in->ma.umc_inst); - /* apply bank hash algorithm */ - bank0 = - bank_hash0 ^ (UMC_V12_0_XOR_EN0 & - 
(umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^ - (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0)))); - bank1 = - bank_hash1 ^ (UMC_V12_0_XOR_EN1 & - (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^ - (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1)))); - bank2 = - bank_hash2 ^ (UMC_V12_0_XOR_EN2 & - (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^ - (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2)))); - bank3 = - bank_hash3 ^ (UMC_V12_0_XOR_EN3 & - (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^ - (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3)))); - - bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3); - err_addr &= ~0x3c0ULL; - err_addr |= (bank << UMC_V12_0_MCA_B0_BIT); - - na = 0x0; - /* convert mca error address to normalized address */ - for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++) - na |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i]; - - channel_index = - adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num * - adev->umc.channel_inst_num + - umc_inst * adev->umc.channel_inst_num + - ch_inst]; - /* translate umc channel address to soc pa, 3 parts are included */ - soc_pa = ADDR_OF_32KB_BLOCK(na) | - ADDR_OF_256B_BLOCK(channel_index) | - OFFSET_IN_256B_BLOCK(na); - - /* the umc channel bits are not original values, they are hashed */ - UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa); - - addr_out->pa.pa = soc_pa; - addr_out->pa.bank = bank; - addr_out->pa.channel_idx = channel_index; + /* shift R13 bit */ + retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT); + dev_info(adev->dev, + "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", + retired_page, row_xor, col, bank, channel_index); + amdgpu_umc_fill_error_record(err_data, err_addr, + retired_page, channel_index, addr_in->ma.umc_inst); + } } -static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, - struct ras_err_data *err_data, uint64_t err_addr, - uint32_t ch_inst, uint32_t umc_inst, - uint32_t node_inst) +static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev, + struct ta_ras_query_address_input *addr_in, + uint64_t *pfns, int len) { uint32_t col, row, row_xor, bank, channel_index; - uint64_t soc_pa, retired_page, column; - struct ta_ras_query_address_input addr_in; + uint64_t soc_pa, retired_page, column, err_addr; struct ta_ras_query_address_output addr_out; + uint32_t pos = 0; - addr_in.addr_type = TA_RAS_MCA_TO_PA; - addr_in.ma.err_addr = err_addr; - addr_in.ma.ch_inst = ch_inst; - addr_in.ma.umc_inst = umc_inst; - addr_in.ma.node_inst = node_inst; - - if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out)) - /* fallback to old path if fail to get pa from psp */ - umc_v12_0_mca_addr_to_pa(adev, err_addr, ch_inst, umc_inst, - node_inst, &addr_out); + err_addr = addr_in->ma.err_addr; + addr_in->addr_type = TA_RAS_MCA_TO_PA; + if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) { + dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx", + err_addr); + return 0; + } soc_pa = addr_out.pa.pa; bank = addr_out.pa.bank; @@ -302,33 +257,42 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) { retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT); retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT); + + if (pos >= len) + return 0; + pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT; + /* include column bit 0 and 1 */ col &= 0x3; col |= (column << 2); dev_info(adev->dev, "Error 
Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", retired_page, row, col, bank, channel_index); - amdgpu_umc_fill_error_record(err_data, err_addr, - retired_page, channel_index, umc_inst); /* shift R13 bit */ retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT); + + if (pos >= len) + return 0; + pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT; + dev_info(adev->dev, "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", retired_page, row_xor, col, bank, channel_index); - amdgpu_umc_fill_error_record(err_data, err_addr, - retired_page, channel_index, umc_inst); } + + return pos; } static int umc_v12_0_query_error_address(struct amdgpu_device *adev, uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst, void *data) { + struct ras_err_data *err_data = (struct ras_err_data *)data; + struct ta_ras_query_address_input addr_in; uint64_t mc_umc_status_addr; uint64_t mc_umc_status, err_addr; uint64_t mc_umc_addrt0; - struct ras_err_data *err_data = (struct ras_err_data *)data; uint64_t umc_reg_offset = get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst); @@ -357,8 +321,19 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev, err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); - umc_v12_0_convert_error_address(adev, err_data, err_addr, - ch_inst, umc_inst, node_inst); + if (!adev->aid_mask && + adev->smuio.funcs && + adev->smuio.funcs->get_socket_id) + addr_in.ma.socket_id = adev->smuio.funcs->get_socket_id(adev); + else + addr_in.ma.socket_id = 0; + + addr_in.ma.err_addr = err_addr; + addr_in.ma.ch_inst = ch_inst; + addr_in.ma.umc_inst = umc_inst; + addr_in.ma.node_inst = node_inst; + + umc_v12_0_convert_error_address(adev, err_data, &addr_in); } /* clear umc status */ @@ -401,13 +376,20 @@ static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev, return 0; } +#ifdef TO_BE_REMOVED static void umc_v12_0_ecc_info_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { + struct ras_query_context qctx; + + memset(&qctx, 0, sizeof(qctx)); + qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ? 
+ RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID); + amdgpu_mca_smu_log_ras_error(adev, - AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status); + AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status, &qctx); amdgpu_mca_smu_log_ras_error(adev, - AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status); + AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status, &qctx); } static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *adev, @@ -418,12 +400,16 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade struct ras_err_info *err_info; struct ras_err_addr *mca_err_addr, *tmp; struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + struct ta_ras_query_address_input addr_in; for_each_ras_error(err_node, err_data) { err_info = &err_node->err_info; if (list_empty(&err_info->err_addr_list)) continue; + addr_in.ma.node_inst = err_info->mcm_info.die_id; + addr_in.ma.socket_id = err_info->mcm_info.socket_id; + list_for_each_entry_safe(mca_err_addr, tmp, &err_info->err_addr_list, node) { mc_umc_status = mca_err_addr->err_status; if (mc_umc_status && @@ -439,6 +425,10 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo); + addr_in.ma.err_addr = err_addr; + addr_in.ma.ch_inst = MCA_IPID_LO_2_UMC_CH(InstanceIdLo); + addr_in.ma.umc_inst = MCA_IPID_LO_2_UMC_INST(InstanceIdLo); + dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n", mca_ipid, err_info->mcm_info.die_id, @@ -447,10 +437,7 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade err_addr); umc_v12_0_convert_error_address(adev, - err_data, err_addr, - MCA_IPID_LO_2_UMC_CH(InstanceIdLo), - MCA_IPID_LO_2_UMC_INST(InstanceIdLo), - err_info->mcm_info.die_id); + err_data, &addr_in); } /* Delete error address node from list and free memory */ @@ -458,6 +445,7 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade } } } +#endif static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, void *ras_error_status) @@ -498,43 +486,49 @@ const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = { .query_ras_error_address = umc_v12_0_query_ras_error_address, }; -static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, - struct aca_bank_report *report, void *data) +static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) { struct amdgpu_device *adev = handle->adev; - u64 status; + struct aca_bank_info info; + enum aca_error_type err_type; + u64 status, count; + u32 ext_error_code; int ret; - ret = aca_bank_info_decode(bank, &report->info); + status = bank->regs[ACA_REG_IDX_STATUS]; + if (umc_v12_0_is_deferred_error(adev, status)) + err_type = ACA_ERROR_TYPE_DEFERRED; + else if (umc_v12_0_is_uncorrectable_error(adev, status)) + err_type = ACA_ERROR_TYPE_UE; + else if (umc_v12_0_is_correctable_error(adev, status)) + err_type = ACA_ERROR_TYPE_CE; + else + return 0; + + ret = aca_bank_info_decode(bank, &info); if (ret) return ret; - status = bank->regs[ACA_REG_IDX_STATUS]; - switch (type) { - case ACA_ERROR_TYPE_UE: - if (umc_v12_0_is_uncorrectable_error(adev, status)) { - report->count[type] = 1; - } - break; - case ACA_ERROR_TYPE_CE: - if 
(umc_v12_0_is_correctable_error(adev, status)) { - report->count[type] = 1; - } - break; - default: - return -EINVAL; - } + amdgpu_umc_update_ecc_status(adev, + bank->regs[ACA_REG_IDX_STATUS], + bank->regs[ACA_REG_IDX_IPID], + bank->regs[ACA_REG_IDX_ADDR]); - return 0; + ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status); + count = ext_error_code == 0 ? + ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL; + + return aca_error_cache_log_bank_error(handle, &info, err_type, count); } static const struct aca_bank_ops umc_v12_0_aca_bank_ops = { - .aca_bank_generate_report = umc_v12_0_aca_bank_generate_report, + .aca_bank_parser = umc_v12_0_aca_bank_parser, }; const struct aca_info umc_v12_0_aca_info = { .hwip = ACA_HWIP_TYPE_UMC, - .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK, + .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK | ACA_ERROR_DEFERRED_MASK, .bank_ops = &umc_v12_0_aca_bank_ops, }; @@ -554,6 +548,152 @@ static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common return 0; } +static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev, + uint64_t status, uint64_t ipid, uint64_t addr) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + uint16_t hwid, mcatype; + struct ta_ras_query_address_input addr_in; + uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL]; + uint64_t err_addr, hash_val = 0; + struct ras_ecc_err *ecc_err; + int count; + int ret; + + hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID); + mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType); + + if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0)) + return 0; + + if (!status) + return 0; + + if (!umc_v12_0_is_deferred_error(adev, status)) + return 0; + + err_addr = REG_GET_FIELD(addr, + MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); + + dev_info(adev->dev, + "UMC:IPID:0x%llx, socket:%llu, aid:%llu, inst:%llu, ch:%llu, err_addr:0x%llx\n", + ipid, + MCA_IPID_2_SOCKET_ID(ipid), + MCA_IPID_2_DIE_ID(ipid), + MCA_IPID_2_UMC_INST(ipid), + MCA_IPID_2_UMC_CH(ipid), + err_addr); + + memset(page_pfn, 0, sizeof(page_pfn)); + + memset(&addr_in, 0, sizeof(addr_in)); + addr_in.ma.err_addr = err_addr; + addr_in.ma.ch_inst = MCA_IPID_2_UMC_CH(ipid); + addr_in.ma.umc_inst = MCA_IPID_2_UMC_INST(ipid); + addr_in.ma.node_inst = MCA_IPID_2_DIE_ID(ipid); + addr_in.ma.socket_id = MCA_IPID_2_SOCKET_ID(ipid); + + count = umc_v12_0_convert_err_addr(adev, + &addr_in, page_pfn, ARRAY_SIZE(page_pfn)); + if (count <= 0) { + dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count); + return 0; + } + + ret = amdgpu_umc_build_pages_hash(adev, + page_pfn, count, &hash_val); + if (ret) { + dev_err(adev->dev, "Fail to build error pages hash\n"); + return ret; + } + + ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL); + if (!ecc_err) + return -ENOMEM; + + ecc_err->err_pages.pfn = kcalloc(count, sizeof(*ecc_err->err_pages.pfn), GFP_KERNEL); + if (!ecc_err->err_pages.pfn) { + kfree(ecc_err); + return -ENOMEM; + } + + memcpy(ecc_err->err_pages.pfn, page_pfn, count * sizeof(*ecc_err->err_pages.pfn)); + ecc_err->err_pages.count = count; + + ecc_err->hash_index = hash_val; + ecc_err->status = status; + ecc_err->ipid = ipid; + ecc_err->addr = addr; + + ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err); + if (ret) { + if (ret == -EEXIST) + con->umc_ecc_log.de_updated = true; + else + dev_err(adev->dev, "Fail to log ecc error! 
ret:%d\n", ret); + + kfree(ecc_err->err_pages.pfn); + kfree(ecc_err); + return ret; + } + + con->umc_ecc_log.de_updated = true; + + return 0; +} + +static int umc_v12_0_fill_error_record(struct amdgpu_device *adev, + struct ras_ecc_err *ecc_err, void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t i = 0; + int ret = 0; + + if (!err_data || !ecc_err) + return -EINVAL; + + for (i = 0; i < ecc_err->err_pages.count; i++) { + ret = amdgpu_umc_fill_error_record(err_data, + ecc_err->addr, + ecc_err->err_pages.pfn[i] << AMDGPU_GPU_PAGE_SHIFT, + MCA_IPID_2_UMC_CH(ecc_err->ipid), + MCA_IPID_2_UMC_INST(ecc_err->ipid)); + if (ret) + break; + } + + err_data->de_count++; + + return ret; +} + +static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_ecc_err *entries[MAX_ECC_NUM_PER_RETIREMENT]; + struct radix_tree_root *ecc_tree; + int new_detected, ret, i; + + ecc_tree = &con->umc_ecc_log.de_page_tree; + + mutex_lock(&con->umc_ecc_log.lock); + new_detected = radix_tree_gang_lookup_tag(ecc_tree, (void **)entries, + 0, ARRAY_SIZE(entries), UMC_ECC_NEW_DETECTED_TAG); + for (i = 0; i < new_detected; i++) { + if (!entries[i]) + continue; + + ret = umc_v12_0_fill_error_record(adev, entries[i], ras_error_status); + if (ret) { + dev_err(adev->dev, "Fail to fill umc error record, ret:%d\n", ret); + break; + } + radix_tree_tag_clear(ecc_tree, entries[i]->hash_index, UMC_ECC_NEW_DETECTED_TAG); + } + mutex_unlock(&con->umc_ecc_log.lock); +} + struct amdgpu_umc_ras umc_v12_0_ras = { .ras_block = { .hw_ops = &umc_v12_0_ras_hw_ops, @@ -561,8 +701,8 @@ struct amdgpu_umc_ras umc_v12_0_ras = { }, .err_cnt_init = umc_v12_0_err_cnt_init, .query_ras_poison_mode = umc_v12_0_query_ras_poison_mode, - .ecc_info_query_ras_error_count = umc_v12_0_ecc_info_query_ras_error_count, - .ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address, + .ecc_info_query_ras_error_address = umc_v12_0_query_ras_ecc_err_addr, .check_ecc_err_status = umc_v12_0_check_ecc_err_status, + .update_ecc_status = umc_v12_0_update_ecc_status, }; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h index 5973bfb14f..b497479385 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h @@ -55,83 +55,38 @@ #define UMC_V12_0_NA_MAP_PA_NUM 8 /* R13 bit shift should be considered, double the number */ #define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2) -/* bank bits in MCA error address */ -#define UMC_V12_0_MCA_B0_BIT 6 -#define UMC_V12_0_MCA_B1_BIT 7 -#define UMC_V12_0_MCA_B2_BIT 8 -#define UMC_V12_0_MCA_B3_BIT 9 + /* column bits in SOC physical address */ #define UMC_V12_0_PA_C2_BIT 15 #define UMC_V12_0_PA_C4_BIT 21 /* row bits in SOC physical address */ #define UMC_V12_0_PA_R13_BIT 35 -/* channel index bits in SOC physical address */ -#define UMC_V12_0_PA_CH4_BIT 12 -#define UMC_V12_0_PA_CH5_BIT 13 -#define UMC_V12_0_PA_CH6_BIT 14 - -/* bank hash settings */ -#define UMC_V12_0_XOR_EN0 1 -#define UMC_V12_0_XOR_EN1 1 -#define UMC_V12_0_XOR_EN2 1 -#define UMC_V12_0_XOR_EN3 1 -#define UMC_V12_0_COL_XOR0 0x0 -#define UMC_V12_0_COL_XOR1 0x0 -#define UMC_V12_0_COL_XOR2 0x800 -#define UMC_V12_0_COL_XOR3 0x1000 -#define UMC_V12_0_ROW_XOR0 0x11111 -#define UMC_V12_0_ROW_XOR1 0x22222 -#define UMC_V12_0_ROW_XOR2 0x4444 -#define UMC_V12_0_ROW_XOR3 0x8888 - -/* channel hash settings 
*/ -#define UMC_V12_0_HASH_4K 0 -#define UMC_V12_0_HASH_64K 1 -#define UMC_V12_0_HASH_2M 1 -#define UMC_V12_0_HASH_1G 1 -#define UMC_V12_0_HASH_1T 1 - -/* XOR some bits of PA into CH4~CH6 bits (bits 12~14 of PA), - * hash bit is only effective when related setting is enabled - */ -#define UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) ((((channel_idx) >> 5) & 0x1) ^ \ - (((pa) >> 20) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ - (((pa) >> 27) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ - (((pa) >> 34) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ - (((pa) >> 41) & 0x1ULL & UMC_V12_0_HASH_1T)) -#define UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) ((((channel_idx) >> 6) & 0x1) ^ \ - (((pa) >> 21) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ - (((pa) >> 28) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ - (((pa) >> 35) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ - (((pa) >> 42) & 0x1ULL & UMC_V12_0_HASH_1T)) -#define UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) ((((channel_idx) >> 4) & 0x1) ^ \ - (((pa) >> 19) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ - (((pa) >> 26) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ - (((pa) >> 33) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ - (((pa) >> 40) & 0x1ULL & UMC_V12_0_HASH_1T) ^ \ - (((pa) >> 47) & 0x1ULL & UMC_V12_0_HASH_4K)) -#define UMC_V12_0_SET_CHANNEL_HASH(channel_idx, pa) do { \ - (pa) &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); \ - (pa) |= (UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) << UMC_V12_0_PA_CH4_BIT); \ - (pa) |= (UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) << UMC_V12_0_PA_CH5_BIT); \ - (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \ - } while (0) + +#define MCA_UMC_HWID_V12_0 0x96 +#define MCA_UMC_MCATYPE_V12_0 0x0 #define MCA_IPID_LO_2_UMC_CH(_ipid_lo) (((((_ipid_lo) >> 20) & 0x1) * 4) + \ (((_ipid_lo) >> 12) & 0xF)) #define MCA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7) +#define MCA_IPID_2_DIE_ID(ipid) ((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) >> 2) & 0x03) + +#define MCA_IPID_2_UMC_CH(ipid) \ + (MCA_IPID_LO_2_UMC_CH(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo))) + +#define MCA_IPID_2_UMC_INST(ipid) \ + (MCA_IPID_LO_2_UMC_INST(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo))) + +#define MCA_IPID_2_SOCKET_ID(ipid) \ + (((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \ + (REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03)) + bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status); bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status); bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status); typedef bool (*check_error_type_func)(struct amdgpu_device *adev, uint64_t mc_umc_status); -extern const uint32_t - umc_v12_0_channel_idx_tbl[] - [UMC_V12_0_UMC_INSTANCE_NUM] - [UMC_V12_0_CHANNEL_INSTANCE_NUM]; - extern struct amdgpu_umc_ras umc_v12_0_ras; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c index c4c7725771..a32f87992f 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c @@ -442,11 +442,6 @@ static void umc_v8_10_ecc_info_query_ras_error_address(struct amdgpu_device *ade umc_v8_10_ecc_info_query_error_address, ras_error_status); } -static void umc_v8_10_set_eeprom_table_version(struct amdgpu_ras_eeprom_table_header *hdr) -{ - hdr->version = RAS_TABLE_VER_V2_1; -} - const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = { .query_ras_error_count = umc_v8_10_query_ras_error_count, .query_ras_error_address = umc_v8_10_query_ras_error_address, @@ -460,5 +455,4 @@ struct 
amdgpu_umc_ras umc_v8_10_ras = { .query_ras_poison_mode = umc_v8_10_query_ras_poison_mode, .ecc_info_query_ras_error_count = umc_v8_10_ecc_info_query_ras_error_count, .ecc_info_query_ras_error_address = umc_v8_10_ecc_info_query_ras_error_address, - .set_eeprom_table_version = umc_v8_10_set_eeprom_table_version, }; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index a6006f231c..805d6662c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -819,6 +819,8 @@ static const struct amd_ip_funcs uvd_v3_1_ip_funcs = { .soft_reset = uvd_v3_1_soft_reset, .set_clockgating_state = uvd_v3_1_set_clockgating_state, .set_powergating_state = uvd_v3_1_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version uvd_v3_1_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 1aa09ad7bb..3f19c606f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -769,6 +769,8 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { .soft_reset = uvd_v4_2_soft_reset, .set_clockgating_state = uvd_v4_2_set_clockgating_state, .set_powergating_state = uvd_v4_2_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index f8b229b754..efd903c21d 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -877,6 +877,8 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { .set_clockgating_state = uvd_v5_0_set_clockgating_state, .set_powergating_state = uvd_v5_0_set_powergating_state, .get_clockgating_state = uvd_v5_0_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index a9a6880f44..495de50684 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -1545,6 +1545,8 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .set_clockgating_state = uvd_v6_0_set_clockgating_state, .set_powergating_state = uvd_v6_0_set_powergating_state, .get_clockgating_state = uvd_v6_0_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index a08e7abca4..66fada199b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -626,6 +626,8 @@ static const struct amd_ip_funcs vce_v2_0_ip_funcs = { .soft_reset = vce_v2_0_soft_reset, .set_clockgating_state = vce_v2_0_set_clockgating_state, .set_powergating_state = vce_v2_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index f4760748d3..32517c364c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -913,6 +913,8 @@ static const struct amd_ip_funcs vce_v3_0_ip_funcs = { .set_clockgating_state = vce_v3_0_set_clockgating_state, .set_powergating_state = vce_v3_0_set_powergating_state, 
.get_clockgating_state = vce_v3_0_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index aaceecd558..cb253bd3a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1902,6 +1902,8 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */, .set_clockgating_state = vcn_v1_0_set_clockgating_state, .set_powergating_state = vcn_v1_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index e357d8cf0c..f18fd61c43 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -2008,6 +2008,8 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_0_set_clockgating_state, .set_powergating_state = vcn_v2_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 1cd8a94b0f..baec14bde2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -1901,6 +1901,8 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_5_set_clockgating_state, .set_powergating_state = vcn_v2_5_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amd_ip_funcs vcn_v2_6_ip_funcs = { @@ -1921,6 +1923,8 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v2_5_set_clockgating_state, .set_powergating_state = vcn_v2_5_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version vcn_v2_5_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 8f82fb887e..6b31cf4b8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -359,6 +359,7 @@ static int vcn_v3_0_hw_init(void *handle) } } + return 0; done: if (!r) DRM_INFO("VCN decode and encode initialized successfully(under %s).\n", @@ -2230,6 +2231,8 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v3_0_set_clockgating_state, .set_powergating_state = vcn_v3_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version vcn_v3_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 832d15f7b5..9a33d3d000 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -288,6 +288,7 @@ static int vcn_v4_0_hw_init(void *handle) } } + return 0; done: if (!r) DRM_INFO("VCN decode and encode initialized successfully(under %s).\n", @@ -1052,6 +1053,9 @@ static int vcn_v4_0_start(struct amdgpu_device *adev) amdgpu_dpm_enable_uvd(adev, true); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { @@ -1505,6 +1509,9 @@ static int 
vcn_v4_0_stop(struct amdgpu_device *adev) int i, r = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; @@ -2130,6 +2137,8 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_set_clockgating_state, .set_powergating_state = vcn_v4_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version vcn_v4_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 203fa98832..2279d8fce0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -1660,6 +1660,8 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_3_set_clockgating_state, .set_powergating_state = vcn_v4_0_3_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 501e53e69f..30e80c6f11 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -237,6 +237,7 @@ static int vcn_v4_0_5_hw_init(void *handle) goto done; } + return 0; done: if (!r) DRM_INFO("VCN decode and encode initialized successfully(under %s).\n", @@ -963,6 +964,9 @@ static int vcn_v4_0_5_start(struct amdgpu_device *adev) amdgpu_dpm_enable_uvd(adev, true); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { @@ -1167,6 +1171,9 @@ static int vcn_v4_0_5_stop(struct amdgpu_device *adev) int i, r = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; @@ -1752,6 +1759,8 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v4_0_5_set_clockgating_state, .set_powergating_state = vcn_v4_0_5_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index bc60c554eb..fbd3f7a582 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -95,7 +95,7 @@ static int vcn_v5_0_0_sw_init(void *handle) return r; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + volatile struct amdgpu_vcn5_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -154,7 +154,7 @@ static int vcn_v5_0_0_sw_fini(void *handle) if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + volatile struct amdgpu_vcn5_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -203,6 +203,7 @@ static int vcn_v5_0_0_hw_init(void *handle) goto done; } + return 0; done: if (!r) DRM_INFO("VCN decode and encode initialized successfully(under %s).\n", @@ -334,7 +335,7 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device 
*adev, int inst) upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0); WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0, - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared))); + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared))); } /** @@ -438,7 +439,7 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0), - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect); + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect); /* VCN global tiling registers */ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( @@ -615,7 +616,7 @@ static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst) */ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t tmp; @@ -712,7 +713,7 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b */ static int vcn_v5_0_0_start(struct amdgpu_device *adev) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + volatile struct amdgpu_vcn5_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t tmp; int i, j, k, r; @@ -721,6 +722,9 @@ static int vcn_v5_0_0_start(struct amdgpu_device *adev) amdgpu_dpm_enable_uvd(adev, true); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { @@ -893,11 +897,14 @@ static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) */ static int vcn_v5_0_0_stop(struct amdgpu_device *adev) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + volatile struct amdgpu_vcn5_fw_shared *fw_shared; uint32_t tmp; int i, r = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; @@ -1328,6 +1335,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = { .post_soft_reset = NULL, .set_clockgating_state = vcn_v5_0_0_set_clockgating_state, .set_powergating_state = vcn_v5_0_0_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 1a98812981..d39c670f62 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -897,7 +897,7 @@ static int vi_asic_pci_config_reset(struct amdgpu_device *adev) return r; } -static bool vi_asic_supports_baco(struct amdgpu_device *adev) +static int vi_asic_supports_baco(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_FIJI: @@ -908,14 +908,14 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev) case CHIP_TOPAZ: return amdgpu_dpm_is_baco_supported(adev); default: - return false; + return 0; } } static enum amd_reset_method vi_asic_reset_method(struct amdgpu_device *adev) { - bool baco_reset; + int baco_reset; if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY || 
amdgpu_reset_method == AMD_RESET_METHOD_BACO) @@ -935,7 +935,7 @@ vi_asic_reset_method(struct amdgpu_device *adev) baco_reset = amdgpu_dpm_is_baco_supported(adev); break; default: - baco_reset = false; + baco_reset = 0; break; } @@ -2058,6 +2058,8 @@ static const struct amd_ip_funcs vi_common_ip_funcs = { .set_clockgating_state = vi_common_set_clockgating_state, .set_powergating_state = vi_common_set_powergating_state, .get_clockgating_state = vi_common_get_clockgating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; static const struct amdgpu_ip_block_version vi_common_ip_block = diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 1e6cc0bfc4..fdf171ad4a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -371,6 +371,11 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, err = -EINVAL; goto err_wptr_map_gart; } + if (dev->adev != amdgpu_ttm_adev(wptr_bo->tbo.bdev)) { + pr_err("Queue memory allocated to wrong device\n"); + err = -EINVAL; + goto err_wptr_map_gart; + } err = amdgpu_amdkfd_map_gtt_bo_to_gart(wptr_bo); if (err) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index ff01610fbc..afc57df421 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -428,12 +428,12 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) if (!f2g) { if (amdgpu_ip_version(adev, GC_HWIP, 0)) - dev_err(kfd_device, + dev_info(kfd_device, "GC IP %06x %s not supported in kfd\n", amdgpu_ip_version(adev, GC_HWIP, 0), vf ? "VF" : ""); else - dev_err(kfd_device, "%s %s not supported in kfd\n", + dev_info(kfd_device, "%s %s not supported in kfd\n", amdgpu_asic_name[adev->asic_type], vf ? 
"VF" : ""); return NULL; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 0b655555e1..c08b6ee252 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1997,8 +1997,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, * check those fields */ mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]; - if (mqd_mgr->read_doorbell_id(dqm->packet_mgr.priv_queue->queue->mqd)) { - dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n"); + if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) { while (halt_if_hws_hang) schedule(); kfd_hws_hang(dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c index 40a21be6c0..8e0d0356e8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -134,6 +134,7 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev, { enum amdgpu_ras_block block = 0; int old_poison, ret = -EINVAL; + uint32_t reset = 0; struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); if (!p) @@ -153,6 +154,8 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev, case SOC15_IH_CLIENTID_UTCL2: ret = kfd_dqm_evict_pasid(dev->dqm, pasid); block = AMDGPU_RAS_BLOCK__GFX; + if (ret) + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; case SOC15_IH_CLIENTID_SDMA0: case SOC15_IH_CLIENTID_SDMA1: @@ -160,6 +163,7 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev, case SOC15_IH_CLIENTID_SDMA3: case SOC15_IH_CLIENTID_SDMA4: block = AMDGPU_RAS_BLOCK__SDMA; + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; default: break; @@ -170,17 +174,16 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev, /* resetting queue passes, do page retirement without gpu reset * resetting queue fails, fallback to gpu reset solution */ - if (!ret) { + if (!ret) dev_warn(dev->adev->dev, "RAS poison consumption, unmap queue flow succeeded: client id %d\n", client_id); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false); - } else { + else dev_warn(dev->adev->dev, "RAS poison consumption, fall back to gpu reset flow: client id %d\n", client_id); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true); - } + + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset); } static bool event_interrupt_isr_v10(struct kfd_node *dev, @@ -368,10 +371,25 @@ static void event_interrupt_wq_v10(struct kfd_node *dev, client_id == SOC15_IH_CLIENTID_UTCL2) { struct kfd_vm_fault_info info = {0}; uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); + uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry); + uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry); + int hub_inst = 0; struct kfd_hsa_memory_exception_data exception_data; - if (client_id == SOC15_IH_CLIENTID_UTCL2 && - amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) { + /* gfxhub */ + if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) { + hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev, + node_id); + if (hub_inst < 0) + hub_inst = 0; + } + + /* mmhub */ + if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC) + hub_inst = node_id / 4; + + if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, + hub_inst, vmid_type)) { event_interrupt_poison_consumption(dev, 
pasid, client_id); return; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index fe2ad0c0de..f524a55eee 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -193,6 +193,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev, { enum amdgpu_ras_block block = 0; int ret = -EINVAL; + uint32_t reset = 0; struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); if (!p) @@ -212,10 +213,13 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev, if (dev->dqm->ops.reset_queues) ret = dev->dqm->ops.reset_queues(dev->dqm, pasid); block = AMDGPU_RAS_BLOCK__GFX; + if (ret) + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; case SOC21_INTSRC_SDMA_ECC: default: block = AMDGPU_RAS_BLOCK__GFX; + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; } @@ -223,10 +227,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev, /* resetting queue passes, do page retirement without gpu reset resetting queue fails, fallback to gpu reset solution */ - if (!ret) - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false); - else - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset); } static bool event_interrupt_isr_v11(struct kfd_node *dev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index c4c6a29052..e1c21d2506 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -144,7 +144,8 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, uint16_t pasid, uint16_t client_id) { enum amdgpu_ras_block block = 0; - int old_poison, ret = -EINVAL; + int old_poison; + uint32_t reset = 0; struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); if (!p) @@ -162,8 +163,13 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, case SOC15_IH_CLIENTID_SE2SH: case SOC15_IH_CLIENTID_SE3SH: case SOC15_IH_CLIENTID_UTCL2: - ret = kfd_dqm_evict_pasid(dev->dqm, pasid); block = AMDGPU_RAS_BLOCK__GFX; + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; + break; + case SOC15_IH_CLIENTID_VMC: + case SOC15_IH_CLIENTID_VMC1: + block = AMDGPU_RAS_BLOCK__MMHUB; + reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; break; case SOC15_IH_CLIENTID_SDMA0: case SOC15_IH_CLIENTID_SDMA1: @@ -171,27 +177,21 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, case SOC15_IH_CLIENTID_SDMA3: case SOC15_IH_CLIENTID_SDMA4: block = AMDGPU_RAS_BLOCK__SDMA; + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; default: - break; + dev_warn(dev->adev->dev, + "client %d does not support poison consumption\n", client_id); + return; } kfd_signal_poison_consumed_event(dev, pasid); - /* resetting queue passes, do page retirement without gpu reset - * resetting queue fails, fallback to gpu reset solution - */ - if (!ret) { - dev_warn(dev->adev->dev, - "RAS poison consumption, unmap queue flow succeeded: client id %d\n", - client_id); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false); - } else { - dev_warn(dev->adev->dev, - "RAS poison consumption, fall back to gpu reset flow: client id %d\n", - client_id); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true); - } + dev_warn(dev->adev->dev, + "poison is consumed by client %d, kick off gpu reset flow\n", client_id); + + 
amdgpu_amdkfd_ras_pasid_poison_consumption_handler(dev->adev, + block, pasid, NULL, NULL, reset); } static bool context_id_expected(struct kfd_dev *dev) @@ -414,10 +414,25 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, client_id == SOC15_IH_CLIENTID_UTCL2) { struct kfd_vm_fault_info info = {0}; uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); + uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry); + uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry); + int hub_inst = 0; struct kfd_hsa_memory_exception_data exception_data; - if (client_id == SOC15_IH_CLIENTID_UTCL2 && - amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) { + /* gfxhub */ + if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) { + hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev, + node_id); + if (hub_inst < 0) + hub_inst = 0; + } + + /* mmhub */ + if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC) + hub_inst = node_id / 4; + + if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, + hub_inst, vmid_type)) { event_interrupt_poison_consumption_v9(dev, pasid, client_id); return; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index dd3c43c1ad..9b6b6e8825 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -104,6 +104,8 @@ void kfd_interrupt_exit(struct kfd_node *node) */ flush_workqueue(node->ih_wq); + destroy_workqueue(node->ih_wq); + kfifo_free(&node->ih_fifo); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index ba651d12f1..8ee3d07ffb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -77,7 +77,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo); amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, - dst_addr, num_bytes, false); + dst_addr, num_bytes, 0); amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > num_dw); @@ -153,7 +153,7 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys, } r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE, - NULL, &next, false, true, false); + NULL, &next, false, true, 0); if (r) { dev_err(adev->dev, "fail %d to copy memory\n", r); goto out_unlock; @@ -1023,7 +1023,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev) if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1)) return -EINVAL; - if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) + if (adev->flags & AMD_IS_APU) return 0; pgmap = &kfddev->pgmap; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 050a6936ff..8746a61a85 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -290,3 +290,21 @@ uint64_t kfd_mqd_stride(struct mqd_manager *mm, { return mm->mqd_size; } + +bool kfd_check_hiq_mqd_doorbell_id(struct kfd_node *node, uint32_t doorbell_id, + uint32_t inst) +{ + if (doorbell_id) { + struct device *dev = node->adev->dev; + + if (node->adev->xcp_mgr && node->adev->xcp_mgr->num_xcps > 0) + dev_err(dev, "XCC %d: Queue preemption failed for queue with doorbell_id: %x\n", + inst, doorbell_id); + else + dev_err(dev, "Queue preemption failed for queue with doorbell_id: %x\n", + doorbell_id); + return true; + } + + return false; +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index e5cc697a3c..17cc1f25c8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -119,7 +119,7 @@ struct mqd_manager { #if defined(CONFIG_DEBUG_FS) int (*debugfs_show_mqd)(struct seq_file *m, void *data); #endif - uint32_t (*read_doorbell_id)(void *mqd); + bool (*check_preemption_failed)(struct mqd_manager *mm, void *mqd); uint64_t (*mqd_stride)(struct mqd_manager *mm, struct queue_properties *p); @@ -198,4 +198,6 @@ void kfd_get_hiq_xcc_mqd(struct kfd_node *dev, uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev); uint64_t kfd_mqd_stride(struct mqd_manager *mm, struct queue_properties *q); +bool kfd_check_hiq_mqd_doorbell_id(struct kfd_node *node, uint32_t doorbell_id, + uint32_t inst); #endif /* KFD_MQD_MANAGER_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 1a4a69943c..05f3ac2eae 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -206,11 +206,11 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } -static uint32_t read_doorbell_id(void *mqd) +static bool check_preemption_failed(struct mqd_manager *mm, void *mqd) { struct cik_mqd *m = (struct cik_mqd *)mqd; - return m->queue_doorbell_id0; + return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0); } static void update_mqd(struct mqd_manager *mm, void *mqd, @@ -423,7 +423,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif - mqd->read_doorbell_id = read_doorbell_id; + mqd->check_preemption_failed = check_preemption_failed; break; case KFD_MQD_TYPE_DIQ: mqd->allocate_mqd = allocate_mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 22cbfa1bda..2eff37aaf8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -224,11 +224,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } -static uint32_t read_doorbell_id(void *mqd) +static bool check_preemption_failed(struct mqd_manager *mm, void *mqd) { struct v10_compute_mqd *m = (struct v10_compute_mqd *)mqd; - return m->queue_doorbell_id0; + return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0); } static int get_wave_state(struct mqd_manager *mm, void *mqd, @@ -488,7 +488,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif - mqd->read_doorbell_id = read_doorbell_id; + mqd->check_preemption_failed = check_preemption_failed; pr_debug("%s@%i\n", __func__, __LINE__); break; case KFD_MQD_TYPE_DIQ: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 826bc4f6c8..68dbc0399c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -278,11 +278,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } -static uint32_t read_doorbell_id(void *mqd) +static bool check_preemption_failed(struct mqd_manager *mm, void *mqd) { struct v11_compute_mqd *m = (struct v11_compute_mqd *)mqd; - return m->queue_doorbell_id0; + return 
kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0); } static int get_wave_state(struct mqd_manager *mm, void *mqd, @@ -517,7 +517,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif - mqd->read_doorbell_id = read_doorbell_id; + mqd->check_preemption_failed = check_preemption_failed; pr_debug("%s@%i\n", __func__, __LINE__); break; case KFD_MQD_TYPE_DIQ: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 697b6d530d..8ec136eba5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -316,11 +316,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, } -static uint32_t read_doorbell_id(void *mqd) +static bool check_preemption_failed(struct mqd_manager *mm, void *mqd) { struct v9_mqd *m = (struct v9_mqd *)mqd; - return m->queue_doorbell_id0; + return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0); } static int get_wave_state(struct mqd_manager *mm, void *mqd, @@ -607,6 +607,24 @@ static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, return err; } +static bool check_preemption_failed_v9_4_3(struct mqd_manager *mm, void *mqd) +{ + uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); + uint32_t xcc_mask = mm->dev->xcc_mask; + int inst = 0, xcc_id; + struct v9_mqd *m; + bool ret = false; + + for_each_inst(xcc_id, xcc_mask) { + m = get_mqd(mqd + hiq_mqd_size * inst); + ret |= kfd_check_hiq_mqd_doorbell_id(mm->dev, + m->queue_doorbell_id0, inst); + ++inst; + } + + return ret; +} + static void get_xcc_mqd(struct kfd_mem_obj *mqd_mem_obj, struct kfd_mem_obj *xcc_mqd_mem_obj, uint64_t offset) @@ -695,7 +713,7 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd + size * xcc); update_mqd(mm, m, q, minfo); - update_cu_mask(mm, mqd, minfo, xcc); + update_cu_mask(mm, m, minfo, xcc); if (q->format == KFD_QUEUE_FORMAT_AQL) { switch (xcc) { @@ -881,15 +899,16 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif - mqd->read_doorbell_id = read_doorbell_id; if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) { mqd->init_mqd = init_mqd_hiq_v9_4_3; mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3; mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3; + mqd->check_preemption_failed = check_preemption_failed_v9_4_3; } else { mqd->init_mqd = init_mqd_hiq; mqd->load_mqd = kfd_hiq_load_mqd_kiq; mqd->destroy_mqd = destroy_hiq_mqd; + mqd->check_preemption_failed = check_preemption_failed; } break; case KFD_MQD_TYPE_DIQ: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 3e1a574d4e..c1fafc5025 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -237,11 +237,11 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } -static uint32_t read_doorbell_id(void *mqd) +static bool check_preemption_failed(struct mqd_manager *mm, void *mqd) { struct vi_mqd *m = (struct vi_mqd *)mqd; - return m->queue_doorbell_id0; + return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0); } static void update_mqd(struct mqd_manager *mm, void *mqd, @@ -482,7 +482,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = 
debugfs_show_mqd; #endif - mqd->read_doorbell_id = read_doorbell_id; + mqd->check_preemption_failed = check_preemption_failed; break; case KFD_MQD_TYPE_DIQ: mqd->allocate_mqd = allocate_mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 069b81eeea..31e500859a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -2619,8 +2619,7 @@ svm_range_best_restore_location(struct svm_range *prange, return -1; } - if (node->adev->gmc.is_app_apu || - node->adev->flags & AMD_IS_APU) + if (node->adev->flags & AMD_IS_APU) return 0; if (prange->preferred_loc == gpuid || @@ -3338,8 +3337,7 @@ svm_range_best_prefetch_location(struct svm_range *prange) goto out; } - if (bo_node->adev->gmc.is_app_apu || - bo_node->adev->flags & AMD_IS_APU) { + if (bo_node->adev->flags & AMD_IS_APU) { best_loc = 0; goto out; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 9c37bd0567..70c1776611 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -201,7 +201,6 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s * is initialized to not 0 when page migration register device memory. */ #define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\ - (adev)->gmc.is_app_apu ||\ ((adev)->flags & AMD_IS_APU)) void svm_range_bo_unref_async(struct svm_range_bo *svm_bo); diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 901d1961b7..47b8b49da8 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -8,7 +8,7 @@ config DRM_AMD_DC depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64 select SND_HDA_COMPONENT if SND_HDA_CORE # !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752 - select DRM_AMD_DC_FP if (X86 || LOONGARCH || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG)) + select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || RISCV)) help Choose this option if you want to use the new display engine support for AMDGPU. 
This adds required support for Vega and diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index 92a5c5efcf..9a5bcafbf7 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -33,6 +33,7 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hwss subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/resource subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dsc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/optc +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dpp subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c29d271579..3cdcadd41b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1230,6 +1230,15 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) break; } + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + hw_params.ips_sequential_ono = adev->external_rev_id > 0x10; + break; + default: + break; + } + status = dmub_srv_hw_init(dmub_srv, &hw_params); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error initializing DMUB HW: %d\n", status); @@ -1726,8 +1735,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_debug_mask & DC_DISABLE_IPS) init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL; + else + init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; - init_data.flags.disable_ips_in_vpb = 1; + init_data.flags.disable_ips_in_vpb = 0; /* Enable DWB for tested platforms only */ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) @@ -2629,6 +2640,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) int i; struct dc_stream_state *del_streams[MAX_PIPES]; int del_streams_count = 0; + struct dc_commit_streams_params params = {}; memset(del_streams, 0, sizeof(del_streams)); @@ -2655,7 +2667,9 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) goto fail; } - res = dc_commit_streams(dc, context->streams, context->stream_count); + params.streams = context->streams; + params.stream_count = context->stream_count; + res = dc_commit_streams(dc, &params); fail: dc_state_release(context); @@ -2877,6 +2891,7 @@ static int dm_resume(void *handle) struct dc_state *dc_state; int i, r, j, ret; bool need_hotplug = false; + struct dc_commit_streams_params commit_params = {}; if (dm->dc->caps.ips_support) { dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false); @@ -2926,7 +2941,9 @@ static int dm_resume(void *handle) dc_enable_dmub_outbox(adev->dm.dc); } - WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); + commit_params.streams = dc_state->streams; + commit_params.stream_count = dc_state->stream_count; + WARN_ON(!dc_commit_streams(dm->dc, &commit_params)); dm_gpureset_commit_state(dm->cached_dc_state, dm); @@ -2943,7 +2960,7 @@ static int dm_resume(void *handle) } /* Recreate dc_state - DC invalidates it when setting power state to S3. */ dc_state_release(dm_state->context); - dm_state->context = dc_state_create(dm->dc); + dm_state->context = dc_state_create(dm->dc, NULL); /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ /* Before powering on DC we need to re-initialize DMUB.
*/ @@ -3104,6 +3121,8 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = { .soft_reset = dm_soft_reset, .set_clockgating_state = dm_set_clockgating_state, .set_powergating_state = dm_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version dm_ip_block = { @@ -5713,8 +5732,8 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->aspect_ratio = get_aspect_ratio(mode_in); - stream->out_transfer_func->type = TF_TYPE_PREDEFINED; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; + stream->out_transfer_func.type = TF_TYPE_PREDEFINED; + stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB; if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { if (!adjust_colour_depth_from_display_info(timing_out, info) && drm_mode_is_420_also(info, mode_in) && @@ -6332,7 +6351,7 @@ create_stream_for_sink(struct drm_connector *connector, stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED; - if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) + if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; @@ -6803,7 +6822,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, if (!dc_plane_state) goto cleanup; - dc_state = dc_state_create(dc); + dc_state = dc_state_create(dc, NULL); if (!dc_state) goto cleanup; @@ -8405,13 +8424,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].surface = dc_plane; if (new_pcrtc_state->color_mgmt_changed) { - bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; - bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; + bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction; + bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func; bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult; - bundle->surface_updates[planes_count].func_shaper = dc_plane->in_shaper_func; - bundle->surface_updates[planes_count].lut3d_func = dc_plane->lut3d_func; - bundle->surface_updates[planes_count].blend_tf = dc_plane->blend_tf; + bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func; + bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func; + bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf; } amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, @@ -8624,7 +8643,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->stream_update.output_csc_transform = &acrtc_state->stream->csc_color_matrix; bundle->stream_update.out_transfer_func = - acrtc_state->stream->out_transfer_func; + &acrtc_state->stream->out_transfer_func; bundle->stream_update.lut3d_func = (struct dc_3dlut *) acrtc_state->stream->lut3d_func; bundle->stream_update.func_shaper = @@ -8858,6 +8877,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, struct drm_connector *connector; bool mode_set_reset_required = false; u32 i; + struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; /* Disable writeback */ for_each_old_connector_in_state(state, 
connector, old_con_state, i) { @@ -8994,7 +9014,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, dm_enable_per_frame_crtc_master_sync(dc_state); mutex_lock(&dm->dc_lock); - WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); + WARN_ON(!dc_commit_streams(dm->dc, ¶ms)); /* Allow idle optimization when vblank count is 0 for display off */ if (dm->active_vblank_irq_count == 0) @@ -10028,6 +10048,7 @@ skip_modeset: } /* Update Freesync settings. */ + reset_freesync_config_for_crtc(dm_new_crtc_state); get_freesync_config_for_crtc(dm_new_crtc_state, dm_new_conn_state); @@ -11467,6 +11488,12 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev) mutex_unlock(&adev->dm.dc_lock); } +static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc) +{ + if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter) + dc_exit_ips_for_hw_access(dc); +} + void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, u32 value, const char *func_name) { @@ -11477,6 +11504,8 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, return; } #endif + + amdgpu_dm_exit_ips_for_hw_access(ctx->dc); cgs_write_register(ctx->cgs_device, address, value); trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value); } @@ -11500,6 +11529,8 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, return 0; } + amdgpu_dm_exit_ips_for_hw_access(ctx->dc); + value = cgs_read_register(ctx->cgs_device, address); trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index c87b64e464..ebabfe3a51 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -571,7 +571,7 @@ static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream, uint32_t regamma_size, bool has_rom, enum dc_transfer_func_predefined tf) { - struct dc_transfer_func *out_tf = stream->out_transfer_func; + struct dc_transfer_func *out_tf = &stream->out_transfer_func; int ret = 0; if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) { @@ -954,8 +954,8 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) * inverse color ramp in legacy userspace. */ crtc->cm_is_degamma_srgb = true; - stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; + stream->out_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS; + stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB; /* * Note: although we pass has_rom as parameter here, we never * actually use ROM because the color module only takes the ROM @@ -963,7 +963,7 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) * * See more in mod_color_calculate_regamma_params() */ - r = __set_legacy_tf(stream->out_transfer_func, regamma_lut, + r = __set_legacy_tf(&stream->out_transfer_func, regamma_lut, regamma_size, has_rom); if (r) return r; @@ -1034,7 +1034,7 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc, °amma_size); ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES); - dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; + dc_plane_state->in_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS; /* * This case isn't fully correct, but also fairly @@ -1061,12 +1061,12 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc, * map these to the atomic one instead. 
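
dm_write_reg_func() and dm_read_reg_func() above now call the new amdgpu_dm_exit_ips_for_hw_access() helper, which only issues dc_exit_ips_for_hw_access() when dmub_srv exists and its idle_exit_counter is zero, so every MMIO access through these wrappers is guaranteed to happen with the hardware out of IPS. A stand-alone model of the guard follows; the counter increment inside the stub is purely a demo device to make the second call skip the exit, the real bookkeeping is done by the DMUB service:

#include <stdio.h>

struct dmub_srv_model { int idle_exit_counter; };
struct dc_model { struct dmub_srv_model *dmub_srv; };

static void exit_ips_for_hw_access(struct dc_model *dc)
{
        printf("kick hardware out of IPS before touching registers\n");
        dc->dmub_srv->idle_exit_counter++;      /* demo-only bookkeeping */
}

/* Mirrors the guard added above: only force an exit when none is accounted for. */
static void exit_ips_for_hw_access_if_needed(struct dc_model *dc)
{
        if (dc->dmub_srv && !dc->dmub_srv->idle_exit_counter)
                exit_ips_for_hw_access(dc);
}

static unsigned int read_reg(struct dc_model *dc, unsigned int address)
{
        (void)address;
        exit_ips_for_hw_access_if_needed(dc);   /* as in dm_read_reg_func() */
        return 0;                               /* cgs_read_register() in the real code */
}

int main(void)
{
        struct dmub_srv_model srv = { .idle_exit_counter = 0 };
        struct dc_model dc = { .dmub_srv = &srv };

        read_reg(&dc, 0x1234);
        read_reg(&dc, 0x1238);  /* counter is non-zero now, no second exit */
        return 0;
}
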
*/ if (crtc->cm_is_degamma_srgb) - dc_plane_state->in_transfer_func->tf = tf; + dc_plane_state->in_transfer_func.tf = tf; else - dc_plane_state->in_transfer_func->tf = + dc_plane_state->in_transfer_func.tf = TRANSFER_FUNCTION_LINEAR; - r = __set_input_tf(caps, dc_plane_state->in_transfer_func, + r = __set_input_tf(caps, &dc_plane_state->in_transfer_func, degamma_lut, degamma_size); if (r) return r; @@ -1075,12 +1075,12 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc, * For legacy gamma support we need the regamma input * in linear space. Assume that the input is sRGB. */ - dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED; - dc_plane_state->in_transfer_func->tf = tf; + dc_plane_state->in_transfer_func.type = TF_TYPE_PREDEFINED; + dc_plane_state->in_transfer_func.tf = tf; if (tf != TRANSFER_FUNCTION_SRGB && !mod_color_calculate_degamma_params(caps, - dc_plane_state->in_transfer_func, + &dc_plane_state->in_transfer_func, NULL, false)) return -ENOMEM; } @@ -1114,24 +1114,24 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state, if (!has_degamma_lut && tf == AMDGPU_TRANSFER_FUNCTION_DEFAULT) return -EINVAL; - dc_plane_state->in_transfer_func->tf = amdgpu_tf_to_dc_tf(tf); + dc_plane_state->in_transfer_func.tf = amdgpu_tf_to_dc_tf(tf); if (has_degamma_lut) { ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES); - dc_plane_state->in_transfer_func->type = + dc_plane_state->in_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS; - ret = __set_input_tf(color_caps, dc_plane_state->in_transfer_func, + ret = __set_input_tf(color_caps, &dc_plane_state->in_transfer_func, degamma_lut, degamma_size); if (ret) return ret; } else { - dc_plane_state->in_transfer_func->type = + dc_plane_state->in_transfer_func.type = TF_TYPE_PREDEFINED; if (!mod_color_calculate_degamma_params(color_caps, - dc_plane_state->in_transfer_func, NULL, false)) + &dc_plane_state->in_transfer_func, NULL, false)) return -ENOMEM; } return 0; @@ -1156,11 +1156,11 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, lut3d = __extract_blob_lut(dm_plane_state->lut3d, &lut3d_size); lut3d_size = lut3d != NULL ? lut3d_size : 0; - amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, dc_plane_state->lut3d_func); + amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, &dc_plane_state->lut3d_func); ret = amdgpu_dm_atomic_shaper_lut(shaper_lut, false, amdgpu_tf_to_dc_tf(shaper_tf), shaper_size, - dc_plane_state->in_shaper_func); + &dc_plane_state->in_shaper_func); if (ret) { drm_dbg_kms(plane_state->plane->dev, "setting plane %d shaper LUT failed.\n", @@ -1175,7 +1175,7 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, ret = amdgpu_dm_atomic_blend_lut(blend_lut, false, amdgpu_tf_to_dc_tf(blend_tf), - blend_size, dc_plane_state->blend_tf); + blend_size, &dc_plane_state->blend_tf); if (ret) { drm_dbg_kms(plane_state->plane->dev, "setting plane %d gamma lut failed.\n", @@ -1221,8 +1221,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, color_caps = &dc_plane_state->ctx->dc->caps.color; /* Initially, we can just bypass the DGM block. 
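
Most of the amdgpu_dm_color.c churn here (and the matching dc_stream_state/dc_plane_state changes later in dc.c) comes from one structural change: the per-plane and per-stream transfer functions, shaper/blend curves and 3D LUTs are now embedded members of the surface and stream objects instead of being accessed through pointer members, so call sites switch from '->' to '.' and pass '&' where an address is still required. A minimal stand-alone illustration of that kind of migration; the names below are illustrative, not the DC structs:

#include <stdio.h>

struct transfer_func { int type; int tf; };

/* Before: the transfer function was referenced through a pointer member. */
struct plane_v1 { struct transfer_func *in_transfer_func; };

/* After: the transfer function is embedded in the plane itself. */
struct plane_v2 { struct transfer_func in_transfer_func; };

static void set_tf(struct transfer_func *tf, int type, int curve)
{
        tf->type = type;
        tf->tf = curve;
}

int main(void)
{
        struct transfer_func tf_storage = {0};
        struct plane_v1 old_plane = { .in_transfer_func = &tf_storage };
        struct plane_v2 new_plane = {0};

        set_tf(old_plane.in_transfer_func, 1, 2);       /* pointer member: pass as-is */
        set_tf(&new_plane.in_transfer_func, 1, 2);      /* embedded member: take its address */

        printf("%d %d\n", new_plane.in_transfer_func.type, new_plane.in_transfer_func.tf);
        return 0;
}
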
*/ - dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS; - dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; + dc_plane_state->in_transfer_func.type = TF_TYPE_BYPASS; + dc_plane_state->in_transfer_func.tf = TRANSFER_FUNCTION_LINEAR; /* After, we start to update values according to color props */ has_crtc_cm_degamma = (crtc->cm_has_degamma || crtc->cm_is_degamma_srgb); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 1a269099f1..a5e1a93dda 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -794,25 +794,12 @@ struct dsc_mst_fairness_params { struct amdgpu_dm_connector *aconnector; }; -static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link) -{ - u8 link_coding_cap; - uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B; - - link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link); - if (link_coding_cap == DP_128b_132b_ENCODING) - fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B; - - return fec_overhead_multiplier_x1000; -} - -static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000) +static int kbps_to_peak_pbn(int kbps) { u64 peak_kbps = kbps; peak_kbps *= 1006; - peak_kbps *= fec_overhead_multiplier_x1000; - peak_kbps = div_u64(peak_kbps, 1000 * 1000); + peak_kbps = div_u64(peak_kbps, 1000); return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); } @@ -913,12 +900,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state, int link_timeslots_used; int fair_pbn_alloc; int ret = 0; - uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled) { initial_slack[i] = - kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn; + kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn; bpp_increased[i] = false; remaining_to_increase += 1; } else { @@ -1014,7 +1000,6 @@ static int try_disable_dsc(struct drm_atomic_state *state, int next_index; int remaining_to_try = 0; int ret; - uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled @@ -1044,7 +1029,7 @@ static int try_disable_dsc(struct drm_atomic_state *state, if (next_index == -1) break; - vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, @@ -1057,7 +1042,8 @@ static int try_disable_dsc(struct drm_atomic_state *state, vars[next_index].dsc_enabled = false; vars[next_index].bpp_x16 = 0; } else { - vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000); + vars[next_index].pbn = kbps_to_peak_pbn( + params[next_index].bw_range.max_kbps); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, @@ -1086,7 +1072,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, int count = 0; int i, k, ret; bool debugfs_overwrite = false; - uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); memset(params, 0, sizeof(params)); @@ -1151,7 +1136,7 @@ static int 
compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, /* Try no compression */ for (i = 0; i < count; i++) { vars[i + k].aconnector = params[i].aconnector; - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, @@ -1170,7 +1155,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, /* Try max compression */ for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); vars[i + k].dsc_enabled = true; vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, @@ -1178,7 +1163,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (ret < 0) return ret; } else { - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, @@ -1614,7 +1599,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( struct amdgpu_dm_connector *aconnector, struct dc_stream_state *stream) { - int bpp, pbn, branch_max_throughput_mps = 0; + int pbn, branch_max_throughput_mps = 0; struct dc_link_settings cur_link_settings; unsigned int end_to_end_bw_in_kbps = 0; unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; @@ -1664,11 +1649,34 @@ enum dc_status dm_dp_mst_is_port_support_mode( } } } else { - /* check if mode could be supported within full_pbn */ - bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3; - pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4); - if (pbn > aconnector->mst_output_port->full_pbn) + /* Check if mode could be supported within max slot + * number of current mst link and full_pbn of mst links. 
+ */ + int pbn_div, slot_num, max_slot_num; + enum dc_link_encoding_format link_encoding; + uint32_t stream_kbps = + dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(stream->link)); + + pbn = kbps_to_peak_pbn(stream_kbps); + pbn_div = dm_mst_get_pbn_divider(stream->link); + slot_num = DIV_ROUND_UP(pbn, pbn_div); + + link_encoding = dc_link_get_highest_encoding_format(stream->link); + if (link_encoding == DC_LINK_ENCODING_DP_8b_10b) + max_slot_num = 63; + else if (link_encoding == DC_LINK_ENCODING_DP_128b_132b) + max_slot_num = 64; + else { + DRM_DEBUG_DRIVER("Invalid link encoding format\n"); return DC_FAIL_BANDWIDTH_VALIDATE; + } + + if (slot_num > max_slot_num || + pbn > aconnector->mst_output_port->full_pbn) { + DRM_DEBUG_DRIVER("Mode can not be supported within mst links!"); + return DC_FAIL_BANDWIDTH_VALIDATE; + } } /* check is mst dsc output bandwidth branch_overall_throughput_0_mps */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 37c820ab0f..fa84d34b73 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -46,9 +46,6 @@ #define SYNAPTICS_CASCADED_HUB_ID 0x5A #define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0) -#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031 -#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000 - enum mst_msg_ready_type { NONE_MSG_RDY_EVENT = 0, DOWN_REP_MSG_RDY_EVENT = 1, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 286ecd28cc..bfa090432c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -212,7 +212,7 @@ bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) } /* - * amdgpu_dm_psr_disable() - disable psr f/w + * amdgpu_dm_psr_disable_all() - disable psr f/w for all streams * if psr is enabled on any stream * * Return: true if success diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index d9e33c6bcc..0005f5f8f3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -52,4 +52,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc func_name, line); } +void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx) +{ +} + +void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx) +{ +} + /**** power component interfaces ****/ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h index 133af994a0..4686d4b0cb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h @@ -87,7 +87,7 @@ TRACE_EVENT(amdgpu_dc_performance, __entry->writes = write_count; __entry->read_delta = read_count - *last_read; __entry->write_delta = write_count - *last_write; - __assign_str(func, func); + __assign_str(func); __entry->line = line; *last_read = read_count; *last_write = write_count; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c index 4ae4720535..e46f8ce41d 100644 --- 
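
The reworked dm_dp_mst_is_port_support_mode() above derives PBN from the stream bandwidth with kbps_to_peak_pbn() (which, after the earlier hunks, no longer applies a separate FEC overhead multiplier), converts PBN to time slots via the link's PBN divider, and rejects the mode if it needs more than 63 slots on an 8b/10b link or 64 slots on a 128b/132b link, or exceeds the port's full_pbn. A user-space sketch of the same arithmetic, with assumed input values purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* Mirrors the simplified kbps_to_peak_pbn(): add a 0.6% margin, then convert kbps to PBN units. */
static int kbps_to_peak_pbn(int kbps)
{
        uint64_t peak_kbps = (uint64_t)kbps * 1006 / 1000;

        return (int)DIV_ROUND_UP(peak_kbps * 64, 54 * 8 * 1000);
}

int main(void)
{
        int stream_kbps = 533280;       /* assumed bandwidth of the candidate mode */
        int pbn_div = 60;               /* assumed PBN-per-slot divider for the link */
        int full_pbn = 2560;            /* assumed full_pbn reported for the port */
        int max_slot_num = 63;          /* 63 for 8b/10b, 64 for 128b/132b */

        int pbn = kbps_to_peak_pbn(stream_kbps);
        int slot_num = DIV_ROUND_UP(pbn, pbn_div);

        if (slot_num > max_slot_num || pbn > full_pbn)
                printf("mode rejected: pbn=%d slots=%d\n", pbn, slot_num);
        else
                printf("mode fits: pbn=%d slots=%d\n", pbn, slot_num);
        return 0;
}
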
a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c @@ -26,16 +26,7 @@ #include "dc_trace.h" -#if defined(CONFIG_X86) -#include -#elif defined(CONFIG_PPC64) -#include -#include -#elif defined(CONFIG_ARM64) -#include -#elif defined(CONFIG_LOONGARCH) -#include -#endif +#include /** * DOC: DC FPU manipulation overview @@ -87,20 +78,9 @@ void dc_fpu_begin(const char *function_name, const int line) WARN_ON_ONCE(!in_task()); preempt_disable(); depth = __this_cpu_inc_return(fpu_recursion_depth); - if (depth == 1) { -#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) + BUG_ON(!kernel_fpu_available()); kernel_fpu_begin(); -#elif defined(CONFIG_PPC64) - if (cpu_has_feature(CPU_FTR_VSX_COMP)) - enable_kernel_vsx(); - else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) - enable_kernel_altivec(); - else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) - enable_kernel_fp(); -#elif defined(CONFIG_ARM64) - kernel_neon_begin(); -#endif } TRACE_DCN_FPU(true, function_name, line, depth); @@ -122,18 +102,7 @@ void dc_fpu_end(const char *function_name, const int line) depth = __this_cpu_dec_return(fpu_recursion_depth); if (depth == 0) { -#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) kernel_fpu_end(); -#elif defined(CONFIG_PPC64) - if (cpu_has_feature(CPU_FTR_VSX_COMP)) - disable_kernel_vsx(); - else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) - disable_kernel_altivec(); - else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) - disable_kernel_fp(); -#elif defined(CONFIG_ARM64) - kernel_neon_end(); -#endif } else { WARN_ON_ONCE(depth < 0); } diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 7991ae468f..4e9fb17428 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -22,7 +22,7 @@ # # Makefile for Display Core (dc) component. 
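
The dc_fpu.c rewrite above drops the per-architecture #ifdef ladder in favour of the generic kernel_fpu_begin()/kernel_fpu_end() API (with a BUG_ON(!kernel_fpu_available()) guard), while keeping the per-CPU recursion counter so that nested dc_fpu_begin()/dc_fpu_end() pairs only enter and leave FPU context at the outermost level. A stand-alone model of that counting discipline; the real code uses a per-CPU variable and preempt_disable(), omitted here:

#include <assert.h>
#include <stdio.h>

static int fpu_recursion_depth;

static void kernel_fpu_begin_stub(void) { printf("enter FPU context\n"); }
static void kernel_fpu_end_stub(void)   { printf("leave FPU context\n"); }

static void dc_fpu_begin(void)
{
        if (++fpu_recursion_depth == 1) /* only the outermost begin enters FPU context */
                kernel_fpu_begin_stub();
}

static void dc_fpu_end(void)
{
        assert(fpu_recursion_depth > 0);
        if (--fpu_recursion_depth == 0) /* only the matching outermost end leaves it */
                kernel_fpu_end_stub();
}

int main(void)
{
        dc_fpu_begin();
        dc_fpu_begin(); /* nested: no second "enter" */
        dc_fpu_end();
        dc_fpu_end();   /* outermost: "leave" happens here */
        return 0;
}
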
-DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc +DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc dpp ifdef CONFIG_DRM_AMD_DC_FP diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 6450853fea..bc16db69a6 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -44,8 +44,6 @@ #include "bios_parser_common.h" -#include "dc.h" - #define THREE_PERCENT_OF_10000 300 #define LAST_RECORD_TYPE 0xff @@ -1731,6 +1729,7 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1( return 0; } + /** * get_ss_entry_number_from_internal_ss_info_tbl_V3_1 * Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table of diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index ab31643b10..9fe0020bcb 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1594,8 +1594,6 @@ static bool bios_parser_is_device_id_supported( return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0; break; } - - return false; } static uint32_t bios_parser_get_ss_entry_number( diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 86f9198e75..2bcae0643e 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -399,7 +399,7 @@ static enum bp_result transmitter_control_v1_6( static void init_transmitter_control(struct bios_parser *bp) { uint8_t frev; - uint8_t crev; + uint8_t crev = 0; if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl, frev, crev) == false) diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index cbae1be7b0..cc000833d3 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -225,7 +225,7 @@ static enum bp_result transmitter_control_fallback( static void init_transmitter_control(struct bios_parser *bp) { uint8_t frev; - uint8_t crev; + uint8_t crev = 0; BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 9f0f25aee4..a2b4ff2cff 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -272,7 +272,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base; } - if (asic_id.chip_id == DEVICE_ID_NV_13FE) { + if (ctx->dce_version == DCN_VERSION_2_01) { dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base; } @@ -329,15 +329,14 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p } break; case AMDGPU_FAMILY_GC_11_0_0: { - struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL); - - if (clk_mgr == NULL) { - BREAK_TO_DEBUGGER(); - return NULL; - } + struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL); - dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); - return &clk_mgr->base; + if (clk_mgr == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); + 
return &clk_mgr->base; } case AMDGPU_FAMILY_GC_11_0_1: { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index b77804cfde..2a5dd3a296 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -131,8 +131,8 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); int dprefclk_wdivider; int dprefclk_src_sel; - int dp_ref_clk_khz; - int target_div = 600000; + int dp_ref_clk_khz = 600000; + int target_div; /* ASSERT DP Reference Clock source is from DFS*/ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 2a74e2d749..369421e46c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -23,7 +23,6 @@ * */ -#include "reg_helper.h" #include "core_types.h" #include "clk_mgr_internal.h" #include "rv1_clk_mgr.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 89b79dd396..19897fa52e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -26,7 +26,6 @@ #include "core_types.h" #include "clk_mgr_internal.h" #include "reg_helper.h" -#include #include "rv1_clk_mgr_vbios_smu.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 5ee87965a0..bb4f3bd753 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -503,7 +503,7 @@ static void dcn2_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (i = 0; i < MAX_PIPES * 2; i++) { + for (i = 0; i < MAX_LINKS; i++) { if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req) max_phyclk_req = clk_mgr->cur_phyclk_req_table[i]; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c index 9c90090e73..f77840dd05 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c @@ -100,7 +100,15 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base, if (clk_mgr_base->clks.dispclk_khz == 0 || dc->debug.force_clock_mode & 0x1) { + /* this is from resume or boot up, if forced_clock cfg option + * used, we bypass program dispclk and DPPCLK, but need set them + * for S3. + */ + force_reset = true; + /* force_clock_mode 0x1: force reset the clock even it is the + * same clock as long as it is in Passive level. 
+ */ dcn2_read_clocks_from_hw_dentist(clk_mgr_base); } @@ -150,11 +158,14 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base, if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { if (dpp_clock_lowered) { + // if clock is being lowered, increase DTO before lowering refclk dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); dcn20_update_clocks_update_dentist(clk_mgr, context); } else { + // if clock is being raised, increase refclk before lowering DTO if (update_dppclk || update_dispclk) dcn20_update_clocks_update_dentist(clk_mgr, context); + // always update dtos unless clock is lowered and not safe to lower dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index e3e1940198..5ef0879f6a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -548,7 +548,7 @@ static void rn_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc_l clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (i = 0; i < MAX_PIPES * 2; i++) { + for (i = 0; i < MAX_LINKS; i++) { if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req) max_phyclk_req = clk_mgr->cur_phyclk_req_table[i]; } @@ -642,7 +642,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params j = -1; - ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported FCLK DPM levels exceed maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 4e036356b6..8083a553c6 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -474,7 +474,7 @@ static void dcn30_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct d clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (i = 0; i < MAX_PIPES * 2; i++) { + for (i = 0; i < MAX_LINKS; i++) { if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req) max_phyclk_req = clk_mgr->cur_phyclk_req_table[i]; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c index bdbf183066..3253115a15 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c @@ -23,7 +23,6 @@ * */ -#include #include "dcn30_clk_mgr_smu_msg.h" #include "clk_mgr_internal.h" @@ -54,6 +53,7 @@ */ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries) { + const uint32_t initial_max_retries = max_retries; uint32_t reg = 0; do { @@ -69,7 +69,7 @@ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un /* handle DALSMC_Result_CmdRejectedBusy? */ - /* Log? 
*/ + TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx); return reg; } @@ -89,6 +89,8 @@ static bool dcn30_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint /* Trigger the message transaction by writing the message ID */ REG_WRITE(DAL_MSG_REG, msg_id); + TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); + result = dcn30_smu_wait_for_response(clk_mgr, 10, 200000); if (IS_SMU_TIMEOUT(result)) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index aa9fd1dc55..191d8b969d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -566,7 +566,8 @@ static void vg_clk_mgr_helper_populate_bw_params( j = -1; - ASSERT(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported FCLK DPM levels exceeds maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index ce1386e225..12a7752758 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -562,7 +562,8 @@ static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk j = -1; - ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported pstate levels exceeds maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 6904e95113..f201628e4e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -23,7 +23,6 @@ * */ -#include #include "core_types.h" #include "clk_mgr_internal.h" #include "reg_helper.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h index 047d19ea91..78ca1e5c5e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h @@ -37,34 +37,34 @@ typedef enum { } WCK_RATIO_e; typedef struct { - uint32_t FClk; - uint32_t MemClk; - uint32_t Voltage; - uint8_t WckRatio; - uint8_t Spare[3]; + uint32_t FClk; + uint32_t MemClk; + uint32_t Voltage; + uint8_t WckRatio; + uint8_t Spare[3]; } DfPstateTable314_t; //Freq in MHz //Voltage in milli volts with 2 fractional bits typedef struct { - uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; - uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; - uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; - uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; - uint32_t VClocks[NUM_VCN_DPM_LEVELS]; - uint32_t DClocks[NUM_VCN_DPM_LEVELS]; - uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; - DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS]; + uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; + uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; + uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; + uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; + uint32_t VClocks[NUM_VCN_DPM_LEVELS]; + uint32_t DClocks[NUM_VCN_DPM_LEVELS]; + uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; + DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS]; - uint8_t NumDcfClkLevelsEnabled; - uint8_t NumDispClkLevelsEnabled; //Applies to 
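
Several clk_mgr hunks in this patch (rn, vg, dcn31 above, dcn316 just below) replace a runtime ASSERT(... <= MAX_NUM_DPM_LVL) with static_assert(), moving the check on the firmware-reported DPM level count from run time to compile time. The idiom in isolation, with illustrative values for both constants:

#include <assert.h>     /* static_assert (C11) */

#define MAX_NUM_DPM_LVL         8       /* illustrative size of the driver-side table */
#define NUM_DF_PSTATE_LEVELS    4       /* illustrative size of the firmware-side table */

/* Build breaks if the firmware table outgrows the driver table. */
static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL,
              "number of reported pstate levels exceeds maximum");

int main(void)
{
        return 0;
}
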
both Dispclk and Dppclk - uint8_t NumSocClkLevelsEnabled; - uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk - uint8_t NumDfPstatesEnabled; - uint8_t spare[3]; + uint8_t NumDcfClkLevelsEnabled; + uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk + uint8_t NumSocClkLevelsEnabled; + uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk + uint8_t NumDfPstatesEnabled; + uint8_t spare[3]; - uint32_t MinGfxClk; - uint32_t MaxGfxClk; + uint32_t MinGfxClk; + uint32_t MaxGfxClk; } DpmClocks314_t; struct dcn314_watermarks { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c index 879f1494c4..2d14346b68 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c @@ -29,6 +29,7 @@ #include "dm_helpers.h" #include "dcn315_smu.h" #include "mp/mp_13_0_5_offset.h" +#include "logger_types.h" #define MAX_INSTANCE 6 #define MAX_SEGMENT 6 @@ -69,7 +70,6 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D #define REG_NBIO(reg_name) \ (NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name) -#include "logger_types.h" #undef DC_LOGGER #define DC_LOGGER \ CTX->logger diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 6ad4f4efec..20ca7afa9c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -485,7 +485,8 @@ static void dcn316_clk_mgr_helper_populate_bw_params( j = -1; - ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported pstate levels exceeds maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index dda1173be3..ff5fdc7b11 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -29,6 +29,7 @@ #include "dcn20/dcn20_clk_mgr.h" #include "dce100/dce_clk_mgr.h" #include "dcn31/dcn31_clk_mgr.h" +#include "dcn32/dcn32_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" @@ -40,7 +41,6 @@ #include "dcn/dcn_3_2_0_offset.h" #include "dcn/dcn_3_2_0_sh_mask.h" -#include "dcn32/dcn32_clk_mgr.h" #include "dml/dcn32/dcn32_fpu.h" #define DCN_BASE__INST0_SEG1 0x000000C0 @@ -838,7 +838,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, dmcu->funcs->set_psr_wait_loop(dmcu, clk_mgr_base->clks.dispclk_khz / 1000 / 7); - if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) { + if (dc->config.enable_auto_dpm_test_logs) { dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context); } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c index df244b175f..f2f60478b1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c @@ -49,6 +49,7 @@ */ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries) { + const uint32_t initial_max_retries = max_retries; uint32_t 
reg = 0; do { @@ -62,6 +63,8 @@ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un udelay(delay_us); } while (max_retries--); + TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx); + return reg; } @@ -79,6 +82,8 @@ static bool dcn32_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint /* Trigger the message transaction by writing the message ID */ REG_WRITE(DAL_MSG_REG, msg_id); + TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); + /* Wait for response */ if (dcn32_smu_wait_for_response(clk_mgr, 10, 200000) == DALSMC_Result_OK) { if (param_out) @@ -115,6 +120,8 @@ static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_m *total_delay_us += delay_us; } while (max_retries--); + TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx); + return reg; } @@ -135,6 +142,8 @@ static bool dcn32_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mgr /* Trigger the message transaction by writing the message ID */ REG_WRITE(DAL_MSG_REG, msg_id); + TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); + /* Wait for response */ if (dcn32_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) { if (param_out) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h index c76352a817..5c44ab0e86 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h @@ -37,10 +37,9 @@ #define DALSMC_Result_OK 0x1 void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable); -void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); -void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr); void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways); void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); +void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr); unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz); void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index d9c5692c86..6c9b4e6491 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -252,7 +252,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, } if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) { - dcn35_smu_set_dtbclk(clk_mgr, false); + if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk) + dcn35_smu_set_dtbclk(clk_mgr, false); clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; } /* check that we're not already in lower */ @@ -889,35 +890,6 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base) } } -static void dcn35_set_ips_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc *dc = clk_mgr_base->ctx->dc; - uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr); - - if (dc->config.disable_ips == DMUB_IPS_ENABLE || - dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips 
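
The dcn30/dcn32 SMU hunks above add two trace points: TRACE_SMU_MSG() when the message ID is written, and TRACE_SMU_DELAY() with the time spent polling for a response, computed as the per-iteration delay times the retries actually consumed (initial_max_retries - max_retries). The amdgpu_dm_services.c hunk earlier in this patch only adds empty dm_trace_smu_msg()/dm_trace_smu_delay() stubs, so the hooks exist without emitting anything yet. A stand-alone model of the retry accounting on the success path, with a simulated response register:

#include <stdio.h>

static unsigned int polls_until_reply = 3;      /* pretend the SMU answers on the third poll */

static unsigned int read_response_reg(void)     /* stand-in for REG_READ(DAL_RESP_REG) */
{
        return --polls_until_reply == 0 ? 1 : 0;
}

static unsigned int wait_for_response(unsigned int delay_us, unsigned int max_retries,
                                      unsigned int *elapsed_us)
{
        const unsigned int initial_max_retries = max_retries;
        unsigned int reg = 0;

        do {
                reg = read_response_reg();
                if (reg)
                        break;
                /* udelay(delay_us) in the real code */
        } while (max_retries--);

        /* Elapsed time: delay per poll times the retries actually consumed. */
        *elapsed_us = delay_us * (initial_max_retries - max_retries);
        return reg;
}

int main(void)
{
        unsigned int elapsed = 0;
        unsigned int reg = wait_for_response(10, 200000, &elapsed);

        printf("response=%u after ~%u us of polling\n", reg, elapsed);
        return 0;
}
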
== DMUB_IPS_DISABLE_IPS1) { - val |= DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; - } - - if (!allow_idle) { - val |= DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } - - dcn35_smu_write_ips_scratch(clk_mgr, val); -} - static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -937,13 +909,6 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base) return ips_supported; } -static uint32_t dcn35_get_ips_idle_state(struct clk_mgr *clk_mgr_base) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - - return dcn35_smu_read_ips_scratch(clk_mgr); -} - static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr) { init_clk_states(clk_mgr); @@ -1031,8 +996,6 @@ static struct clk_mgr_funcs dcn35_funcs = { .set_low_power_state = dcn35_set_low_power_state, .exit_low_power_state = dcn35_exit_low_power_state, .is_ips_supported = dcn35_is_ips_supported, - .set_idle_state = dcn35_set_ips_idle_state, - .get_idle_state = dcn35_get_ips_idle_state }; struct clk_mgr_funcs dcn35_fpga_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c index 9e588c56c5..1399b41dfd 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c @@ -487,24 +487,3 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr) //smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv); return retv; } - -void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param) -{ - if (!clk_mgr->smu_present) - return; - - REG_WRITE(MP1_SMN_C2PMSG_71, param); - //smu_print("%s: write_ips_scratch = %x\n", __func__, param); -} - -uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr) -{ - uint32_t retv; - - if (!clk_mgr->smu_present) - return 0; - - retv = REG_READ(MP1_SMN_C2PMSG_71); - //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv); - return retv; -} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h index 2b8e6959a0..06cd3cc6d3 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h @@ -198,6 +198,4 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr); -void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param); -uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr); #endif /* DAL_DC_35_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index d68c83e40d..236876d951 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -36,6 +36,7 @@ #include "resource.h" #include "dc_state.h" #include "dc_state_priv.h" +#include "dc_plane_priv.h" #include "gpio_service_interface.h" #include "clk_mgr.h" @@ -212,7 +213,8 @@ static 
bool create_links( connectors_num, num_virtual_links); - for (i = 0; i < connectors_num; i++) { + // condition loop on link_count to allow skipping invalid indices + for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) { struct link_init_data link_init_params = {0}; struct dc_link *link; @@ -386,6 +388,30 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace) *perf_trace = NULL; } +static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust) +{ + if (!dc || !stream || !adjust) + return false; + + if (!dc->current_state) + return false; + + int i; + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->stream == stream && pipe->stream_res.tg) { + if (dc->hwss.set_long_vtotal) + dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max); + + return true; + } + } + + return false; +} + /** * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR * @dc: dc reference @@ -420,6 +446,15 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, stream->adjust.v_total_mid = adjust->v_total_mid; stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; stream->adjust.v_total_min = adjust->v_total_min; + stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt; + + if (dc->caps.max_v_total != 0 && + (adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) { + if (adjust->allow_otg_v_count_halt) + return set_long_vtotal(dc, stream, adjust); + else + return false; + } for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -1055,8 +1090,7 @@ static bool dc_construct(struct dc *dc, * is initialized in dc_create_resource_pool because * on creation it copies the contents of dc->dml */ - - dc->current_state = dc_state_create(dc); + dc->current_state = dc_state_create(dc, NULL); if (!dc->current_state) { dm_error("%s: failed to create validate ctx\n", __func__); @@ -1272,7 +1306,7 @@ static void disable_vbios_mode_if_required( if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { unsigned int enc_inst, tg_inst = 0; - unsigned int pix_clk_100hz; + unsigned int pix_clk_100hz = 0; enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); if (enc_inst != ENGINE_ID_UNKNOWN) { @@ -1759,7 +1793,7 @@ bool dc_validate_boot_timing(const struct dc *dc, return false; if (dc_is_dp_signal(link->connector_signal)) { - unsigned int pix_clk_100hz; + unsigned int pix_clk_100hz = 0; uint32_t numOdmPipes = 1; uint32_t id_src[4] = {0}; @@ -2088,15 +2122,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c return result; } -static bool commit_minimal_transition_state_legacy(struct dc *dc, +static bool commit_minimal_transition_state(struct dc *dc, struct dc_state *transition_base_context); /** * dc_commit_streams - Commit current stream state * * @dc: DC object with the commit state to be configured in the hardware - * @streams: Array with a list of stream state - * @stream_count: Total of streams + * @params: Parameters for the commit, including the streams to be committed * * Function responsible for commit streams change to the hardware. * @@ -2104,9 +2137,7 @@ static bool commit_minimal_transition_state_legacy(struct dc *dc, * Return DC_OK if everything work as expected, otherwise, return a dc_status * code. 
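
dc_stream_adjust_vmin_vmax() above gains a cap check: when the requested v_total_min/v_total_max exceed dc->caps.max_v_total, the request is either routed to the new set_long_vtotal() path (if the stream allows halting the OTG vertical counter) or rejected. A sketch of just that decision, with assumed numbers; the real function then goes on to program DRR on the matching pipes:

#include <stdbool.h>
#include <stdio.h>

struct crtc_timing_adjust {
        unsigned int v_total_min;
        unsigned int v_total_max;
        bool allow_otg_v_count_halt;
};

/* true: take the normal DRR path; false: handled by long-vtotal or rejected. */
static bool use_normal_drr_path(unsigned int max_v_total,
                                const struct crtc_timing_adjust *adjust,
                                bool *handled_as_long_vtotal)
{
        *handled_as_long_vtotal = false;

        if (max_v_total != 0 &&
            (adjust->v_total_max > max_v_total || adjust->v_total_min > max_v_total)) {
                if (adjust->allow_otg_v_count_halt)
                        *handled_as_long_vtotal = true; /* set_long_vtotal() in the real code */
                return false;
        }
        return true;
}

int main(void)
{
        struct crtc_timing_adjust adjust = {
                .v_total_min = 70000, .v_total_max = 70000,     /* assumed values beyond the cap */
                .allow_otg_v_count_halt = true,
        };
        bool long_vtotal;
        bool normal = use_normal_drr_path(65535, &adjust, &long_vtotal);

        printf("normal=%d long_vtotal=%d\n", normal, long_vtotal);
        return 0;
}
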
*/ -enum dc_status dc_commit_streams(struct dc *dc, - struct dc_stream_state *streams[], - uint8_t stream_count) +enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params) { int i, j; struct dc_state *context; @@ -2115,18 +2146,22 @@ enum dc_status dc_commit_streams(struct dc *dc, struct pipe_ctx *pipe; bool handle_exit_odm2to1 = false; + if (!params) + return DC_ERROR_UNEXPECTED; + if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) return res; - if (!streams_changed(dc, streams, stream_count)) + if (!streams_changed(dc, params->streams, params->stream_count) && + dc->current_state->power_source == params->power_source) return res; dc_exit_ips_for_hw_access(dc); - DC_LOG_DC("%s: %d streams\n", __func__, stream_count); + DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count); - for (i = 0; i < stream_count; i++) { - struct dc_stream_state *stream = streams[i]; + for (i = 0; i < params->stream_count; i++) { + struct dc_stream_state *stream = params->streams[i]; struct dc_stream_status *status = dc_stream_get_status(stream); dc_stream_log(dc, stream); @@ -2144,7 +2179,7 @@ enum dc_status dc_commit_streams(struct dc *dc, * scenario, it uses extra pipes than needed to reduce power consumption * We need to switch off this feature to make room for new streams. */ - if (stream_count > dc->current_state->stream_count && + if (params->stream_count > dc->current_state->stream_count && dc->current_state->stream_count == 1) { for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -2154,13 +2189,15 @@ enum dc_status dc_commit_streams(struct dc *dc, } if (handle_exit_odm2to1) - res = commit_minimal_transition_state_legacy(dc, dc->current_state); + res = commit_minimal_transition_state(dc, dc->current_state); context = dc_state_create_current_copy(dc); if (!context) goto context_alloc_fail; - res = dc_validate_with_context(dc, set, stream_count, context, false); + context->power_source = params->power_source; + + res = dc_validate_with_context(dc, set, params->stream_count, context, false); if (res != DC_OK) { BREAK_TO_DEBUGGER(); goto fail; @@ -2168,16 +2205,16 @@ enum dc_status dc_commit_streams(struct dc *dc, res = dc_commit_state_no_check(dc, context); - for (i = 0; i < stream_count; i++) { + for (i = 0; i < params->stream_count; i++) { for (j = 0; j < context->stream_count; j++) { - if (streams[i]->stream_id == context->streams[j]->stream_id) - streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; + if (params->streams[i]->stream_id == context->streams[j]->stream_id) + params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; - if (dc_is_embedded_signal(streams[i]->signal)) { - struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]); + if (dc_is_embedded_signal(params->streams[i]->signal)) { + struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]); if (dc->hwss.is_abm_supported) - status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); + status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]); else status->is_abm_supported = true; } @@ -2821,55 +2858,45 @@ static void copy_surface_update_to_plane( srf_update->plane_info->layer_index; } - if (srf_update->gamma && - (surface->gamma_correction != - srf_update->gamma)) { - memcpy(&surface->gamma_correction->entries, + if (srf_update->gamma) { + memcpy(&surface->gamma_correction.entries, &srf_update->gamma->entries, 
sizeof(struct dc_gamma_entries)); - surface->gamma_correction->is_identity = + surface->gamma_correction.is_identity = srf_update->gamma->is_identity; - surface->gamma_correction->num_entries = + surface->gamma_correction.num_entries = srf_update->gamma->num_entries; - surface->gamma_correction->type = + surface->gamma_correction.type = srf_update->gamma->type; } - if (srf_update->in_transfer_func && - (surface->in_transfer_func != - srf_update->in_transfer_func)) { - surface->in_transfer_func->sdr_ref_white_level = + if (srf_update->in_transfer_func) { + surface->in_transfer_func.sdr_ref_white_level = srf_update->in_transfer_func->sdr_ref_white_level; - surface->in_transfer_func->tf = + surface->in_transfer_func.tf = srf_update->in_transfer_func->tf; - surface->in_transfer_func->type = + surface->in_transfer_func.type = srf_update->in_transfer_func->type; - memcpy(&surface->in_transfer_func->tf_pts, + memcpy(&surface->in_transfer_func.tf_pts, &srf_update->in_transfer_func->tf_pts, sizeof(struct dc_transfer_func_distributed_points)); } - if (srf_update->func_shaper && - (surface->in_shaper_func != - srf_update->func_shaper)) - memcpy(surface->in_shaper_func, srf_update->func_shaper, - sizeof(*surface->in_shaper_func)); + if (srf_update->func_shaper) + memcpy(&surface->in_shaper_func, srf_update->func_shaper, + sizeof(surface->in_shaper_func)); - if (srf_update->lut3d_func && - (surface->lut3d_func != - srf_update->lut3d_func)) - memcpy(surface->lut3d_func, srf_update->lut3d_func, - sizeof(*surface->lut3d_func)); + if (srf_update->lut3d_func) + memcpy(&surface->lut3d_func, srf_update->lut3d_func, + sizeof(surface->lut3d_func)); if (srf_update->hdr_mult.value) surface->hdr_mult = srf_update->hdr_mult; - if (srf_update->blend_tf && - (surface->blend_tf != - srf_update->blend_tf)) - memcpy(surface->blend_tf, srf_update->blend_tf, - sizeof(*surface->blend_tf)); + if (srf_update->blend_tf) + memcpy(&surface->blend_tf, srf_update->blend_tf, + sizeof(surface->blend_tf)); if (srf_update->input_csc_color_matrix) surface->input_csc_color_matrix = @@ -2900,14 +2927,13 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->dst.height && update->dst.width) stream->dst = update->dst; - if (update->out_transfer_func && - stream->out_transfer_func != update->out_transfer_func) { - stream->out_transfer_func->sdr_ref_white_level = + if (update->out_transfer_func) { + stream->out_transfer_func.sdr_ref_white_level = update->out_transfer_func->sdr_ref_white_level; - stream->out_transfer_func->tf = update->out_transfer_func->tf; - stream->out_transfer_func->type = + stream->out_transfer_func.tf = update->out_transfer_func->tf; + stream->out_transfer_func.type = update->out_transfer_func->type; - memcpy(&stream->out_transfer_func->tf_pts, + memcpy(&stream->out_transfer_func.tf_pts, &update->out_transfer_func->tf_pts, sizeof(struct dc_transfer_func_distributed_points)); } @@ -3020,15 +3046,8 @@ static void backup_planes_and_stream_state( for (i = 0; i < status->plane_count; i++) { scratch->plane_states[i] = *status->plane_states[i]; - scratch->gamma_correction[i] = *status->plane_states[i]->gamma_correction; - scratch->in_transfer_func[i] = *status->plane_states[i]->in_transfer_func; - scratch->lut3d_func[i] = *status->plane_states[i]->lut3d_func; - scratch->in_shaper_func[i] = *status->plane_states[i]->in_shaper_func; - scratch->blend_tf[i] = *status->plane_states[i]->blend_tf; } scratch->stream_state = *stream; - if (stream->out_transfer_func) - scratch->out_transfer_func = 
*stream->out_transfer_func; } static void restore_planes_and_stream_state( @@ -3043,17 +3062,67 @@ static void restore_planes_and_stream_state( for (i = 0; i < status->plane_count; i++) { *status->plane_states[i] = scratch->plane_states[i]; - *status->plane_states[i]->gamma_correction = scratch->gamma_correction[i]; - *status->plane_states[i]->in_transfer_func = scratch->in_transfer_func[i]; - *status->plane_states[i]->lut3d_func = scratch->lut3d_func[i]; - *status->plane_states[i]->in_shaper_func = scratch->in_shaper_func[i]; - *status->plane_states[i]->blend_tf = scratch->blend_tf[i]; } *stream = scratch->stream_state; - if (stream->out_transfer_func) - *stream->out_transfer_func = scratch->out_transfer_func; } +/** + * update_seamless_boot_flags() - Helper function for updating seamless boot flags + * + * @dc: Current DC state + * @context: New DC state to be programmed + * @surface_count: Number of surfaces that have an updated + * @stream: Corresponding stream to be updated in the current flip + * + * Updating seamless boot flags do not need to be part of the commit sequence. This + * helper function will update the seamless boot flags on each flip (if required) + * outside of the HW commit sequence (fast or slow). + * + * Return: void + */ +static void update_seamless_boot_flags(struct dc *dc, + struct dc_state *context, + int surface_count, + struct dc_stream_state *stream) +{ + if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { + /* Optimize seamless boot flag keeps clocks and watermarks high until + * first flip. After first flip, optimization is required to lower + * bandwidth. Important to note that it is expected UEFI will + * only light up a single display on POST, therefore we only expect + * one stream with seamless boot flag set. + */ + if (stream->apply_seamless_boot_optimization) { + stream->apply_seamless_boot_optimization = false; + + if (get_seamless_boot_stream_count(context) == 0) + dc->optimized_required = true; + } + } +} + +/** + * update_planes_and_stream_state() - The function takes planes and stream + * updates as inputs and determines the appropriate update type. If update type + * is FULL, the function allocates a new context, populates and validates it. + * Otherwise, it updates current dc context. The function will return both + * new_context and new_update_type back to the caller. The function also backs + * up both current and new contexts into corresponding dc state scratch memory. + * TODO: The function does too many things, and even conditionally allocates dc + * context memory implicitly. We should consider to break it down. + * + * @dc: Current DC state + * @srf_updates: an array of surface updates + * @surface_count: surface update count + * @stream: Corresponding stream to be updated + * @stream_update: stream update + * @new_update_type: [out] determined update type by the function + * @new_context: [out] new context allocated and validated if update type is + * FULL, reference to current context if update type is less than FULL. + * + * Return: true if a valid update is populated into new_context, false + * otherwise. 
+ */ static bool update_planes_and_stream_state(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, @@ -3077,9 +3146,10 @@ static bool update_planes_and_stream_state(struct dc *dc, } context = dc->current_state; - backup_planes_and_stream_state(&dc->current_state->scratch, stream); update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); + if (update_type == UPDATE_TYPE_FULL) + backup_planes_and_stream_state(&dc->scratch.current_state, stream); /* update current stream with the new updates */ copy_stream_update_to_stream(dc, context, stream, stream_update); @@ -3148,7 +3218,10 @@ static bool update_planes_and_stream_state(struct dc *dc, for (i = 0; i < surface_count; i++) { struct dc_plane_state *surface = srf_updates[i].surface; - if (update_type >= UPDATE_TYPE_MED) { + if (update_type != UPDATE_TYPE_MED) + continue; + if (surface->update_flags.bits.clip_size_change || + surface->update_flags.bits.position_change) { for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; @@ -3165,19 +3238,13 @@ static bool update_planes_and_stream_state(struct dc *dc, BREAK_TO_DEBUGGER(); goto fail; } - - for (i = 0; i < context->stream_count; i++) { - struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx, - context->streams[i]); - - if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) - resource_build_test_pattern_params(&context->res_ctx, otg_master); - } } + update_seamless_boot_flags(dc, context, surface_count, stream); *new_context = context; *new_update_type = update_type; - backup_planes_and_stream_state(&context->scratch, stream); + if (update_type == UPDATE_TYPE_FULL) + backup_planes_and_stream_state(&dc->scratch.new_state, stream); return true; @@ -3266,12 +3333,26 @@ static void commit_planes_do_stream_update(struct dc *dc, } if (stream_update->pending_test_pattern) { - dc_link_dp_set_test_pattern(stream->link, + /* + * test pattern params depends on ODM topology + * changes that we could be applying to front + * end. Since at the current stage front end + * changes are not yet applied. We can only + * apply test pattern in hw based on current + * state and populate the final test pattern + * params in new state. If current and new test + * pattern params are different as result of + * different ODM topology being used, it will be + * detected and handle during front end + * programming update. 
+ */ + dc->link_srv->dp_set_test_pattern(stream->link, stream->test_pattern.type, stream->test_pattern.color_space, stream->test_pattern.p_link_settings, stream->test_pattern.p_custom_pattern, stream->test_pattern.cust_pattern_size); + resource_build_test_pattern_params(&context->res_ctx, pipe_ctx); } if (stream_update->dpms_off) { @@ -3368,6 +3449,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc, if (srf_updates[i].surface->flip_immediate) continue; + update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, sizeof(flip_addr->dirty_rects)); @@ -3484,6 +3566,7 @@ static void commit_planes_for_stream_fast(struct dc *dc, int i, j; struct pipe_ctx *top_pipe_to_program = NULL; struct dc_stream_status *stream_status = NULL; + dc_exit_ips_for_hw_access(dc); dc_z10_restore(dc); @@ -3541,7 +3624,8 @@ static void commit_planes_for_stream_fast(struct dc *dc, context->block_sequence, &(context->block_sequence_steps), top_pipe_to_program, - stream_status); + stream_status, + context); hwss_execute_sequence(dc, context->block_sequence, context->block_sequence_steps); @@ -4070,24 +4154,14 @@ struct pipe_split_policy_backup { bool dynamic_odm_policy; bool subvp_policy; enum pipe_split_policy mpc_policy; + char force_odm[MAX_PIPES]; }; -static void release_minimal_transition_state(struct dc *dc, - struct dc_state *context, struct pipe_split_policy_backup *policy) -{ - dc_state_release(context); - /* restore previous pipe split and odm policy */ - if (!dc->config.is_vmin_only_asic) - dc->debug.pipe_split_policy = policy->mpc_policy; - dc->debug.enable_single_display_2to1_odm_policy = policy->dynamic_odm_policy; - dc->debug.force_disable_subvp = policy->subvp_policy; -} - -static struct dc_state *create_minimal_transition_state(struct dc *dc, - struct dc_state *base_context, struct pipe_split_policy_backup *policy) +static void backup_and_set_minimal_pipe_split_policy(struct dc *dc, + struct dc_state *context, + struct pipe_split_policy_backup *policy) { - struct dc_state *minimal_transition_context = NULL; - unsigned int i, j; + int i; if (!dc->config.is_vmin_only_asic) { policy->mpc_policy = dc->debug.pipe_split_policy; @@ -4097,97 +4171,257 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc, dc->debug.enable_single_display_2to1_odm_policy = false; policy->subvp_policy = dc->debug.force_disable_subvp; dc->debug.force_disable_subvp = true; + for (i = 0; i < context->stream_count; i++) { + policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments; + context->streams[i]->debug.force_odm_combine_segments = 0; + } +} + +static void restore_minimal_pipe_split_policy(struct dc *dc, + struct dc_state *context, + struct pipe_split_policy_backup *policy) +{ + uint8_t i; + + if (!dc->config.is_vmin_only_asic) + dc->debug.pipe_split_policy = policy->mpc_policy; + dc->debug.enable_single_display_2to1_odm_policy = + policy->dynamic_odm_policy; + dc->debug.force_disable_subvp = policy->subvp_policy; + for (i = 0; i < context->stream_count; i++) + context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i]; +} + +static void release_minimal_transition_state(struct dc *dc, + struct dc_state *minimal_transition_context, + struct dc_state *base_context, + struct pipe_split_policy_backup *policy) +{ + restore_minimal_pipe_split_policy(dc, base_context, policy); + dc_state_release(minimal_transition_context); +} + +static void 
force_vsync_flip_in_minimal_transition_context(struct dc_state *context) +{ + uint8_t i; + int j; + struct dc_stream_status *stream_status; + + for (i = 0; i < context->stream_count; i++) { + stream_status = &context->stream_status[i]; + + for (j = 0; j < stream_status->plane_count; j++) + stream_status->plane_states[j]->flip_immediate = false; + } +} + +static struct dc_state *create_minimal_transition_state(struct dc *dc, + struct dc_state *base_context, struct pipe_split_policy_backup *policy) +{ + struct dc_state *minimal_transition_context = NULL; minimal_transition_context = dc_state_create_copy(base_context); if (!minimal_transition_context) return NULL; + backup_and_set_minimal_pipe_split_policy(dc, base_context, policy); /* commit minimal state */ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) { - for (i = 0; i < minimal_transition_context->stream_count; i++) { - struct dc_stream_status *stream_status = &minimal_transition_context->stream_status[i]; - - for (j = 0; j < stream_status->plane_count; j++) { - struct dc_plane_state *plane_state = stream_status->plane_states[j]; - - /* force vsync flip when reconfiguring pipes to prevent underflow - * and corruption - */ - plane_state->flip_immediate = false; - } - } + /* prevent underflow and corruption when reconfiguring pipes */ + force_vsync_flip_in_minimal_transition_context(minimal_transition_context); } else { - /* this should never happen */ - release_minimal_transition_state(dc, minimal_transition_context, policy); + /* + * This should never happen, minimal transition state should + * always be validated first before adding pipe split features. + */ + release_minimal_transition_state(dc, minimal_transition_context, base_context, policy); BREAK_TO_DEBUGGER(); minimal_transition_context = NULL; } return minimal_transition_context; } +static bool is_pipe_topology_transition_seamless_with_intermediate_step( + struct dc *dc, + struct dc_state *initial_state, + struct dc_state *intermediate_state, + struct dc_state *final_state) +{ + return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state, + intermediate_state) && + dc->hwss.is_pipe_topology_transition_seamless(dc, + intermediate_state, final_state); +} + +static void swap_and_release_current_context(struct dc *dc, + struct dc_state *new_context, struct dc_stream_state *stream) +{ + + int i; + struct dc_state *old = dc->current_state; + struct pipe_ctx *pipe_ctx; + + /* Since memory free requires elevated IRQ, an interrupt + * request is generated by mem free. If this happens + * between freeing and reassigning the context, our vsync + * interrupt will call into dc and cause a memory + * corruption. Hence, we first reassign the context, + * then free the old context. 
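+ *
+ * Safe ordering, illustrated (mirrors the code below):
+ *
+ *	struct dc_state *old = dc->current_state;
+ *	dc->current_state = new_context;	// publish the new state first
+ *	dc_state_release(old);			// only then drop the old one
+ *
+ * Releasing first would leave a window in which a vsync interrupt
+ * could dereference freed memory.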
+ */ + dc->current_state = new_context; + dc_state_release(old); + + // clear any forced full updates + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe_ctx = &new_context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->plane_state && pipe_ctx->stream == stream) + pipe_ctx->plane_state->force_full_update = false; + } +} + +static int initialize_empty_surface_updates( + struct dc_stream_state *stream, + struct dc_surface_update *srf_updates) +{ + struct dc_stream_status *status = dc_stream_get_status(stream); + int i; + + if (!status) + return 0; + + for (i = 0; i < status->plane_count; i++) + srf_updates[i].surface = status->plane_states[i]; + + return status->plane_count; +} + +static bool commit_minimal_transition_based_on_new_context(struct dc *dc, + struct dc_state *new_context, + struct dc_stream_state *stream, + struct dc_surface_update *srf_updates, + int surface_count) +{ + bool success = false; + struct pipe_split_policy_backup policy; + struct dc_state *intermediate_context = + create_minimal_transition_state(dc, new_context, + &policy); + + if (intermediate_context) { + if (is_pipe_topology_transition_seamless_with_intermediate_step( + dc, + dc->current_state, + intermediate_context, + new_context)) { + DC_LOG_DC("commit minimal transition state: base = new state\n"); + commit_planes_for_stream(dc, srf_updates, + surface_count, stream, NULL, + UPDATE_TYPE_FULL, intermediate_context); + swap_and_release_current_context( + dc, intermediate_context, stream); + dc_state_retain(dc->current_state); + success = true; + } + release_minimal_transition_state( + dc, intermediate_context, new_context, &policy); + } + return success; +} + +static bool commit_minimal_transition_based_on_current_context(struct dc *dc, + struct dc_state *new_context, struct dc_stream_state *stream) +{ + bool success = false; + struct pipe_split_policy_backup policy; + struct dc_state *intermediate_context; + struct dc_state *old_current_state = dc->current_state; + struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0}; + int surface_count; + + /* + * Both current and new contexts share the same stream and plane state + * pointers. When new context is validated, stream and planes get + * populated with new updates such as new plane addresses. This makes + * the current context no longer valid because stream and planes are + * modified from the original. We backup current stream and plane states + * into scratch space whenever we are populating new context. So we can + * restore the original values back by calling the restore function now. + * This restores back the original stream and plane states associated + * with the current state. 
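+ *
+ * Backup/restore pairing, roughly (illustrative; the backups are taken
+ * in update_planes_and_stream_state() for FULL updates):
+ *
+ *	backup_planes_and_stream_state(&dc->scratch.current_state, stream)
+ *	backup_planes_and_stream_state(&dc->scratch.new_state, stream)
+ *	...
+ *	restore_planes_and_stream_state(&dc->scratch.current_state, stream)
+ *	... commit minimal transition based on the current context ...
+ *	restore_planes_and_stream_state(&dc->scratch.new_state, stream)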
+ */ + restore_planes_and_stream_state(&dc->scratch.current_state, stream); + dc_state_retain(old_current_state); + intermediate_context = create_minimal_transition_state(dc, + old_current_state, &policy); + + if (intermediate_context) { + if (is_pipe_topology_transition_seamless_with_intermediate_step( + dc, + dc->current_state, + intermediate_context, + new_context)) { + DC_LOG_DC("commit minimal transition state: base = current state\n"); + surface_count = initialize_empty_surface_updates( + stream, srf_updates); + commit_planes_for_stream(dc, srf_updates, + surface_count, stream, NULL, + UPDATE_TYPE_FULL, intermediate_context); + swap_and_release_current_context( + dc, intermediate_context, stream); + dc_state_retain(dc->current_state); + success = true; + } + release_minimal_transition_state(dc, intermediate_context, + old_current_state, &policy); + } + dc_state_release(old_current_state); + /* + * Restore stream and plane states back to the values associated with + * new context. + */ + restore_planes_and_stream_state(&dc->scratch.new_state, stream); + return success; +} /** - * commit_minimal_transition_state - Commit a minimal state based on current or new context + * commit_minimal_transition_state_in_dc_update - Commit a minimal state based + * on current or new context * * @dc: DC structure, used to get the current state - * @context: New context + * @new_context: New context * @stream: Stream getting the update for the flip + * @srf_updates: Surface updates + * @surface_count: Number of surfaces * - * The function takes in current state and new state and determine a minimal transition state - * as the intermediate step which could make the transition between current and new states - * seamless. If found, it will commit the minimal transition state and update current state to - * this minimal transition state and return true, if not, it will return false. + * The function takes in current state and new state and determine a minimal + * transition state as the intermediate step which could make the transition + * between current and new states seamless. If found, it will commit the minimal + * transition state and update current state to this minimal transition state + * and return true, if not, it will return false. 
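+ *
+ * Attempt order, as implemented below (illustrative):
+ *  1. commit_minimal_transition_based_on_new_context()
+ *  2. commit_minimal_transition_based_on_current_context(), as a fallback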
* * Return: * Return True if the minimal transition succeeded, false otherwise */ -static bool commit_minimal_transition_state(struct dc *dc, - struct dc_state *context, - struct dc_stream_state *stream) -{ - bool success = false; - struct dc_state *minimal_transition_context; - struct pipe_split_policy_backup policy; - - /* commit based on new context */ - minimal_transition_context = create_minimal_transition_state(dc, - context, &policy); - if (minimal_transition_context) { - if (dc->hwss.is_pipe_topology_transition_seamless( - dc, dc->current_state, minimal_transition_context) && - dc->hwss.is_pipe_topology_transition_seamless( - dc, minimal_transition_context, context)) { - DC_LOG_DC("%s base = new state\n", __func__); - - success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK; - } - release_minimal_transition_state(dc, minimal_transition_context, &policy); - } - - if (!success) { - /* commit based on current context */ - restore_planes_and_stream_state(&dc->current_state->scratch, stream); - minimal_transition_context = create_minimal_transition_state(dc, - dc->current_state, &policy); - if (minimal_transition_context) { - if (dc->hwss.is_pipe_topology_transition_seamless( - dc, dc->current_state, minimal_transition_context) && - dc->hwss.is_pipe_topology_transition_seamless( - dc, minimal_transition_context, context)) { - DC_LOG_DC("%s base = current state\n", __func__); - success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK; - } - release_minimal_transition_state(dc, minimal_transition_context, &policy); - } - restore_planes_and_stream_state(&context->scratch, stream); - } - - ASSERT(success); +static bool commit_minimal_transition_state_in_dc_update(struct dc *dc, + struct dc_state *new_context, + struct dc_stream_state *stream, + struct dc_surface_update *srf_updates, + int surface_count) +{ + bool success = commit_minimal_transition_based_on_new_context( + dc, new_context, stream, srf_updates, + surface_count); + if (!success) + success = commit_minimal_transition_based_on_current_context(dc, + new_context, stream); + if (!success) + DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n"); return success; } /** - * commit_minimal_transition_state_legacy - Create a transition pipe split state + * commit_minimal_transition_state - Create a transition pipe split state * * @dc: Used to get the current state status * @transition_base_context: New transition state @@ -4204,7 +4438,7 @@ static bool commit_minimal_transition_state(struct dc *dc, * Return: * Return false if something is wrong in the transition state. */ -static bool commit_minimal_transition_state_legacy(struct dc *dc, +static bool commit_minimal_transition_state(struct dc *dc, struct dc_state *transition_base_context) { struct dc_state *transition_context; @@ -4265,12 +4499,14 @@ static bool commit_minimal_transition_state_legacy(struct dc *dc, dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? 
"MPC in Use" : "Unknown"); + dc_state_retain(transition_base_context); transition_context = create_minimal_transition_state(dc, transition_base_context, &policy); if (transition_context) { ret = dc_commit_state_no_check(dc, transition_context); - release_minimal_transition_state(dc, transition_context, &policy); + release_minimal_transition_state(dc, transition_context, transition_base_context, &policy); } + dc_state_release(transition_base_context); if (ret != DC_OK) { /* this should never happen */ @@ -4288,41 +4524,6 @@ static bool commit_minimal_transition_state_legacy(struct dc *dc, return true; } -/** - * update_seamless_boot_flags() - Helper function for updating seamless boot flags - * - * @dc: Current DC state - * @context: New DC state to be programmed - * @surface_count: Number of surfaces that have an updated - * @stream: Corresponding stream to be updated in the current flip - * - * Updating seamless boot flags do not need to be part of the commit sequence. This - * helper function will update the seamless boot flags on each flip (if required) - * outside of the HW commit sequence (fast or slow). - * - * Return: void - */ -static void update_seamless_boot_flags(struct dc *dc, - struct dc_state *context, - int surface_count, - struct dc_stream_state *stream) -{ - if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { - /* Optimize seamless boot flag keeps clocks and watermarks high until - * first flip. After first flip, optimization is required to lower - * bandwidth. Important to note that it is expected UEFI will - * only light up a single display on POST, therefore we only expect - * one stream with seamless boot flag set. - */ - if (stream->apply_seamless_boot_optimization) { - stream->apply_seamless_boot_optimization = false; - - if (get_seamless_boot_stream_count(context) == 0) - dc->optimized_required = true; - } - } -} - static void populate_fast_updates(struct dc_fast_update *fast_update, struct dc_surface_update *srf_updates, int surface_count, @@ -4442,131 +4643,17 @@ static bool fast_update_only(struct dc *dc, && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); } -bool dc_update_planes_and_stream(struct dc *dc, +static bool update_planes_and_stream_v1(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, - struct dc_stream_update *stream_update) + struct dc_stream_update *stream_update, + struct dc_state *state) { - struct dc_state *context; + const struct dc_stream_status *stream_status; enum surface_update_type update_type; - int i; - struct dc_fast_update fast_update[MAX_SURFACES] = {0}; - - /* In cases where MPO and split or ODM are used transitions can - * cause underflow. Apply stream configuration with minimal pipe - * split first to avoid unsupported transitions for active pipes. 
- */ - bool force_minimal_pipe_splitting = 0; - bool is_plane_addition = 0; - bool is_fast_update_only; - - dc_exit_ips_for_hw_access(dc); - - populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); - is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, - surface_count, stream_update, stream); - force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( - dc, - stream, - srf_updates, - surface_count, - &is_plane_addition); - - /* on plane addition, minimal state is the current one */ - if (force_minimal_pipe_splitting && is_plane_addition && - !commit_minimal_transition_state_legacy(dc, dc->current_state)) - return false; - - if (!update_planes_and_stream_state( - dc, - srf_updates, - surface_count, - stream, - stream_update, - &update_type, - &context)) - return false; - - /* on plane removal, minimal state is the new one */ - if (force_minimal_pipe_splitting && !is_plane_addition) { - if (!commit_minimal_transition_state_legacy(dc, context)) { - dc_state_release(context); - return false; - } - update_type = UPDATE_TYPE_FULL; - } - - if (dc->hwss.is_pipe_topology_transition_seamless && - !dc->hwss.is_pipe_topology_transition_seamless( - dc, dc->current_state, context)) { - commit_minimal_transition_state(dc, - context, stream); - } - update_seamless_boot_flags(dc, context, surface_count, stream); - if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) { - commit_planes_for_stream_fast(dc, - srf_updates, - surface_count, - stream, - stream_update, - update_type, - context); - } else { - if (!stream_update && - dc->hwss.is_pipe_topology_transition_seamless && - !dc->hwss.is_pipe_topology_transition_seamless( - dc, dc->current_state, context)) { - DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n"); - BREAK_TO_DEBUGGER(); - } - commit_planes_for_stream( - dc, - srf_updates, - surface_count, - stream, - stream_update, - update_type, - context); - } - - if (dc->current_state != context) { - - /* Since memory free requires elevated IRQL, an interrupt - * request is generated by mem free. If this happens - * between freeing and reassigning the context, our vsync - * interrupt will call into dc and cause a memory - * corruption BSOD. Hence, we first reassign the context, - * then free the old context. 
- */ - - struct dc_state *old = dc->current_state; - - dc->current_state = context; - dc_state_release(old); - - // clear any forced full updates - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->plane_state && pipe_ctx->stream == stream) - pipe_ctx->plane_state->force_full_update = false; - } - } - return true; -} - -void dc_commit_updates_for_stream(struct dc *dc, - struct dc_surface_update *srf_updates, - int surface_count, - struct dc_stream_state *stream, - struct dc_stream_update *stream_update, - struct dc_state *state) -{ - const struct dc_stream_status *stream_status; - enum surface_update_type update_type; - struct dc_state *context; - struct dc_context *dc_ctx = dc->ctx; - int i, j; + struct dc_state *context; + struct dc_context *dc_ctx = dc->ctx; + int i, j; struct dc_fast_update fast_update[MAX_SURFACES] = {0}; dc_exit_ips_for_hw_access(dc); @@ -4578,35 +4665,13 @@ void dc_commit_updates_for_stream(struct dc *dc, update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); - /* TODO: Since change commit sequence can have a huge impact, - * we decided to only enable it for DCN3x. However, as soon as - * we get more confident about this change we'll need to enable - * the new sequence for all ASICs. - */ - if (dc->ctx->dce_version >= DCN_VERSION_3_2) { - /* - * Previous frame finished and HW is ready for optimization. - */ - if (update_type == UPDATE_TYPE_FAST) - dc_post_update_surfaces_to_stream(dc); - - dc_update_planes_and_stream(dc, srf_updates, - surface_count, stream, - stream_update); - return; - } - - if (update_type >= update_surface_trace_level) - update_surface_trace(dc, srf_updates, surface_count); - - if (update_type >= UPDATE_TYPE_FULL) { /* initialize scratch memory for building context */ context = dc_state_create_copy(state); if (context == NULL) { DC_ERROR("Failed to allocate new validate context!\n"); - return; + return false; } for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -4623,7 +4688,6 @@ void dc_commit_updates_for_stream(struct dc *dc, dc_post_update_surfaces_to_stream(dc); } - for (i = 0; i < surface_count; i++) { struct dc_plane_state *surface = srf_updates[i].surface; @@ -4648,13 +4712,12 @@ void dc_commit_updates_for_stream(struct dc *dc, if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { DC_ERROR("Mode validation failed for stream update!\n"); dc_state_release(context); - return; + return false; } } TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); - update_seamless_boot_flags(dc, context, surface_count, stream); if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, @@ -4695,9 +4758,252 @@ void dc_commit_updates_for_stream(struct dc *dc, dc_post_update_surfaces_to_stream(dc); TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); } + return true; +} + +static bool update_planes_and_stream_v2(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update) +{ + struct dc_state *context; + enum surface_update_type update_type; + struct dc_fast_update fast_update[MAX_SURFACES] = {0}; - return; + /* In cases where MPO and split or ODM are used transitions can + * cause underflow. Apply stream configuration with minimal pipe + * split first to avoid unsupported transitions for active pipes. 
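+ *
+ * Minimal transition handling in this path, roughly (illustrative):
+ *
+ *	plane addition -> commit_minimal_transition_state(dc, current state)
+ *	plane removal  -> commit_minimal_transition_state(dc, new state)
+ *	non-seamless pipe topology change
+ *	               -> commit_minimal_transition_state_in_dc_update()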
+ */ + bool force_minimal_pipe_splitting = 0; + bool is_plane_addition = 0; + bool is_fast_update_only; + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); + is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, + surface_count, stream_update, stream); + force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( + dc, + stream, + srf_updates, + surface_count, + &is_plane_addition); + + /* on plane addition, minimal state is the current one */ + if (force_minimal_pipe_splitting && is_plane_addition && + !commit_minimal_transition_state(dc, dc->current_state)) + return false; + + if (!update_planes_and_stream_state( + dc, + srf_updates, + surface_count, + stream, + stream_update, + &update_type, + &context)) + return false; + + /* on plane removal, minimal state is the new one */ + if (force_minimal_pipe_splitting && !is_plane_addition) { + if (!commit_minimal_transition_state(dc, context)) { + dc_state_release(context); + return false; + } + update_type = UPDATE_TYPE_FULL; + } + + if (dc->hwss.is_pipe_topology_transition_seamless && + !dc->hwss.is_pipe_topology_transition_seamless( + dc, dc->current_state, context)) + commit_minimal_transition_state_in_dc_update(dc, context, stream, + srf_updates, surface_count); + + if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) { + commit_planes_for_stream_fast(dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } else { + if (!stream_update && + dc->hwss.is_pipe_topology_transition_seamless && + !dc->hwss.is_pipe_topology_transition_seamless( + dc, dc->current_state, context)) { + DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n"); + BREAK_TO_DEBUGGER(); + } + commit_planes_for_stream( + dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } + if (dc->current_state != context) + swap_and_release_current_context(dc, context, stream); + return true; +} + +static void commit_planes_and_stream_update_on_current_context(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + enum surface_update_type update_type) +{ + struct dc_fast_update fast_update[MAX_SURFACES] = {0}; + + ASSERT(update_type < UPDATE_TYPE_FULL); + populate_fast_updates(fast_update, srf_updates, surface_count, + stream_update); + if (fast_update_only(dc, fast_update, srf_updates, surface_count, + stream_update, stream) && + !dc->debug.enable_legacy_fast_update) + commit_planes_for_stream_fast(dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + dc->current_state); + else + commit_planes_for_stream( + dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + dc->current_state); +} + +static void commit_planes_and_stream_update_with_new_context(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + enum surface_update_type update_type, + struct dc_state *new_context) +{ + ASSERT(update_type >= UPDATE_TYPE_FULL); + if (!dc->hwss.is_pipe_topology_transition_seamless(dc, + dc->current_state, new_context)) + /* + * It is required by the feature design that all pipe topologies + * using extra free pipes for power saving purposes such as + * dynamic ODM or SubVp shall only be enabled when it can be + * transitioned seamlessly to AND from its minimal transition + * 
state. A minimal transition state is defined as the same dc + * state but with all power saving features disabled. So it uses + * the minimum pipe topology. When we can't seamlessly + * transition from state A to state B, we will insert the + * minimal transition state A' or B' in between so seamless + * transition between A and B can be made possible. + */ + commit_minimal_transition_state_in_dc_update(dc, new_context, + stream, srf_updates, surface_count); + + commit_planes_for_stream( + dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + new_context); +} + +static bool update_planes_and_stream_v3(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update) +{ + struct dc_state *new_context; + enum surface_update_type update_type; + + /* + * When this function returns true and new_context is not equal to + * current state, the function allocates and validates a new dc state + * and assigns it to new_context. The function expects that the caller + * is responsible to free this memory when new_context is no longer + * used. We swap current with new context and free current instead. So + * new_context's memory will live until the next full update after it is + * replaced by a newer context. Refer to the use of + * swap_and_free_current_context below. + */ + if (!update_planes_and_stream_state(dc, srf_updates, surface_count, + stream, stream_update, &update_type, + &new_context)) + return false; + + if (new_context == dc->current_state) { + commit_planes_and_stream_update_on_current_context(dc, + srf_updates, surface_count, stream, + stream_update, update_type); + } else { + commit_planes_and_stream_update_with_new_context(dc, + srf_updates, surface_count, stream, + stream_update, update_type, new_context); + swap_and_release_current_context(dc, new_context, stream); + } + + return true; +} + +bool dc_update_planes_and_stream(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update) +{ + dc_exit_ips_for_hw_access(dc); + /* + * update planes and stream version 3 separates FULL and FAST updates + * to their own sequences. It aims to clean up frequent checks for + * update type resulting unnecessary branching in logic flow. It also + * adds a new commit minimal transition sequence, which detects the need + * for minimal transition based on the actual comparison of current and + * new states instead of "predicting" it based on per feature software + * policy.i.e could_mpcc_tree_change_for_active_pipes. The new commit + * minimal transition sequence is made universal to any power saving + * features that would use extra free pipes such as Dynamic ODM/MPC + * Combine, MPO or SubVp. Therefore there is no longer a need to + * specially handle compatibility problems with transitions among those + * features as they are now transparent to the new sequence. 
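+ *
+ * Version selection, as implemented below (illustrative):
+ *	dce_version >  DCN_VERSION_3_51 -> update_planes_and_stream_v3()
+ *	otherwise                       -> update_planes_and_stream_v2()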
+ */ + if (dc->ctx->dce_version > DCN_VERSION_3_51) + return update_planes_and_stream_v3(dc, srf_updates, + surface_count, stream, stream_update); + return update_planes_and_stream_v2(dc, srf_updates, + surface_count, stream, stream_update); +} + +void dc_commit_updates_for_stream(struct dc *dc, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + struct dc_state *state) +{ + dc_exit_ips_for_hw_access(dc); + /* TODO: Since change commit sequence can have a huge impact, + * we decided to only enable it for DCN3x. However, as soon as + * we get more confident about this change we'll need to enable + * the new sequence for all ASICs. + */ + if (dc->ctx->dce_version > DCN_VERSION_3_51) { + update_planes_and_stream_v3(dc, srf_updates, surface_count, + stream, stream_update); + return; + } + if (dc->ctx->dce_version >= DCN_VERSION_3_2) { + update_planes_and_stream_v2(dc, srf_updates, surface_count, + stream, stream_update); + return; + } + update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, + stream_update, state); } uint8_t dc_get_current_stream_count(struct dc *dc) @@ -4740,8 +5046,13 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) void dc_power_down_on_boot(struct dc *dc) { if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && - dc->hwss.power_down_on_boot) + dc->hwss.power_down_on_boot) { + + if (dc->caps.ips_support) + dc_exit_ips_for_hw_access(dc); + dc->hwss.power_down_on_boot(dc); + } } void dc_set_power_state( @@ -4879,11 +5190,15 @@ bool dc_set_replay_allow_active(struct dc *dc, bool active) return true; } -void dc_allow_idle_optimizations(struct dc *dc, bool allow) +void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name) { if (dc->debug.disable_idle_power_optimizations) return; + if (allow != dc->idle_optimizations_allowed) + DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__, + dc->idle_optimizations_allowed, allow, caller_name); + if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) return; @@ -4898,10 +5213,10 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) dc->idle_optimizations_allowed = allow; } -void dc_exit_ips_for_hw_access(struct dc *dc) +void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name) { if (dc->caps.ips_support) - dc_allow_idle_optimizations(dc, false); + dc_allow_idle_optimizations_internal(dc, false, caller_name); } bool dc_dmub_is_ips_idle_state(struct dc *dc) @@ -5035,10 +5350,13 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) } dc->clk_mgr->dc_mode_softmax_enabled = enable; } -bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, +bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, struct dc_cursor_attributes *cursor_attr) { - if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr)) + if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr)) return true; return false; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 9c05b1a071..5c1d3017ae 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -392,10 +392,10 @@ void 
get_hdr_visual_confirm_color( switch (top_pipe_ctx->plane_res.scl_data.format) { case PIXEL_FORMAT_ARGB2101010: - if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { + if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_PQ) { /* HDR10, ARGB2101010 - set border color to red */ color->color_r_cr = color_value; - } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { + } else if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) { /* FreeSync 2 ARGB2101010 - set border color to pink */ color->color_r_cr = color_value; color->color_b_cb = color_value; @@ -403,10 +403,10 @@ void get_hdr_visual_confirm_color( is_sdr = true; break; case PIXEL_FORMAT_FP16: - if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { + if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_PQ) { /* HDR10, FP16 - set border color to blue */ color->color_b_cb = color_value; - } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { + } else if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) { /* FreeSync 2 HDR - set border color to green */ color->color_g_y = color_value; } else @@ -558,9 +558,10 @@ void hwss_build_fast_sequence(struct dc *dc, struct dc_dmub_cmd *dc_dmub_cmd, unsigned int dmub_cmd_count, struct block_sequence block_sequence[], - int *num_steps, + unsigned int *num_steps, struct pipe_ctx *pipe_ctx, - struct dc_stream_status *stream_status) + struct dc_stream_status *stream_status, + struct dc_state *context) { struct dc_plane_state *plane = pipe_ctx->plane_state; struct dc_stream_state *stream = pipe_ctx->stream; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index ab598e1f08..15819416a2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -340,7 +340,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, return res_pool; } -void dc_destroy_resource_pool(struct dc *dc) +void dc_destroy_resource_pool(struct dc *dc) { if (dc) { if (dc->res_pool) @@ -1457,6 +1457,9 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx, controller_color_space = convert_dp_to_controller_color_space( otg_master->stream->test_pattern.color_space); + if (controller_test_pattern == CONTROLLER_DP_TEST_PATTERN_VIDEOMODE) + return; + odm_cnt = resource_get_opp_heads_for_otg_master(otg_master, res_ctx, opp_heads); odm_slice_width = h_active / odm_cnt; @@ -1485,6 +1488,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx); bool res = false; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); /* Invalid input */ @@ -1496,9 +1500,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) return false; } - pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( - pipe_ctx->plane_state->format); - /* Timing borders are part of vactive that we are also supposed to skip in addition * to any stream dst offset. Since dm logic assumes dst is in addressable * space we need to add the left and top borders to dst offsets temporarily. 
@@ -1510,6 +1511,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) /* Calculate H and V active size */ pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width; pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height; + pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( + pipe_ctx->plane_state->format); /* depends on h_active */ calculate_recout(pipe_ctx); @@ -1794,6 +1797,30 @@ int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( return free_pipe_idx; } +int resource_find_free_pipe_used_as_cur_sec_dpp( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool) +{ + int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; + const struct pipe_ctx *new_pipe, *cur_pipe; + int i; + + for (i = 0; i < pool->pipe_count; i++) { + cur_pipe = &cur_res_ctx->pipe_ctx[i]; + new_pipe = &new_res_ctx->pipe_ctx[i]; + + if (resource_is_pipe_type(cur_pipe, DPP_PIPE) && + !resource_is_pipe_type(cur_pipe, OPP_HEAD) && + resource_is_pipe_type(new_pipe, FREE_PIPE)) { + free_pipe_idx = i; + break; + } + } + + return free_pipe_idx; +} + int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine( const struct resource_context *cur_res_ctx, struct resource_context *new_res_ctx, @@ -2307,6 +2334,9 @@ static bool update_pipe_params_after_odm_slice_count_change( if (pool->funcs->build_pipe_pix_clk_params) pool->funcs->build_pipe_pix_clk_params(otg_master); + + resource_build_test_pattern_params(&context->res_ctx, otg_master); + return result; } @@ -2665,13 +2695,19 @@ bool resource_append_dpp_pipes_for_plane_composition( struct pipe_ctx *otg_master_pipe, struct dc_plane_state *plane_state) { + bool success; if (otg_master_pipe->plane_state == NULL) - return add_plane_to_opp_head_pipes(otg_master_pipe, + success = add_plane_to_opp_head_pipes(otg_master_pipe, plane_state, new_ctx); else - return acquire_secondary_dpp_pipes_and_add_plane( + success = acquire_secondary_dpp_pipes_and_add_plane( otg_master_pipe, plane_state, new_ctx, cur_ctx, pool); + if (success) + /* when appending a plane mpc slice count changes from 0 to 1 */ + success = update_pipe_params_after_mpc_slice_count_change( + plane_state, new_ctx, pool); + return success; } void resource_remove_dpp_pipes_for_plane_composition( @@ -3006,7 +3042,7 @@ bool resource_update_pipes_for_plane_with_slice_count( int i; int dpp_pipe_count; int cur_slice_count; - struct pipe_ctx *dpp_pipes[MAX_PIPES]; + struct pipe_ctx *dpp_pipes[MAX_PIPES] = {0}; bool result = true; dpp_pipe_count = resource_get_dpp_pipes_for_plane(plane, @@ -3415,11 +3451,31 @@ static bool acquire_otg_master_pipe_for_stream( * any free pipes already used in current context as this could tear * down exiting ODM/MPC/MPO configuration unnecessarily. */ + + /* + * Try to acquire the same OTG master already in use. This is not + * optimal because resetting an enabled OTG master pipe for a new stream + * requires an extra frame of wait. However there are test automation + * and eDP assumptions that rely on reusing the same OTG master pipe + * during mode change. We have to keep this logic as is for now. + */ pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( &cur_ctx->res_ctx, &new_ctx->res_ctx, pool); + /* + * Try to acquire a pipe not used in current resource context to avoid + * pipe swapping. 
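+ *
+ * Overall acquisition priority, summarized for clarity (steps 3 and 4
+ * follow below; illustrative only):
+ *  1. free pipe already used as OTG master in the current context
+ *  2. free pipe not used at all in the current context
+ *  3. free pipe used as a secondary DPP pipe in the current context
+ *  4. any free pipe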
+ */ if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx( &cur_ctx->res_ctx, &new_ctx->res_ctx, pool); + /* + * If pipe swapping is unavoidable, try to acquire pipe used as + * secondary DPP pipe in current state as we prioritize to support more + * streams over supporting MPO planes. + */ + if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp( + &cur_ctx->res_ctx, &new_ctx->res_ctx, pool); if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool); if (pipe_idx != FREE_PIPE_INDEX_NOT_FOUND) { @@ -4034,7 +4090,7 @@ static void set_avi_info_frame( } if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR && - stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { + stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) { hdmi_info.bits.EC0_EC2 = 0; hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709; } @@ -5036,3 +5092,39 @@ bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_st return false; } + +void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options) +{ + dml2_options->callbacks.dc = dc; + dml2_options->callbacks.build_scaling_params = &resource_build_scaling_params; + dml2_options->callbacks.build_test_pattern_params = &resource_build_test_pattern_params; + dml2_options->callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; + dml2_options->callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; + dml2_options->callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; + dml2_options->callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; + dml2_options->callbacks.get_mpc_slice_count = &resource_get_mpc_slice_count; + dml2_options->callbacks.get_odm_slice_index = &resource_get_odm_slice_index; + dml2_options->callbacks.get_odm_slice_count = &resource_get_odm_slice_count; + dml2_options->callbacks.get_opp_head = &resource_get_opp_head; + dml2_options->callbacks.get_otg_master_for_stream = &resource_get_otg_master_for_stream; + dml2_options->callbacks.get_opp_heads_for_otg_master = &resource_get_opp_heads_for_otg_master; + dml2_options->callbacks.get_dpp_pipes_for_plane = &resource_get_dpp_pipes_for_plane; + dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status; + dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id; + + dml2_options->svp_pstate.callbacks.dc = dc; + dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; + dml2_options->svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream; + dml2_options->svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; + dml2_options->svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane; + dml2_options->svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane; + dml2_options->svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream; + dml2_options->svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream; + dml2_options->svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane; + dml2_options->svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream; + dml2_options->svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type; + 
dml2_options->svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type; + dml2_options->svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream; + dml2_options->svp_pstate.callbacks.remove_phantom_streams_and_planes = &dc_state_remove_phantom_streams_and_planes; + dml2_options->svp_pstate.callbacks.release_phantom_streams_and_planes = &dc_state_release_phantom_streams_and_planes; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c index 5f6392ae31..cd6570a1e2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c @@ -61,7 +61,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification /* For HPD/HPD RX, convert dpia port index into link index */ if (notify->type == DMUB_NOTIFICATION_HPD || notify->type == DMUB_NOTIFICATION_HPD_IRQ || - notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION || + notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION || notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) { notify->link_index = get_link_index_from_dpia_port_index(dc, notify->link_index); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 61986e5cb4..76bb05f4d6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -188,8 +188,11 @@ static void init_state(struct dc *dc, struct dc_state *state) } /* Public dc_state functions */ -struct dc_state *dc_state_create(struct dc *dc) +struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params) { +#ifdef CONFIG_DRM_AMD_DC_FP + struct dml2_configuration_options *dml2_opt = &dc->dml2_options; +#endif struct dc_state *state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL); @@ -198,10 +201,16 @@ struct dc_state *dc_state_create(struct dc *dc) init_state(dc, state); dc_state_construct(dc, state); + state->power_source = params ? 
params->power_source : DC_POWER_SOURCE_AC; #ifdef CONFIG_DRM_AMD_DC_FP - if (dc->debug.using_dml2) - dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2); + if (dc->debug.using_dml2) { + dml2_opt->use_clock_dc_limits = false; + dml2_create(dc, dml2_opt, &state->bw_ctx.dml2); + + dml2_opt->use_clock_dc_limits = true; + dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source); + } #endif kref_init(&state->refcount); @@ -214,6 +223,7 @@ void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state) struct kref refcount = dst_state->refcount; #ifdef CONFIG_DRM_AMD_DC_FP struct dml2_context *dst_dml2 = dst_state->bw_ctx.dml2; + struct dml2_context *dst_dml2_dc_power_source = dst_state->bw_ctx.dml2_dc_power_source; #endif dc_state_copy_internal(dst_state, src_state); @@ -222,6 +232,10 @@ void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state) dst_state->bw_ctx.dml2 = dst_dml2; if (src_state->bw_ctx.dml2) dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2); + + dst_state->bw_ctx.dml2_dc_power_source = dst_dml2_dc_power_source; + if (src_state->bw_ctx.dml2_dc_power_source) + dml2_copy(dst_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source); #endif /* context refcount should not be overridden */ @@ -245,6 +259,12 @@ struct dc_state *dc_state_create_copy(struct dc_state *src_state) dc_state_release(new_state); return NULL; } + + if (src_state->bw_ctx.dml2_dc_power_source && + !dml2_create_copy(&new_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source)) { + dc_state_release(new_state); + return NULL; + } #endif kref_init(&new_state->refcount); @@ -310,7 +330,6 @@ void dc_state_destruct(struct dc_state *state) memset(state->dc_dmub_cmd, 0, sizeof(state->dc_dmub_cmd)); state->dmub_cmd_count = 0; memset(&state->perf_params, 0, sizeof(state->perf_params)); - memset(&state->scratch, 0, sizeof(state->scratch)); } void dc_state_retain(struct dc_state *state) @@ -327,6 +346,9 @@ static void dc_state_free(struct kref *kref) #ifdef CONFIG_DRM_AMD_DC_FP dml2_destroy(state->bw_ctx.dml2); state->bw_ctx.dml2 = 0; + + dml2_destroy(state->bw_ctx.dml2_dc_power_source); + state->bw_ctx.dml2_dc_power_source = 0; #endif kvfree(state); @@ -341,7 +363,7 @@ void dc_state_release(struct dc_state *state) * dc_state_add_stream() - Add a new dc_stream_state to a dc_state. */ enum dc_status dc_state_add_stream( - struct dc *dc, + const struct dc *dc, struct dc_state *state, struct dc_stream_state *stream) { @@ -370,7 +392,7 @@ enum dc_status dc_state_add_stream( * dc_state_remove_stream() - Remove a stream from a dc_state. 
*/ enum dc_status dc_state_remove_stream( - struct dc *dc, + const struct dc *dc, struct dc_state *state, struct dc_stream_state *stream) { @@ -595,7 +617,7 @@ bool dc_state_add_all_planes_for_stream( */ struct dc_stream_status *dc_state_get_stream_status( struct dc_state *state, - struct dc_stream_state *stream) + const struct dc_stream_state *stream) { uint8_t i; @@ -689,7 +711,7 @@ void dc_state_release_phantom_stream(const struct dc *dc, dc_stream_release(phantom_stream); } -struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc, +struct dc_plane_state *dc_state_create_phantom_plane(const struct dc *dc, struct dc_state *state, struct dc_plane_state *main_plane) { @@ -725,7 +747,7 @@ void dc_state_release_phantom_plane(const struct dc *dc, } /* add phantom streams to context and generate correct meta inside dc_state */ -enum dc_status dc_state_add_phantom_stream(struct dc *dc, +enum dc_status dc_state_add_phantom_stream(const struct dc *dc, struct dc_state *state, struct dc_stream_state *phantom_stream, struct dc_stream_state *main_stream) @@ -751,7 +773,7 @@ enum dc_status dc_state_add_phantom_stream(struct dc *dc, return res; } -enum dc_status dc_state_remove_phantom_stream(struct dc *dc, +enum dc_status dc_state_remove_phantom_stream(const struct dc *dc, struct dc_state *state, struct dc_stream_state *phantom_stream) { @@ -845,7 +867,7 @@ bool dc_state_add_all_phantom_planes_for_stream( } bool dc_state_remove_phantom_streams_and_planes( - struct dc *dc, + const struct dc *dc, struct dc_state *state) { int i; @@ -867,7 +889,7 @@ bool dc_state_remove_phantom_streams_and_planes( } void dc_state_release_phantom_streams_and_planes( - struct dc *dc, + const struct dc *dc, struct dc_state *state) { int i; @@ -878,3 +900,19 @@ void dc_state_release_phantom_streams_and_planes( for (i = 0; i < state->phantom_plane_count; i++) dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]); } + +struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id) +{ + struct dc_stream_state *stream = NULL; + int i; + + for (i = 0; i < state->stream_count; i++) { + if (state->streams[i] && state->streams[i]->stream_id == id) { + stream = state->streams[i]; + break; + } + } + + return stream; +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 51a970fcb5..5c7e4884ca 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -116,12 +116,7 @@ bool dc_stream_construct(struct dc_stream_state *stream, update_stream_signal(stream, dc_sink_data); - stream->out_transfer_func = dc_create_transfer_func(); - if (stream->out_transfer_func == NULL) { - dc_sink_release(dc_sink_data); - return false; - } - stream->out_transfer_func->type = TF_TYPE_BYPASS; + stream->out_transfer_func.type = TF_TYPE_BYPASS; dc_stream_assign_stream_id(stream); @@ -131,10 +126,6 @@ bool dc_stream_construct(struct dc_stream_state *stream, void dc_stream_destruct(struct dc_stream_state *stream) { dc_sink_release(stream->sink); - if (stream->out_transfer_func != NULL) { - dc_transfer_func_release(stream->out_transfer_func); - stream->out_transfer_func = NULL; - } } void dc_stream_assign_stream_id(struct dc_stream_state *stream) @@ -201,9 +192,6 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) if (new_stream->sink) dc_sink_retain(new_stream->sink); - if (new_stream->out_transfer_func) - 
dc_transfer_func_retain(new_stream->out_transfer_func); - dc_stream_assign_stream_id(new_stream); /* If using dynamic encoder assignment, wait till stream committed to assign encoder. */ @@ -319,7 +307,7 @@ bool dc_stream_set_cursor_attributes( program_cursor_attributes(dc, stream, attributes); /* re-enable idle optimizations if necessary */ - if (reset_idle_optimizations) + if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle) dc_allow_idle_optimizations(dc, true); return true; @@ -394,7 +382,7 @@ bool dc_stream_set_cursor_position( program_cursor_position(dc, stream, position); /* re-enable idle optimizations if necessary */ - if (reset_idle_optimizations) + if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle) dc_allow_idle_optimizations(dc, true); return true; @@ -425,7 +413,7 @@ bool dc_stream_add_writeback(struct dc *dc, dc_exit_ips_for_hw_access(dc); - wb_info->dwb_params.out_transfer_func = stream->out_transfer_func; + wb_info->dwb_params.out_transfer_func = &stream->out_transfer_func; dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; dwb->dwb_is_drc = false; @@ -507,7 +495,7 @@ bool dc_stream_remove_writeback(struct dc *dc, struct dc_stream_state *stream, uint32_t dwb_pipe_inst) { - int i = 0, j = 0; + unsigned int i, j; if (stream == NULL) { dm_error("DC: dc_stream is NULL!\n"); return false; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index 19140fb657..ccbb15f163 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -41,25 +41,15 @@ void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_sta { plane_state->ctx = ctx; - plane_state->gamma_correction = dc_create_gamma(); - if (plane_state->gamma_correction != NULL) - plane_state->gamma_correction->is_identity = true; + plane_state->gamma_correction.is_identity = true; - plane_state->in_transfer_func = dc_create_transfer_func(); - if (plane_state->in_transfer_func != NULL) { - plane_state->in_transfer_func->type = TF_TYPE_BYPASS; - } - plane_state->in_shaper_func = dc_create_transfer_func(); - if (plane_state->in_shaper_func != NULL) { - plane_state->in_shaper_func->type = TF_TYPE_BYPASS; - } + plane_state->in_transfer_func.type = TF_TYPE_BYPASS; - plane_state->lut3d_func = dc_create_3dlut_func(); + plane_state->in_shaper_func.type = TF_TYPE_BYPASS; - plane_state->blend_tf = dc_create_transfer_func(); - if (plane_state->blend_tf != NULL) { - plane_state->blend_tf->type = TF_TYPE_BYPASS; - } + plane_state->lut3d_func.state.raw = 0; + + plane_state->blend_tf.type = TF_TYPE_BYPASS; plane_state->pre_multiplied_alpha = true; @@ -67,30 +57,27 @@ void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_sta void dc_plane_destruct(struct dc_plane_state *plane_state) { - if (plane_state->gamma_correction != NULL) { - dc_gamma_release(&plane_state->gamma_correction); - } - if (plane_state->in_transfer_func != NULL) { - dc_transfer_func_release( - plane_state->in_transfer_func); - plane_state->in_transfer_func = NULL; - } - if (plane_state->in_shaper_func != NULL) { - dc_transfer_func_release( - plane_state->in_shaper_func); - plane_state->in_shaper_func = NULL; - } - if (plane_state->lut3d_func != NULL) { - dc_3dlut_func_release( - plane_state->lut3d_func); - plane_state->lut3d_func = NULL; - } - if (plane_state->blend_tf != NULL) { - dc_transfer_func_release( - plane_state->blend_tf); - plane_state->blend_tf = NULL; + // no more pointers 
to free within dc_plane_state +} + + +/* dc_state is passed in separately since it may differ from the current dc state accessible from plane_state e.g. + * if the driver is doing an update from an old context to a new one and the caller wants the pipe mask for the new + * context rather than the existing one + */ +uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane_state *plane_state) +{ + uint8_t pipe_mask = 0; + int i; + + for (i = 0; i < plane_state->ctx->dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->plane_state == plane_state && pipe_ctx->plane_res.hubp) + pipe_mask |= 1 << pipe_ctx->plane_res.hubp->inst; } + return pipe_mask; } /******************************************************************************* @@ -103,7 +90,7 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, /*register_flip_interrupt(surface);*/ } -struct dc_plane_state *dc_create_plane_state(struct dc *dc) +struct dc_plane_state *dc_create_plane_state(const struct dc *dc) { struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), GFP_KERNEL); @@ -156,7 +143,8 @@ const struct dc_plane_status *dc_plane_get_status( if (pipe_ctx->plane_state != plane_state) continue; - pipe_ctx->plane_state->status.is_flip_pending = false; + if (pipe_ctx->plane_state) + pipe_ctx->plane_state->status.is_flip_pending = false; break; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index ee8453bf95..4362fca1f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -44,6 +44,8 @@ #include "dml2/dml2_wrapper.h" +#include "dmub/inc/dmub_cmd.h" + struct abm_save_restore; /* forward declaration */ @@ -51,7 +53,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.273" +#define DC_VER "3.2.281" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -219,6 +221,7 @@ struct dc_dmub_caps { bool mclk_sw; bool subvp_psr; bool gecc_enable; + uint8_t fams_ver; }; struct dc_caps { @@ -306,12 +309,12 @@ struct dc_dcc_setting { unsigned int max_compressed_blk_size; unsigned int max_uncompressed_blk_size; bool independent_64b_blks; - //These bitfields to be used starting with DCN + //These bitfields to be used starting with DCN 3.0 struct { - uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case) - uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN - uint32_t dcc_256_128_128 : 1; //available starting with DCN - uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case) + uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case) + uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0 + uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0 + uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case) } dcc_controls; }; @@ -435,6 +438,9 @@ struct dc_config { unsigned int disable_ips; unsigned int disable_ips_in_vpb; bool usb4_bw_alloc_support; + bool allow_0_dtb_clk; + bool use_assr_psp_message; + bool support_edp0_on_dp1; }; enum visual_confirm { @@ -693,6 +699,8 @@ enum pg_hw_pipe_resources { PG_MPCC, PG_OPP, PG_OPTC, + PG_DPSTREAM, + PG_HDMISTREAM, PG_HW_PIPE_RESOURCES_NUM_ELEMENT }; @@ -987,14 +995,17 @@ struct dc_debug_options { bool psp_disabled_wa; unsigned int ips2_eval_delay_us; unsigned int 
ips2_entry_delay_us; + bool optimize_ips_handshake; bool disable_dmub_reallow_idle; bool disable_timeout; bool disable_extblankadj; + bool enable_idle_reg_checks; unsigned int static_screen_wait_frames; bool force_chroma_subsampling_1tap; + bool disable_422_left_edge_pixel; + unsigned int force_cositing; }; -struct gpu_info_soc_bounding_box_v1_0; /* Generic structure that can be used to query properties of DC. More fields * can be added as required. @@ -1003,76 +1014,6 @@ struct dc_current_properties { unsigned int cursor_size_limit; }; -struct dc { - struct dc_debug_options debug; - struct dc_versions versions; - struct dc_caps caps; - struct dc_cap_funcs cap_funcs; - struct dc_config config; - struct dc_bounding_box_overrides bb_overrides; - struct dc_bug_wa work_arounds; - struct dc_context *ctx; - struct dc_phy_addr_space_config vm_pa_config; - - uint8_t link_count; - struct dc_link *links[MAX_PIPES * 2]; - struct link_service *link_srv; - - struct dc_state *current_state; - struct resource_pool *res_pool; - - struct clk_mgr *clk_mgr; - - /* Display Engine Clock levels */ - struct dm_pp_clock_levels sclk_lvls; - - /* Inputs into BW and WM calculations. */ - struct bw_calcs_dceip *bw_dceip; - struct bw_calcs_vbios *bw_vbios; - struct dcn_soc_bounding_box *dcn_soc; - struct dcn_ip_params *dcn_ip; - struct display_mode_lib dml; - - /* HW functions */ - struct hw_sequencer_funcs hwss; - struct dce_hwseq *hwseq; - - /* Require to optimize clocks and bandwidth for added/removed planes */ - bool optimized_required; - bool wm_optimized_required; - bool idle_optimizations_allowed; - bool enable_c20_dtm_b0; - - /* Require to maintain clocks and bandwidth for UEFI enabled HW */ - - /* FBC compressor */ - struct compressor *fbc_compressor; - - struct dc_debug_data debug_data; - struct dpcd_vendor_signature vendor_signature; - - const char *build_id; - struct vm_helper *vm_helper; - - uint32_t *dcn_reg_offsets; - uint32_t *nbio_reg_offsets; - uint32_t *clk_reg_offsets; - - /* Scratch memory */ - struct { - struct { - /* - * For matching clock_limits table in driver with table - * from PMFW. 
- */ - struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; - } update_bw_bounding_box; - } scratch; - - struct dml2_configuration_options dml2_options; - enum dc_acpi_cm_power_state power_state; -}; - enum frame_buffer_mode { FRAME_BUFFER_MODE_LOCAL_ONLY = 0, FRAME_BUFFER_MODE_ZFB_ONLY, @@ -1277,6 +1218,8 @@ union surface_update_flags { uint32_t raw; }; +#define DC_REMOVE_PLANE_POINTERS 1 + struct dc_plane_state { struct dc_plane_address address; struct dc_plane_flip_time time; @@ -1291,8 +1234,8 @@ struct dc_plane_state { struct dc_plane_dcc_param dcc; - struct dc_gamma *gamma_correction; - struct dc_transfer_func *in_transfer_func; + struct dc_gamma gamma_correction; + struct dc_transfer_func in_transfer_func; struct dc_bias_and_scale *bias_and_scale; struct dc_csc_transform input_csc_color_matrix; struct fixed31_32 coeff_reduction_factor; @@ -1304,9 +1247,9 @@ struct dc_plane_state { enum dc_color_space color_space; - struct dc_3dlut *lut3d_func; - struct dc_transfer_func *in_shaper_func; - struct dc_transfer_func *blend_tf; + struct dc_3dlut lut3d_func; + struct dc_transfer_func in_shaper_func; + struct dc_transfer_func blend_tf; struct dc_transfer_func *gamcor_tf; enum surface_pixel_format format; @@ -1342,6 +1285,7 @@ struct dc_plane_state { struct tg_color visual_confirm_color; bool is_statically_allocated; + enum chroma_cositing cositing; }; struct dc_plane_info { @@ -1360,6 +1304,97 @@ struct dc_plane_info { int global_alpha_value; bool input_csc_enabled; int layer_index; + enum chroma_cositing cositing; +}; + +#include "dc_stream.h" + +struct dc_scratch_space { + /* used to temporarily backup plane states of a stream during + * dc update. The reason is that plane states are overwritten + * with surface updates in dc update. Once they are overwritten + * current state is no longer valid. We want to temporarily + * store current value in plane states so we can still recover + * a valid current state during dc update. + */ + struct dc_plane_state plane_states[MAX_SURFACE_NUM]; + + struct dc_stream_state stream_state; +}; + +struct dc { + struct dc_debug_options debug; + struct dc_versions versions; + struct dc_caps caps; + struct dc_cap_funcs cap_funcs; + struct dc_config config; + struct dc_bounding_box_overrides bb_overrides; + struct dc_bug_wa work_arounds; + struct dc_context *ctx; + struct dc_phy_addr_space_config vm_pa_config; + + uint8_t link_count; + struct dc_link *links[MAX_LINKS]; + struct link_service *link_srv; + + struct dc_state *current_state; + struct resource_pool *res_pool; + + struct clk_mgr *clk_mgr; + + /* Display Engine Clock levels */ + struct dm_pp_clock_levels sclk_lvls; + + /* Inputs into BW and WM calculations. 
*/ + struct bw_calcs_dceip *bw_dceip; + struct bw_calcs_vbios *bw_vbios; + struct dcn_soc_bounding_box *dcn_soc; + struct dcn_ip_params *dcn_ip; + struct display_mode_lib dml; + + /* HW functions */ + struct hw_sequencer_funcs hwss; + struct dce_hwseq *hwseq; + + /* Require to optimize clocks and bandwidth for added/removed planes */ + bool optimized_required; + bool wm_optimized_required; + bool idle_optimizations_allowed; + bool enable_c20_dtm_b0; + + /* Require to maintain clocks and bandwidth for UEFI enabled HW */ + + /* FBC compressor */ + struct compressor *fbc_compressor; + + struct dc_debug_data debug_data; + struct dpcd_vendor_signature vendor_signature; + + const char *build_id; + struct vm_helper *vm_helper; + + uint32_t *dcn_reg_offsets; + uint32_t *nbio_reg_offsets; + uint32_t *clk_reg_offsets; + + /* Scratch memory */ + struct { + struct { + /* + * For matching clock_limits table in driver with table + * from PMFW. + */ + struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; + } update_bw_bounding_box; + struct dc_scratch_space current_state; + struct dc_scratch_space new_state; + struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack + } scratch; + + struct dml2_configuration_options dml2_options; + struct dml2_configuration_options dml2_tmp; + enum dc_acpi_cm_power_state power_state; + }; struct dc_scaling_info { @@ -1476,10 +1511,15 @@ bool dc_acquire_release_mpc_3dlut( bool dc_resource_is_dsc_encoding_supported(const struct dc *dc); void get_audio_check(struct audio_info *aud_modes, struct audio_check *aud_chk); - -enum dc_status dc_commit_streams(struct dc *dc, - struct dc_stream_state *streams[], - uint8_t stream_count); +/* + * Set up streams and links associated to drive sinks + * The streams parameter is an absolute set of all active streams. + * + * After this call: + * Phy, Encoder, Timing Generator are programmed and enabled. + * New streams are enabled with blank stream; no memory read. 
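Editor's note, not part of the patch: the hunk above moves the dc_commit_streams() documentation into dc.h and changes the signature from a raw stream array to a dc_commit_streams_params struct (defined later in this patch in dc_types.h with streams, stream_count and power_source fields). The sketch below shows, under those assumptions, how a display-manager-side caller might adapt; the wrapper name my_commit and the choice of DC_POWER_SOURCE_AC are illustrative only.

/*
 * Hypothetical caller-side adaptation to the new dc_commit_streams()
 * signature introduced by this patch. Field names come from the
 * dc_commit_streams_params definition added in dc_types.h.
 */
static enum dc_status my_commit(struct dc *dc,
				struct dc_stream_state **streams,
				uint8_t stream_count)
{
	struct dc_commit_streams_params params = {
		.streams = streams,           /* absolute set of all active streams */
		.stream_count = stream_count,
		.power_source = DC_POWER_SOURCE_AC, /* assumption: wall power */
	};

	return dc_commit_streams(dc, &params);
}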
+ */ +enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params); struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc, @@ -2335,11 +2375,17 @@ bool dc_is_dmcu_initialized(struct dc *dc); enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping); void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); -bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, - struct dc_cursor_attributes *cursor_attr); +bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, + struct dc_cursor_attributes *cursor_attr); + +#define dc_allow_idle_optimizations(dc, allow) dc_allow_idle_optimizations_internal(dc, allow, __func__) +#define dc_exit_ips_for_hw_access(dc) dc_exit_ips_for_hw_access_internal(dc, __func__) -void dc_allow_idle_optimizations(struct dc *dc, bool allow); -void dc_exit_ips_for_hw_access(struct dc *dc); +void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, const char *caller_name); +void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name); bool dc_dmub_is_ips_idle_state(struct dc *dc); /* set min and max memory clock to lowest and highest DPM level, respectively */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index a72e849ece..2293a92df3 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -23,6 +23,7 @@ * */ +#include "dm_services.h" #include "dc.h" #include "dc_dmub_srv.h" #include "../dmub/dmub_srv.h" @@ -34,6 +35,7 @@ #include "resource.h" #include "clk_mgr.h" #include "dc_state_priv.h" +#include "dc_plane_priv.h" #define CTX dc_dmub_srv->ctx #define DC_LOGGER CTX->logger @@ -198,6 +200,11 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, if (status != DMUB_STATUS_OK) { DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); + if (!dmub->debug.timeout_occured) { + dmub->debug.timeout_occured = true; + dmub->debug.timeout_cmd = *cmd_list; + dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx); + } dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); return false; } @@ -904,12 +911,15 @@ bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmu void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_diagnostic_data diag_data = {0}; + uint32_t i; if (!dc_dmub_srv || !dc_dmub_srv->dmub) { DC_LOG_ERROR("%s: invalid parameters.", __func__); return; } + DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__); + if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) { DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__); return; @@ -933,7 +943,8 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]); DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]); DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]); - DC_LOG_DEBUG(" pc : %08x", diag_data.pc); + for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++) + DC_LOG_DEBUG(" pc[%d] : %08x", i, diag_data.pc[i]); DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr); DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr); DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr); @@ 
-1199,8 +1210,23 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) return true; } +static int count_active_streams(const struct dc *dc) +{ + int i, count = 0; + + for (i = 0; i < dc->current_state->stream_count; ++i) { + struct dc_stream_state *stream = dc->current_state->streams[i]; + + if (stream && !stream->dpms_off) + count += 1; + } + + return count; +} + static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) { + volatile const struct dmub_shared_state_ips_fw *ips_fw; struct dc_dmub_srv *dc_dmub_srv; union dmub_rb_cmd cmd = {0}; @@ -1211,6 +1237,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) return; dc_dmub_srv = dc->ctx->dmub_srv; + ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; memset(&cmd, 0, sizeof(cmd)); cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT; @@ -1226,6 +1253,12 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; union dmub_shared_state_ips_driver_signals new_signals; + DC_LOG_IPS( + "%s wait idle (ips1_commit=%d ips2_commit=%d)", + __func__, + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); memset(&new_signals, 0, sizeof(new_signals)); @@ -1245,19 +1278,46 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; + } else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) { + /* TODO: Move this logic out to hwseq */ + if (count_active_streams(dc) == 0) { + /* IPS2 - Display off */ + new_signals.bits.allow_pg = 1; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 1; + new_signals.bits.allow_z10 = 1; + } else { + /* RCG only */ + new_signals.bits.allow_pg = 0; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 0; + new_signals.bits.allow_z10 = 0; + } } ips_driver->signals = new_signals; } + DC_LOG_IPS( + "%s send allow_idle=%d (ips1_commit=%d ips2_commit=%d)", + __func__, + allow_idle, + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + /* NOTE: This does not use the "wake" interface since this is part of the wake path. */ /* We also do not perform a wait since DMCUB could enter idle after the notification. */ dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT); + + /* Register access should stop at this point. 
*/ + if (allow_idle) + dc_dmub_srv->needs_idle_wake = true; } static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { struct dc_dmub_srv *dc_dmub_srv; + uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0; if (dc->debug.dmcub_emulation) return; @@ -1274,40 +1334,113 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals; + rcg_exit_count = ips_fw->rcg_exit_count; + ips1_exit_count = ips_fw->ips1_exit_count; + ips2_exit_count = ips_fw->ips2_exit_count; + ips_driver->signals.all = 0; - if (prev_driver_signals.bits.allow_ips2) { - udelay(dc->debug.ips2_eval_delay_us); + DC_LOG_IPS( + "%s (allow ips1=%d ips2=%d) (commit ips1=%d ips2=%d) (count rcg=%d ips1=%d ips2=%d)", + __func__, + ips_driver->signals.bits.allow_ips1, + ips_driver->signals.bits.allow_ips2, + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit, + ips_fw->rcg_entry_count, + ips_fw->ips1_entry_count, + ips_fw->ips2_entry_count); + + /* Note: register access has technically not resumed for DCN here, but we + * need to be message PMFW through our standard register interface. + */ + dc_dmub_srv->needs_idle_wake = false; + + if (prev_driver_signals.bits.allow_ips2 && + (!dc->debug.optimize_ips_handshake || + ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) { + DC_LOG_IPS( + "wait IPS2 eval (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + + if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit) + udelay(dc->debug.ips2_eval_delay_us); if (ips_fw->signals.bits.ips2_commit) { + DC_LOG_IPS( + "exit IPS2 #1 (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + // Tell PMFW to exit low power state dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); + DC_LOG_IPS( + "wait IPS2 entry delay (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + // Wait for IPS2 entry upper bound udelay(dc->debug.ips2_entry_delay_us); + DC_LOG_IPS( + "exit IPS2 #2 (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); + DC_LOG_IPS( + "wait IPS2 commit clear (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + while (ips_fw->signals.bits.ips2_commit) udelay(1); + DC_LOG_IPS( + "wait hw_pwr_up (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); + DC_LOG_IPS( + "resync inbox1 (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub); } } dc_dmub_srv_notify_idle(dc, false); if (prev_driver_signals.bits.allow_ips1) { + DC_LOG_IPS( + "wait for IPS1 commit clear (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + while (ips_fw->signals.bits.ips1_commit) udelay(1); + DC_LOG_IPS( + "wait for IPS1 commit clear done (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); } } if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); + + 
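Editor's note, not part of the patch: the dc_dmub_srv_notify_idle() hunk above adds count_active_streams() and a DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF policy. Shown standalone below as a sketch: with no active streams the driver allows PG/IPS1/IPS2/Z10, otherwise only refclk gating via IPS1. The helper name pick_idle_signals is hypothetical; the bit assignments mirror the hunk.

/*
 * Illustrative restatement of the idle-signal selection added for
 * DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF (not part of the patch).
 */
static void pick_idle_signals(const struct dc *dc,
			      union dmub_shared_state_ips_driver_signals *sig)
{
	memset(sig, 0, sizeof(*sig));

	if (count_active_streams(dc) == 0) {
		/* Display off: allow the deepest states, including Z10 */
		sig->bits.allow_pg = 1;
		sig->bits.allow_ips1 = 1;
		sig->bits.allow_ips2 = 1;
		sig->bits.allow_z10 = 1;
	} else {
		/* Streams active: RCG only */
		sig->bits.allow_ips1 = 1;
	}
}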
DC_LOG_IPS("%s exit (count rcg=%d ips1=%d ips2=%d)", + __func__, + rcg_exit_count, + ips1_exit_count, + ips2_exit_count); } void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState) @@ -1335,6 +1468,8 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_ if (dc_dmub_srv->idle_allowed == allow_idle) return; + DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle); + /* * Entering a low power state requires a driver notification. * Powering up the hardware requires notifying PMFW and DMCUB. @@ -1343,6 +1478,8 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_ */ if (!allow_idle) { + dc_dmub_srv->idle_exit_counter += 1; + dc_dmub_srv_exit_low_power_state(dc); /* * Idle is considered fully exited only after the sequence above @@ -1354,6 +1491,12 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_ * dm_execute_dmub_cmd submission instead of the "wake" helpers. */ dc_dmub_srv->idle_allowed = false; + + dc_dmub_srv->idle_exit_counter -= 1; + if (dc_dmub_srv->idle_exit_counter < 0) { + ASSERT(0); + dc_dmub_srv->idle_exit_counter = 0; + } } else { /* Consider idle as notified prior to the actual submission to * prevent multiple entries. */ @@ -1395,7 +1538,8 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in else result = dm_execute_dmub_cmd(ctx, cmd, wait_type); - if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle) + if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 && + !ctx->dc->debug.disable_dmub_reallow_idle) dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); return result; @@ -1444,8 +1588,10 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type); - if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle) + if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 && + !ctx->dc->debug.disable_dmub_reallow_idle) dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); return result; } + diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 952bfb3688..2c5866211f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -35,6 +35,7 @@ struct pipe_ctx; struct dc_crtc_timing_adjust; struct dc_crtc_timing; struct dc_state; +struct dc_surface_update; struct dc_reg_helper_state { bool gather_in_progress; @@ -51,7 +52,9 @@ struct dc_dmub_srv { struct dc_context *ctx; void *dm; + int32_t idle_exit_counter; bool idle_allowed; + bool needs_idle_wake; }; void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 1cb7765f59..519c3df78e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -137,8 +137,13 @@ enum dp_link_encoding { enum dp_test_link_rate { DP_TEST_LINK_RATE_RBR = 0x06, + DP_TEST_LINK_RATE_RATE_2 = 0x08, // Rate_2 - 2.16 Gbps/Lane + DP_TEST_LINK_RATE_RATE_3 = 0x09, // Rate_3 - 2.43 Gbps/Lane DP_TEST_LINK_RATE_HBR = 0x0A, + DP_TEST_LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2) - 3.24 Gbps/Lane + DP_TEST_LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane DP_TEST_LINK_RATE_HBR2 = 0x14, + DP_TEST_LINK_RATE_RATE_8 = 0x19, // Rate_8 - 6.75 
Gbps/Lane DP_TEST_LINK_RATE_HBR3 = 0x1E, DP_TEST_LINK_RATE_UHBR10 = 0x01, DP_TEST_LINK_RATE_UHBR20 = 0x02, @@ -917,16 +922,6 @@ struct dpcd_usb4_dp_tunneling_info { uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN]; }; -#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT -#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3 -#endif -#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0 -#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230 -#endif -#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_263_256 -#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0X2250 -#endif - union dp_main_line_channel_coding_cap { struct { uint8_t DP_8b_10b_SUPPORTED :1; @@ -1232,8 +1227,7 @@ union replay_enable_and_configuration { unsigned char FREESYNC_PANEL_REPLAY_MODE :1; unsigned char TIMING_DESYNC_ERROR_VERIFICATION :1; unsigned char STATE_TRANSITION_ERROR_DETECTION :1; - unsigned char RESERVED0 :1; - unsigned char RESERVED1 :4; + unsigned char RESERVED :5; } bits; unsigned char raw; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index aae2f3a266..2ad7f60805 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -738,6 +738,13 @@ enum scanning_type { SCANNING_TYPE_UNDEFINED }; +enum chroma_cositing { + CHROMA_COSITING_NONE, + CHROMA_COSITING_LEFT, + CHROMA_COSITING_TOPLEFT, + CHROMA_COSITING_COUNT +}; + struct dc_crtc_timing_flags { uint32_t INTERLACE :1; uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1, @@ -974,6 +981,7 @@ struct dc_crtc_timing_adjust { uint32_t v_total_max; uint32_t v_total_mid; uint32_t v_total_mid_frame_num; + uint32_t allow_otg_v_count_halt; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h index ef380cae81..44afcd9892 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_plane.h +++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h @@ -29,7 +29,7 @@ #include "dc.h" #include "dc_hw_types.h" -struct dc_plane_state *dc_create_plane_state(struct dc *dc); +struct dc_plane_state *dc_create_plane_state(const struct dc *dc); const struct dc_plane_status *dc_plane_get_status( const struct dc_plane_state *plane_state); void dc_plane_state_retain(struct dc_plane_state *plane_state); diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h index 9ee184c1df..ab13335f1d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h @@ -30,5 +30,6 @@ void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state); void dc_plane_destruct(struct dc_plane_state *plane_state); +uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane_state *plane_state); #endif /* _DC_PLANE_PRIV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h index d167fdbfa8..caa45db502 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_state.h +++ b/drivers/gpu/drm/amd/display/dc/dc_state.h @@ -29,7 +29,7 @@ #include "dc.h" #include "inc/core_status.h" -struct dc_state *dc_state_create(struct dc *dc); +struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params); void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state); struct dc_state *dc_state_create_copy(struct dc_state *src_state); void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state); @@ -39,12 +39,12 @@ void dc_state_destruct(struct dc_state *state); void dc_state_retain(struct dc_state *state); void 
dc_state_release(struct dc_state *state); -enum dc_status dc_state_add_stream(struct dc *dc, +enum dc_status dc_state_add_stream(const struct dc *dc, struct dc_state *state, struct dc_stream_state *stream); enum dc_status dc_state_remove_stream( - struct dc *dc, + const struct dc *dc, struct dc_state *state, struct dc_stream_state *stream); @@ -74,5 +74,5 @@ bool dc_state_add_all_planes_for_stream( struct dc_stream_status *dc_state_get_stream_status( struct dc_state *state, - struct dc_stream_state *stream); + const struct dc_stream_state *stream); #endif /* _DC_STATE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h index c1f44e09a6..615086d74d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h @@ -29,6 +29,8 @@ #include "dc_state.h" #include "dc_stream.h" +struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id); + /* Get the type of the provided resource (none, phantom, main) based on the provided * context. If the context is unavailable, determine only if phantom or not. */ @@ -45,7 +47,7 @@ struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state * struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc, struct dc_state *state, struct dc_stream_state *main_stream); -struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc, +struct dc_plane_state *dc_state_create_phantom_plane(const struct dc *dc, struct dc_state *state, struct dc_plane_state *main_plane); @@ -58,11 +60,11 @@ void dc_state_release_phantom_plane(const struct dc *dc, struct dc_plane_state *phantom_plane); /* add/remove phantom stream to context and generate subvp meta data */ -enum dc_status dc_state_add_phantom_stream(struct dc *dc, +enum dc_status dc_state_add_phantom_stream(const struct dc *dc, struct dc_state *state, struct dc_stream_state *phantom_stream, struct dc_stream_state *main_stream); -enum dc_status dc_state_remove_phantom_stream(struct dc *dc, +enum dc_status dc_state_remove_phantom_stream(const struct dc *dc, struct dc_state *state, struct dc_stream_state *phantom_stream); @@ -92,11 +94,11 @@ bool dc_state_add_all_phantom_planes_for_stream( struct dc_state *state); bool dc_state_remove_phantom_streams_and_planes( - struct dc *dc, + const struct dc *dc, struct dc_state *state); void dc_state_release_phantom_streams_and_planes( - struct dc *dc, + const struct dc *dc, struct dc_state *state); #endif /* _DC_STATE_PRIV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index ee10941caa..e5dbbc6089 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -190,7 +190,7 @@ struct dc_stream_state { PHYSICAL_ADDRESS_LOC dmdata_address; bool use_dynamic_meta; - struct dc_transfer_func *out_transfer_func; + struct dc_transfer_func out_transfer_func; struct colorspace_transform gamut_remap_matrix; struct dc_csc_transform csc_color_matrix; @@ -427,14 +427,6 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream); -/* - * Set up streams and links associated to drive sinks - * The streams parameter is an absolute set of all active streams. - * - * After this call: - * Phy, Encoder, Timing Generator are programmed and enabled. - * New streams are enabled with blank stream; no memory read. 
- */ /* * Enable stereo when commit_streams is not required, * for example, frame alternate. diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index be2ac5c442..0f66d00ef8 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -422,7 +422,7 @@ struct dc_dwb_params { enum dwb_capture_rate capture_rate; /* controls the frame capture rate */ struct scaling_taps scaler_taps; /* Scaling taps */ enum dwb_subsample_position subsample_position; - struct dc_transfer_func *out_transfer_func; + const struct dc_transfer_func *out_transfer_func; }; /* audio*/ @@ -1050,6 +1050,8 @@ union replay_error_status { struct replay_config { /* Replay feature is supported */ bool replay_supported; + /* Replay caps support DPCD & EDID caps*/ + bool replay_cap_support; /* Power opt flags that are supported */ unsigned int replay_power_opt_supported; /* SMU optimization is supported */ @@ -1175,4 +1177,20 @@ enum mall_stream_type { SUBVP_MAIN, // subvp in use, this stream is main stream SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream }; + +enum dc_power_source_type { + DC_POWER_SOURCE_AC, // wall power + DC_POWER_SOURCE_DC, // battery power +}; + +struct dc_state_create_params { + enum dc_power_source_type power_source; +}; + +struct dc_commit_streams_params { + struct dc_stream_state **streams; + uint8_t stream_count; + enum dc_power_source_type power_source; +}; + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index a2f48d46d1..ee601a6897 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -22,9 +22,6 @@ * Authors: AMD * */ - -#include - #include "resource.h" #include "dce_i2c.h" #include "dce_i2c_hw.h" @@ -315,9 +312,6 @@ static bool setup_engine( /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); - /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ - REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); - /*set SW requested I2c speed to default, if API calls in it will be override later*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h index f98400efdd..e34e445a40 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h @@ -181,6 +181,7 @@ struct dce_mem_input_registers { SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\ SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\ SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\ + SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\ SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\ SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\ SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h index bf1ffc3629..3d9be87aae 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h @@ -111,6 +111,7 @@ enum dce110_opp_reg_type { OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\ OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\ OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, 
FMT_TEMPORAL_DITHER_EN, mask_sh),\ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c index 670d5ab9d9..2b1673d69e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c @@ -1408,7 +1408,7 @@ void dce110_opp_set_csc_default( static void program_pwl(struct dce_transform *xfm_dce, const struct pwl_params *params) { - int retval; + uint32_t retval; uint8_t max_tries = 10; uint8_t counter = 0; uint32_t i = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c index f9d6a18116..b851fc65f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -34,11 +34,7 @@ #include "reg_helper.h" #include "fixed31_32.h" -#ifdef _WIN32 -#include "atombios.h" -#else #include "atom.h" -#endif #define TO_DMUB_ABM(abm)\ container_of(abm, struct dce_abm, base) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index b010814706..4f559a025c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -244,7 +244,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, uint16_t param = (uint16_t)(panel_inst << 8); if (is_alpm) - param |= REPLAY_RESIDENCY_MODE_ALPM; + param |= REPLAY_RESIDENCY_FIELD_MODE_ALPM; if (is_start) param |= REPLAY_RESIDENCY_ENABLE; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index ae6a131be7..8dc7938c36 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -24,9 +24,9 @@ DCN10 = dcn10_ipp.o \ dcn10_hw_sequencer_debug.o \ - dcn10_dpp.o dcn10_opp.o \ + dcn10_opp.o \ dcn10_hubp.o dcn10_mpc.o \ - dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ + dcn10_cm_common.o \ dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index b0d192c6e6..0b49362f71 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -24,7 +24,7 @@ */ #include "dc.h" #include "reg_helper.h" -#include "dcn10_dpp.h" +#include "dcn10/dcn10_dpp.h" #include "dcn10_cm_common.h" #include "custom_float.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c deleted file mode 100644 index 4e391fd1d7..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ /dev/null @@ -1,585 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "core_types.h" - -#include "reg_helper.h" -#include "dcn10_dpp.h" -#include "basics/conversion.h" - -#define NUM_PHASES 64 -#define HORZ_MAX_TAPS 8 -#define VERT_MAX_TAPS 8 - -#define BLACK_OFFSET_RGB_Y 0x0 -#define BLACK_OFFSET_CBCR 0x8000 - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - -enum pixel_format_description { - PIXEL_FORMAT_FIXED = 0, - PIXEL_FORMAT_FIXED16, - PIXEL_FORMAT_FLOAT - -}; - -enum dcn10_coef_filter_type_sel { - SCL_COEF_LUMA_VERT_FILTER = 0, - SCL_COEF_LUMA_HORZ_FILTER = 1, - SCL_COEF_CHROMA_VERT_FILTER = 2, - SCL_COEF_CHROMA_HORZ_FILTER = 3, - SCL_COEF_ALPHA_VERT_FILTER = 4, - SCL_COEF_ALPHA_HORZ_FILTER = 5 -}; - -enum dscl_autocal_mode { - AUTOCAL_MODE_OFF = 0, - - /* Autocal calculate the scaling ratio and initial phase and the - * DSCL_MODE_SEL must be set to 1 - */ - AUTOCAL_MODE_AUTOSCALE = 1, - /* Autocal perform auto centering without replication and the - * DSCL_MODE_SEL must be set to 0 - */ - AUTOCAL_MODE_AUTOCENTER = 2, - /* Autocal perform auto centering and auto replication and the - * DSCL_MODE_SEL must be set to 0 - */ - AUTOCAL_MODE_AUTOREPLICATE = 3 -}; - -enum dscl_mode_sel { - DSCL_MODE_SCALING_444_BYPASS = 0, - DSCL_MODE_SCALING_444_RGB_ENABLE = 1, - DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, - DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, - DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, - DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, - DSCL_MODE_DSCL_BYPASS = 6 -}; - -void dpp_read_state(struct dpp *dpp_base, - struct dcn_dpp_state *s) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_GET(DPP_CONTROL, - DPP_CLOCK_ENABLE, &s->is_enabled); - REG_GET(CM_IGAM_CONTROL, - CM_IGAM_LUT_MODE, &s->igam_lut_mode); - REG_GET(CM_IGAM_CONTROL, - CM_IGAM_INPUT_FORMAT, &s->igam_input_format); - REG_GET(CM_DGAM_CONTROL, - CM_DGAM_LUT_MODE, &s->dgam_lut_mode); - REG_GET(CM_RGAM_CONTROL, - CM_RGAM_LUT_MODE, &s->rgam_lut_mode); - REG_GET(CM_GAMUT_REMAP_CONTROL, - CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode); - - if (s->gamut_remap_mode) { - s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12); - s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14); - s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22); - s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24); - s->gamut_remap_c31_c32 = 
REG_READ(CM_GAMUT_REMAP_C31_C32); - s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34); - } -} - -#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) - -bool dpp1_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps) -{ - /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ - if (scl_data->format == PIXEL_FORMAT_FP16 && - dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && - scl_data->ratios.horz.value != dc_fixpt_one.value && - scl_data->ratios.vert.value != dc_fixpt_one.value) - return false; - - if (scl_data->viewport.width > scl_data->h_active && - dpp->ctx->dc->debug.max_downscale_src_width != 0 && - scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) - return false; - - /* TODO: add lb check */ - - /* No support for programming ratio of 4, drop to 3.99999.. */ - if (scl_data->ratios.horz.value == (4ll << 32)) - scl_data->ratios.horz.value--; - if (scl_data->ratios.vert.value == (4ll << 32)) - scl_data->ratios.vert.value--; - if (scl_data->ratios.horz_c.value == (4ll << 32)) - scl_data->ratios.horz_c.value--; - if (scl_data->ratios.vert_c.value == (4ll << 32)) - scl_data->ratios.vert_c.value--; - - /* Set default taps if none are provided */ - if (in_taps->h_taps == 0) - scl_data->taps.h_taps = 4; - else - scl_data->taps.h_taps = in_taps->h_taps; - if (in_taps->v_taps == 0) - scl_data->taps.v_taps = 4; - else - scl_data->taps.v_taps = in_taps->v_taps; - if (in_taps->v_taps_c == 0) - scl_data->taps.v_taps_c = 2; - else - scl_data->taps.v_taps_c = in_taps->v_taps_c; - if (in_taps->h_taps_c == 0) - scl_data->taps.h_taps_c = 2; - /* Only 1 and even h_taps_c are supported by hw */ - else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) - scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; - else - scl_data->taps.h_taps_c = in_taps->h_taps_c; - - if (!dpp->ctx->dc->debug.always_scale) { - if (IDENTITY_RATIO(scl_data->ratios.horz)) - scl_data->taps.h_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert)) - scl_data->taps.v_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.horz_c)) - scl_data->taps.h_taps_c = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert_c)) - scl_data->taps.v_taps_c = 1; - } - - return true; -} - -void dpp_reset(struct dpp *dpp_base) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - dpp->filter_h_c = NULL; - dpp->filter_v_c = NULL; - dpp->filter_h = NULL; - dpp->filter_v = NULL; - - memset(&dpp->scl_data, 0, sizeof(dpp->scl_data)); - memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data)); -} - - - -static void dpp1_cm_set_regamma_pwl( - struct dpp *dpp_base, const struct pwl_params *params, enum opp_regamma mode) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - uint32_t re_mode = 0; - - switch (mode) { - case OPP_REGAMMA_BYPASS: - re_mode = 0; - break; - case OPP_REGAMMA_SRGB: - re_mode = 1; - break; - case OPP_REGAMMA_XVYCC: - re_mode = 2; - break; - case OPP_REGAMMA_USER: - re_mode = dpp->is_write_to_ram_a_safe ? 
4 : 3; - if (memcmp(&dpp->pwl_data, params, sizeof(*params)) == 0) - break; - - dpp1_cm_power_on_regamma_lut(dpp_base, true); - dpp1_cm_configure_regamma_lut(dpp_base, dpp->is_write_to_ram_a_safe); - - if (dpp->is_write_to_ram_a_safe) - dpp1_cm_program_regamma_luta_settings(dpp_base, params); - else - dpp1_cm_program_regamma_lutb_settings(dpp_base, params); - - dpp1_cm_program_regamma_lut(dpp_base, params->rgb_resulted, - params->hw_points_num); - dpp->pwl_data = *params; - - re_mode = dpp->is_write_to_ram_a_safe ? 3 : 4; - dpp->is_write_to_ram_a_safe = !dpp->is_write_to_ram_a_safe; - break; - default: - break; - } - REG_SET(CM_RGAM_CONTROL, 0, CM_RGAM_LUT_MODE, re_mode); -} - -static void dpp1_setup_format_flags(enum surface_pixel_format input_format,\ - enum pixel_format_description *fmt) -{ - - if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F || - input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) - *fmt = PIXEL_FORMAT_FLOAT; - else if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 || - input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616) - *fmt = PIXEL_FORMAT_FIXED16; - else - *fmt = PIXEL_FORMAT_FIXED; -} - -static void dpp1_set_degamma_format_float( - struct dpp *dpp_base, - bool is_float) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - if (is_float) { - REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 3); - REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 1); - } else { - REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 2); - REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 0); - } -} - -void dpp1_cnv_setup ( - struct dpp *dpp_base, - enum surface_pixel_format format, - enum expansion_mode mode, - struct dc_csc_transform input_csc_color_matrix, - enum dc_color_space input_color_space, - struct cnv_alpha_2bit_lut *alpha_2bit_lut) -{ - uint32_t pixel_format; - uint32_t alpha_en; - enum pixel_format_description fmt ; - enum dc_color_space color_space; - enum dcn10_input_csc_select select; - bool is_float; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - bool force_disable_cursor = false; - struct out_csc_color_matrix tbl_entry; - int i = 0; - - dpp1_setup_format_flags(format, &fmt); - alpha_en = 1; - pixel_format = 0; - color_space = COLOR_SPACE_SRGB; - select = INPUT_CSC_SELECT_BYPASS; - is_float = false; - - switch (fmt) { - case PIXEL_FORMAT_FIXED: - case PIXEL_FORMAT_FIXED16: - /*when output is float then FORMAT_CONTROL__OUTPUT_FP=1*/ - REG_SET_3(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_EXPANSION_MODE, mode, - OUTPUT_FP, 0); - break; - case PIXEL_FORMAT_FLOAT: - REG_SET_3(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_EXPANSION_MODE, mode, - OUTPUT_FP, 1); - is_float = true; - break; - default: - - break; - } - - dpp1_set_degamma_format_float(dpp_base, is_float); - - switch (format) { - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - pixel_format = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - pixel_format = 3; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - pixel_format = 8; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - pixel_format = 10; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - force_disable_cursor = false; - pixel_format = 65; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: - force_disable_cursor = true; - pixel_format = 64; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case 
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - force_disable_cursor = true; - pixel_format = 67; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - force_disable_cursor = true; - pixel_format = 66; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: - pixel_format = 26; /* ARGB16161616_UNORM */ - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - pixel_format = 24; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - pixel_format = 25; - break; - default: - break; - } - - /* Set default color space based on format if none is given. */ - color_space = input_color_space ? input_color_space : color_space; - - REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, pixel_format); - REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); - - // if input adjustments exist, program icsc with those values - - if (input_csc_color_matrix.enable_adjustment - == true) { - for (i = 0; i < 12; i++) - tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; - - tbl_entry.color_space = color_space; - - if (color_space >= COLOR_SPACE_YCBCR601) - select = INPUT_CSC_SELECT_ICSC; - else - select = INPUT_CSC_SELECT_BYPASS; - - dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry); - } else - dpp1_program_input_csc(dpp_base, color_space, select, NULL); - - if (force_disable_cursor) { - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, 0); - REG_UPDATE(CURSOR0_CONTROL, - CUR0_ENABLE, 0); - } -} - -void dpp1_set_cursor_attributes( - struct dpp *dpp_base, - struct dc_cursor_attributes *cursor_attributes) -{ - enum dc_cursor_color_format color_format = cursor_attributes->color_format; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_UPDATE_2(CURSOR0_CONTROL, - CUR0_MODE, color_format, - CUR0_EXPANSION_MODE, 0); - - if (color_format == CURSOR_MODE_MONO) { - /* todo: clarify what to program these to */ - REG_UPDATE(CURSOR0_COLOR0, - CUR0_COLOR0, 0x00000000); - REG_UPDATE(CURSOR0_COLOR1, - CUR0_COLOR1, 0xFFFFFFFF); - } -} - - -void dpp1_set_cursor_position( - struct dpp *dpp_base, - const struct dc_cursor_position *pos, - const struct dc_cursor_mi_param *param, - uint32_t width, - uint32_t height) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - int x_pos = pos->x - param->viewport.x; - int y_pos = pos->y - param->viewport.y; - int x_hotspot = pos->x_hotspot; - int y_hotspot = pos->y_hotspot; - int src_x_offset = x_pos - pos->x_hotspot; - int src_y_offset = y_pos - pos->y_hotspot; - int cursor_height = (int)height; - int cursor_width = (int)width; - uint32_t cur_en = pos->enable ? 
1 : 0; - - // Transform cursor width / height and hotspots for offset calculations - if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) { - swap(cursor_height, cursor_width); - swap(x_hotspot, y_hotspot); - - if (param->rotation == ROTATION_ANGLE_90) { - // hotspot = (-y, x) - src_x_offset = x_pos - (cursor_width - x_hotspot); - src_y_offset = y_pos - y_hotspot; - } else if (param->rotation == ROTATION_ANGLE_270) { - // hotspot = (y, -x) - src_x_offset = x_pos - x_hotspot; - src_y_offset = y_pos - (cursor_height - y_hotspot); - } - } else if (param->rotation == ROTATION_ANGLE_180) { - // hotspot = (-x, -y) - if (!param->mirror) - src_x_offset = x_pos - (cursor_width - x_hotspot); - - src_y_offset = y_pos - (cursor_height - y_hotspot); - } - - if (src_x_offset >= (int)param->viewport.width) - cur_en = 0; /* not visible beyond right edge*/ - - if (src_x_offset + cursor_width <= 0) - cur_en = 0; /* not visible beyond left edge*/ - - if (src_y_offset >= (int)param->viewport.height) - cur_en = 0; /* not visible beyond bottom edge*/ - - if (src_y_offset + cursor_height <= 0) - cur_en = 0; /* not visible beyond top edge*/ - - REG_UPDATE(CURSOR0_CONTROL, - CUR0_ENABLE, cur_en); - - dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; -} - -void dpp1_cnv_set_optional_cursor_attributes( - struct dpp *dpp_base, - struct dpp_cursor_attributes *attr) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - if (attr) { - REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale); - } -} - -void dpp1_dppclk_control( - struct dpp *dpp_base, - bool dppclk_div, - bool enable) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - if (enable) { - if (dpp->tf_mask->DPPCLK_RATE_CONTROL) - REG_UPDATE_2(DPP_CONTROL, - DPPCLK_RATE_CONTROL, dppclk_div, - DPP_CLOCK_ENABLE, 1); - else - REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 1); - } else - REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0); -} - -static const struct dpp_funcs dcn10_dpp_funcs = { - .dpp_read_state = dpp_read_state, - .dpp_reset = dpp_reset, - .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, - .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, - .dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment, - .dpp_set_csc_default = dpp1_cm_set_output_csc_default, - .dpp_power_on_regamma_lut = dpp1_cm_power_on_regamma_lut, - .dpp_program_regamma_lut = dpp1_cm_program_regamma_lut, - .dpp_configure_regamma_lut = dpp1_cm_configure_regamma_lut, - .dpp_program_regamma_lutb_settings = dpp1_cm_program_regamma_lutb_settings, - .dpp_program_regamma_luta_settings = dpp1_cm_program_regamma_luta_settings, - .dpp_program_regamma_pwl = dpp1_cm_set_regamma_pwl, - .dpp_program_bias_and_scale = dpp1_program_bias_and_scale, - .dpp_set_degamma = dpp1_set_degamma, - .dpp_program_input_lut = dpp1_program_input_lut, - .dpp_program_degamma_pwl = dpp1_set_degamma_pwl, - .dpp_setup = dpp1_cnv_setup, - .dpp_full_bypass = dpp1_full_bypass, - .set_cursor_attributes = dpp1_set_cursor_attributes, - .set_cursor_position = dpp1_set_cursor_position, - .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, - .dpp_dppclk_control = dpp1_dppclk_control, - .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier, - .dpp_program_blnd_lut = NULL, - .dpp_program_shaper_lut = NULL, - .dpp_program_3dlut = NULL, - .dpp_get_gamut_remap = dpp1_cm_get_gamut_remap, -}; - -static struct dpp_caps dcn10_dpp_cap = { - 
.dscl_data_proc_format = DSCL_DATA_PRCESSING_FIXED_FORMAT, - .dscl_calc_lb_num_partitions = dpp1_dscl_calc_lb_num_partitions, -}; - -/*****************************************/ -/* Constructor, Destructor */ -/*****************************************/ - -void dpp1_construct( - struct dcn10_dpp *dpp, - struct dc_context *ctx, - uint32_t inst, - const struct dcn_dpp_registers *tf_regs, - const struct dcn_dpp_shift *tf_shift, - const struct dcn_dpp_mask *tf_mask) -{ - dpp->base.ctx = ctx; - - dpp->base.inst = inst; - dpp->base.funcs = &dcn10_dpp_funcs; - dpp->base.caps = &dcn10_dpp_cap; - - dpp->tf_regs = tf_regs; - dpp->tf_shift = tf_shift; - dpp->tf_mask = tf_mask; - - dpp->lb_pixel_depth_supported = - LB_PIXEL_DEPTH_18BPP | - LB_PIXEL_DEPTH_24BPP | - LB_PIXEL_DEPTH_30BPP | - LB_PIXEL_DEPTH_36BPP; - - dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; - dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/ -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h deleted file mode 100644 index a039eedc7c..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ /dev/null @@ -1,1527 +0,0 @@ -/* Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DAL_DPP_DCN10_H__ -#define __DAL_DPP_DCN10_H__ - -#include "dpp.h" - -#define TO_DCN10_DPP(dpp)\ - container_of(dpp, struct dcn10_dpp, base) - -/* TODO: Use correct number of taps. 
Using polaris values for now */ -#define LB_TOTAL_NUMBER_OF_ENTRIES 5124 -#define LB_BITS_PER_ENTRY 144 - -#define TF_SF(reg_name, field_name, post_fix)\ - .field_name = reg_name ## __ ## field_name ## post_fix - -//Used to resolve corner case -#define TF2_SF(reg_name, field_name, post_fix)\ - .field_name = reg_name ## _ ## field_name ## post_fix - -#define TF_REG_LIST_DCN(id) \ - SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\ - SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\ - SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\ - SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\ - SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\ - SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\ - SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\ - SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \ - SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \ - SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \ - SRI(DSCL_MEM_PWR_CTRL, DSCL, id), \ - SRI(OTG_H_BLANK, DSCL, id), \ - SRI(OTG_V_BLANK, DSCL, id), \ - SRI(SCL_MODE, DSCL, id), \ - SRI(LB_DATA_FORMAT, DSCL, id), \ - SRI(LB_MEMORY_CTRL, DSCL, id), \ - SRI(DSCL_AUTOCAL, DSCL, id), \ - SRI(DSCL_CONTROL, DSCL, id), \ - SRI(SCL_BLACK_OFFSET, DSCL, id), \ - SRI(SCL_TAP_CONTROL, DSCL, id), \ - SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \ - SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \ - SRI(DSCL_2TAP_CONTROL, DSCL, id), \ - SRI(MPC_SIZE, DSCL, id), \ - SRI(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \ - SRI(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \ - SRI(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \ - SRI(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \ - SRI(SCL_HORZ_FILTER_INIT, DSCL, id), \ - SRI(SCL_HORZ_FILTER_INIT_C, DSCL, id), \ - SRI(SCL_VERT_FILTER_INIT, DSCL, id), \ - SRI(SCL_VERT_FILTER_INIT_BOT, DSCL, id), \ - SRI(SCL_VERT_FILTER_INIT_C, DSCL, id), \ - SRI(SCL_VERT_FILTER_INIT_BOT_C, DSCL, id), \ - SRI(RECOUT_START, DSCL, id), \ - SRI(RECOUT_SIZE, DSCL, id), \ - SRI(CM_ICSC_CONTROL, CM, id), \ - SRI(CM_ICSC_C11_C12, CM, id), \ - SRI(CM_ICSC_C33_C34, CM, id), \ - SRI(CM_DGAM_RAMB_START_CNTL_B, CM, id), \ - SRI(CM_DGAM_RAMB_START_CNTL_G, CM, id), \ - SRI(CM_DGAM_RAMB_START_CNTL_R, CM, id), \ - SRI(CM_DGAM_RAMB_SLOPE_CNTL_B, CM, id), \ - SRI(CM_DGAM_RAMB_SLOPE_CNTL_G, CM, id), \ - SRI(CM_DGAM_RAMB_SLOPE_CNTL_R, CM, id), \ - SRI(CM_DGAM_RAMB_END_CNTL1_B, CM, id), \ - SRI(CM_DGAM_RAMB_END_CNTL2_B, CM, id), \ - SRI(CM_DGAM_RAMB_END_CNTL1_G, CM, id), \ - SRI(CM_DGAM_RAMB_END_CNTL2_G, CM, id), \ - SRI(CM_DGAM_RAMB_END_CNTL1_R, CM, id), \ - SRI(CM_DGAM_RAMB_END_CNTL2_R, CM, id), \ - SRI(CM_DGAM_RAMB_REGION_0_1, CM, id), \ - SRI(CM_DGAM_RAMB_REGION_14_15, CM, id), \ - SRI(CM_DGAM_RAMA_START_CNTL_B, CM, id), \ - SRI(CM_DGAM_RAMA_START_CNTL_G, CM, id), \ - SRI(CM_DGAM_RAMA_START_CNTL_R, CM, id), \ - SRI(CM_DGAM_RAMA_SLOPE_CNTL_B, CM, id), \ - SRI(CM_DGAM_RAMA_SLOPE_CNTL_G, CM, id), \ - SRI(CM_DGAM_RAMA_SLOPE_CNTL_R, CM, id), \ - SRI(CM_DGAM_RAMA_END_CNTL1_B, CM, id), \ - SRI(CM_DGAM_RAMA_END_CNTL2_B, CM, id), \ - SRI(CM_DGAM_RAMA_END_CNTL1_G, CM, id), \ - SRI(CM_DGAM_RAMA_END_CNTL2_G, CM, id), \ - SRI(CM_DGAM_RAMA_END_CNTL1_R, CM, id), \ - SRI(CM_DGAM_RAMA_END_CNTL2_R, CM, id), \ - SRI(CM_DGAM_RAMA_REGION_0_1, CM, id), \ - SRI(CM_DGAM_RAMA_REGION_14_15, CM, id), \ - SRI(CM_MEM_PWR_CTRL, CM, id), \ - SRI(CM_DGAM_LUT_WRITE_EN_MASK, CM, id), \ - SRI(CM_DGAM_LUT_INDEX, CM, id), \ - SRI(CM_DGAM_LUT_DATA, CM, id), \ - SRI(CM_CONTROL, CM, id), \ - SRI(CM_DGAM_CONTROL, CM, id), \ - SRI(CM_TEST_DEBUG_INDEX, CM, id), \ - SRI(CM_TEST_DEBUG_DATA, CM, id), \ - SRI(FORMAT_CONTROL, CNVC_CFG, id), \ - SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ - SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ - 
SRI(CURSOR0_COLOR0, CNVC_CUR, id), \ - SRI(CURSOR0_COLOR1, CNVC_CUR, id), \ - SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \ - SRI(DPP_CONTROL, DPP_TOP, id), \ - SRI(CM_HDR_MULT_COEF, CM, id) - - - -#define TF_REG_LIST_DCN10(id) \ - TF_REG_LIST_DCN(id), \ - SRI(CM_COMA_C11_C12, CM, id),\ - SRI(CM_COMA_C33_C34, CM, id),\ - SRI(CM_COMB_C11_C12, CM, id),\ - SRI(CM_COMB_C33_C34, CM, id),\ - SRI(CM_OCSC_CONTROL, CM, id), \ - SRI(CM_OCSC_C11_C12, CM, id), \ - SRI(CM_OCSC_C33_C34, CM, id), \ - SRI(CM_BNS_VALUES_R, CM, id), \ - SRI(CM_BNS_VALUES_G, CM, id), \ - SRI(CM_BNS_VALUES_B, CM, id), \ - SRI(CM_MEM_PWR_CTRL, CM, id), \ - SRI(CM_RGAM_LUT_DATA, CM, id), \ - SRI(CM_RGAM_LUT_WRITE_EN_MASK, CM, id),\ - SRI(CM_RGAM_LUT_INDEX, CM, id), \ - SRI(CM_RGAM_RAMB_START_CNTL_B, CM, id), \ - SRI(CM_RGAM_RAMB_START_CNTL_G, CM, id), \ - SRI(CM_RGAM_RAMB_START_CNTL_R, CM, id), \ - SRI(CM_RGAM_RAMB_SLOPE_CNTL_B, CM, id), \ - SRI(CM_RGAM_RAMB_SLOPE_CNTL_G, CM, id), \ - SRI(CM_RGAM_RAMB_SLOPE_CNTL_R, CM, id), \ - SRI(CM_RGAM_RAMB_END_CNTL1_B, CM, id), \ - SRI(CM_RGAM_RAMB_END_CNTL2_B, CM, id), \ - SRI(CM_RGAM_RAMB_END_CNTL1_G, CM, id), \ - SRI(CM_RGAM_RAMB_END_CNTL2_G, CM, id), \ - SRI(CM_RGAM_RAMB_END_CNTL1_R, CM, id), \ - SRI(CM_RGAM_RAMB_END_CNTL2_R, CM, id), \ - SRI(CM_RGAM_RAMB_REGION_0_1, CM, id), \ - SRI(CM_RGAM_RAMB_REGION_32_33, CM, id), \ - SRI(CM_RGAM_RAMA_START_CNTL_B, CM, id), \ - SRI(CM_RGAM_RAMA_START_CNTL_G, CM, id), \ - SRI(CM_RGAM_RAMA_START_CNTL_R, CM, id), \ - SRI(CM_RGAM_RAMA_SLOPE_CNTL_B, CM, id), \ - SRI(CM_RGAM_RAMA_SLOPE_CNTL_G, CM, id), \ - SRI(CM_RGAM_RAMA_SLOPE_CNTL_R, CM, id), \ - SRI(CM_RGAM_RAMA_END_CNTL1_B, CM, id), \ - SRI(CM_RGAM_RAMA_END_CNTL2_B, CM, id), \ - SRI(CM_RGAM_RAMA_END_CNTL1_G, CM, id), \ - SRI(CM_RGAM_RAMA_END_CNTL2_G, CM, id), \ - SRI(CM_RGAM_RAMA_END_CNTL1_R, CM, id), \ - SRI(CM_RGAM_RAMA_END_CNTL2_R, CM, id), \ - SRI(CM_RGAM_RAMA_REGION_0_1, CM, id), \ - SRI(CM_RGAM_RAMA_REGION_32_33, CM, id), \ - SRI(CM_RGAM_CONTROL, CM, id), \ - SRI(CM_IGAM_CONTROL, CM, id), \ - SRI(CM_IGAM_LUT_RW_CONTROL, CM, id), \ - SRI(CM_IGAM_LUT_RW_INDEX, CM, id), \ - SRI(CM_IGAM_LUT_SEQ_COLOR, CM, id), \ - SRI(CURSOR_CONTROL, CURSOR, id), \ - SRI(CM_CMOUT_CONTROL, CM, id) - - -#define TF_REG_LIST_SH_MASK_DCN(mask_sh)\ - TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\ - TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\ - TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\ - 
TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\ - TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\ - TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\ - TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\ - TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\ - TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\ - TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\ - TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\ - TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\ - TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_RGB_Y, mask_sh),\ - TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_CBCR, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\ - TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\ - TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\ - TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\ - TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\ - TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\ - TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_FRAC_BOT, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_INT_BOT, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_FRAC_BOT_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_INT_BOT_C, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \ - 
TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh), \ - TF_SF(DSCL0_DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, mask_sh), \ - TF_SF(CM0_CM_ICSC_CONTROL, CM_ICSC_MODE, mask_sh), \ - TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C11, mask_sh), \ - TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C12, mask_sh), \ - TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C33, mask_sh), \ - TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C34, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_B, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_G, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_R, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_B, CM_DGAM_RAMB_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_G, CM_DGAM_RAMB_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_R, CM_DGAM_RAMB_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_B, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_G, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_R, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_B, CM_DGAM_RAMA_EXP_REGION_END_B, mask_sh), \ - 
TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_G, CM_DGAM_RAMA_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_R, CM_DGAM_RAMA_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, mask_sh), \ - TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_DGAM_LUT_INDEX, CM_DGAM_LUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_DGAM_LUT_DATA, CM_DGAM_LUT_DATA, mask_sh), \ - TF_SF(CM0_CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, mask_sh), \ - TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \ - TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \ - TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \ - TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \ - TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh) - -#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\ - TF_REG_LIST_SH_MASK_DCN(mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_REDUCE_MODE, mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, DYNAMIC_PIXEL_DEPTH, mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, DITHER_EN, mask_sh),\ - TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C11, mask_sh),\ - TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C12, mask_sh),\ - TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C33, mask_sh),\ - TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C34, mask_sh),\ - TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C11, mask_sh),\ - TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C12, mask_sh),\ - TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C33, mask_sh),\ - TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C34, mask_sh),\ - TF_SF(CM0_CM_OCSC_CONTROL, CM_OCSC_MODE, mask_sh), \ - TF_SF(CM0_CM_OCSC_C11_C12, 
CM_OCSC_C11, mask_sh), \ - TF_SF(CM0_CM_OCSC_C11_C12, CM_OCSC_C12, mask_sh), \ - TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C33, mask_sh), \ - TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C34, mask_sh), \ - TF_SF(CM0_CM_BNS_VALUES_R, CM_BNS_BIAS_R, mask_sh), \ - TF_SF(CM0_CM_BNS_VALUES_G, CM_BNS_BIAS_G, mask_sh), \ - TF_SF(CM0_CM_BNS_VALUES_B, CM_BNS_BIAS_B, mask_sh), \ - TF_SF(CM0_CM_BNS_VALUES_R, CM_BNS_SCALE_R, mask_sh), \ - TF_SF(CM0_CM_BNS_VALUES_G, CM_BNS_SCALE_G, mask_sh), \ - TF_SF(CM0_CM_BNS_VALUES_B, CM_BNS_SCALE_B, mask_sh), \ - TF_SF(CM0_CM_MEM_PWR_CTRL, RGAM_MEM_PWR_FORCE, mask_sh), \ - TF_SF(CM0_CM_RGAM_LUT_DATA, CM_RGAM_LUT_DATA, mask_sh), \ - TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_RGAM_LUT_INDEX, CM_RGAM_LUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_B, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_G, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_R, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_B, CM_RGAM_RAMB_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_G, CM_RGAM_RAMB_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_R, CM_RGAM_RAMB_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, 
CM_RGAM_RAMA_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_B, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_G, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_R, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_B, CM_RGAM_RAMA_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_G, CM_RGAM_RAMA_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_R, CM_RGAM_RAMA_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_CONTROL, CM_RGAM_LUT_MODE, mask_sh), \ - TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, mask_sh), \ - TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_R, mask_sh), \ - TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_G, mask_sh), \ - TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_B, mask_sh), \ - TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, mask_sh), \ - TF_SF(CM0_CM_CONTROL, CM_BYPASS_EN, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_SEQ_COLOR, CM_IGAM_LUT_SEQ_COLOR, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh), \ - TF_SF(CM0_CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, mask_sh), \ - TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ - TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ - TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ - TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ - TF_SF(DPP_TOP0_DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh) - -/* - * - DCN1 CM debug status register definition - - register :ID9_CM_STATUS do - implement_ref :cm - map to: :cmdebugind, at: j - width 32 - disclosure NEVER - - field :ID9_VUPDATE_CFG, [0], R - field :ID9_IGAM_LUT_MODE, [2..1], R - field :ID9_BNS_BYPASS, [3], R - field :ID9_ICSC_MODE, [5..4], R - field :ID9_DGAM_LUT_MODE, 
[8..6], R - field :ID9_HDR_BYPASS, [9], R - field :ID9_GAMUT_REMAP_MODE, [11..10], R - field :ID9_RGAM_LUT_MODE, [14..12], R - #1 free bit - field :ID9_OCSC_MODE, [18..16], R - field :ID9_DENORM_MODE, [21..19], R - field :ID9_ROUND_TRUNC_MODE, [25..22], R - field :ID9_DITHER_EN, [26], R - field :ID9_DITHER_MODE, [28..27], R - end -*/ - -#define TF_DEBUG_REG_LIST_SH_DCN10 \ - .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 4, \ - .CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 16 - -#define TF_DEBUG_REG_LIST_MASK_DCN10 \ - .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x30, \ - .CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 0x70000 - -#define TF_REG_FIELD_LIST(type) \ - type EXT_OVERSCAN_LEFT; \ - type EXT_OVERSCAN_RIGHT; \ - type EXT_OVERSCAN_BOTTOM; \ - type EXT_OVERSCAN_TOP; \ - type OTG_H_BLANK_START; \ - type OTG_H_BLANK_END; \ - type OTG_V_BLANK_START; \ - type OTG_V_BLANK_END; \ - type PIXEL_DEPTH; \ - type PIXEL_EXPAN_MODE; \ - type PIXEL_REDUCE_MODE; \ - type DYNAMIC_PIXEL_DEPTH; \ - type DITHER_EN; \ - type INTERLEAVE_EN; \ - type LB_DATA_FORMAT__ALPHA_EN; \ - type MEMORY_CONFIG; \ - type LB_MAX_PARTITIONS; \ - type AUTOCAL_MODE; \ - type AUTOCAL_NUM_PIPE; \ - type AUTOCAL_PIPE_ID; \ - type SCL_BOUNDARY_MODE; \ - type SCL_BLACK_OFFSET_RGB_Y; \ - type SCL_BLACK_OFFSET_CBCR; \ - type SCL_V_NUM_TAPS; \ - type SCL_H_NUM_TAPS; \ - type SCL_V_NUM_TAPS_C; \ - type SCL_H_NUM_TAPS_C; \ - type SCL_COEF_RAM_TAP_PAIR_IDX; \ - type SCL_COEF_RAM_PHASE; \ - type SCL_COEF_RAM_FILTER_TYPE; \ - type SCL_COEF_RAM_EVEN_TAP_COEF; \ - type SCL_COEF_RAM_EVEN_TAP_COEF_EN; \ - type SCL_COEF_RAM_ODD_TAP_COEF; \ - type SCL_COEF_RAM_ODD_TAP_COEF_EN; \ - type SCL_H_2TAP_HARDCODE_COEF_EN; \ - type SCL_H_2TAP_SHARP_EN; \ - type SCL_H_2TAP_SHARP_FACTOR; \ - type SCL_V_2TAP_HARDCODE_COEF_EN; \ - type SCL_V_2TAP_SHARP_EN; \ - type SCL_V_2TAP_SHARP_FACTOR; \ - type SCL_COEF_RAM_SELECT; \ - type DSCL_MODE; \ - type RECOUT_START_X; \ - type RECOUT_START_Y; \ - type RECOUT_WIDTH; \ - type RECOUT_HEIGHT; \ - type MPC_WIDTH; \ - type MPC_HEIGHT; \ - type SCL_H_SCALE_RATIO; \ - type SCL_V_SCALE_RATIO; \ - type SCL_H_SCALE_RATIO_C; \ - type SCL_V_SCALE_RATIO_C; \ - type SCL_H_INIT_FRAC; \ - type SCL_H_INIT_INT; \ - type SCL_H_INIT_FRAC_C; \ - type SCL_H_INIT_INT_C; \ - type SCL_V_INIT_FRAC; \ - type SCL_V_INIT_INT; \ - type SCL_V_INIT_FRAC_BOT; \ - type SCL_V_INIT_INT_BOT; \ - type SCL_V_INIT_FRAC_C; \ - type SCL_V_INIT_INT_C; \ - type SCL_V_INIT_FRAC_BOT_C; \ - type SCL_V_INIT_INT_BOT_C; \ - type SCL_CHROMA_COEF_MODE; \ - type SCL_COEF_RAM_SELECT_CURRENT; \ - type LUT_MEM_PWR_FORCE; \ - type LUT_MEM_PWR_STATE; \ - type CM_GAMUT_REMAP_MODE; \ - type CM_GAMUT_REMAP_C11; \ - type CM_GAMUT_REMAP_C12; \ - type CM_GAMUT_REMAP_C13; \ - type CM_GAMUT_REMAP_C14; \ - type CM_GAMUT_REMAP_C21; \ - type CM_GAMUT_REMAP_C22; \ - type CM_GAMUT_REMAP_C23; \ - type CM_GAMUT_REMAP_C24; \ - type CM_GAMUT_REMAP_C31; \ - type CM_GAMUT_REMAP_C32; \ - type CM_GAMUT_REMAP_C33; \ - type CM_GAMUT_REMAP_C34; \ - type CM_COMA_C11; \ - type CM_COMA_C12; \ - type CM_COMA_C33; \ - type CM_COMA_C34; \ - type CM_COMB_C11; \ - type CM_COMB_C12; \ - type CM_COMB_C33; \ - type CM_COMB_C34; \ - type CM_OCSC_MODE; \ - type CM_OCSC_C11; \ - type CM_OCSC_C12; \ - type CM_OCSC_C33; \ - type CM_OCSC_C34; \ - type RGAM_MEM_PWR_FORCE; \ - type CM_RGAM_LUT_DATA; \ - type CM_RGAM_LUT_WRITE_EN_MASK; \ - type CM_RGAM_LUT_WRITE_SEL; \ - type CM_RGAM_LUT_INDEX; \ - type CM_RGAM_RAMB_EXP_REGION_START_B; \ - type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; \ - type CM_RGAM_RAMB_EXP_REGION_START_G; \ - type 
CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G; \ - type CM_RGAM_RAMB_EXP_REGION_START_R; \ - type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R; \ - type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \ - type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \ - type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \ - type CM_RGAM_RAMB_EXP_REGION_END_B; \ - type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; \ - type CM_RGAM_RAMB_EXP_REGION_END_BASE_B; \ - type CM_RGAM_RAMB_EXP_REGION_END_G; \ - type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G; \ - type CM_RGAM_RAMB_EXP_REGION_END_BASE_G; \ - type CM_RGAM_RAMB_EXP_REGION_END_R; \ - type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R; \ - type CM_RGAM_RAMB_EXP_REGION_END_BASE_R; \ - type CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET; \ - type CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \ - type CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET; \ - type CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \ - type CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET; \ - type CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \ - type CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET; \ - type CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \ - type CM_RGAM_RAMA_EXP_REGION_START_B; \ - type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B; \ - type CM_RGAM_RAMA_EXP_REGION_START_G; \ - type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G; \ - type CM_RGAM_RAMA_EXP_REGION_START_R; \ - type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R; \ - type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \ - type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \ - type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \ - type CM_RGAM_RAMA_EXP_REGION_END_B; \ - type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B; \ - type CM_RGAM_RAMA_EXP_REGION_END_BASE_B; \ - type CM_RGAM_RAMA_EXP_REGION_END_G; \ - type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G; \ - type CM_RGAM_RAMA_EXP_REGION_END_BASE_G; \ - type CM_RGAM_RAMA_EXP_REGION_END_R; \ - type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R; \ - type CM_RGAM_RAMA_EXP_REGION_END_BASE_R; \ - type CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; \ - type CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \ - type CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; \ - type CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \ - type CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET; \ - type CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \ - type CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET; \ - type CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \ - type CM_RGAM_LUT_MODE; \ - type CM_CMOUT_ROUND_TRUNC_MODE; \ - type CM_BLNDGAM_LUT_MODE; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_R; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R; \ - type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_R; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R; \ - type CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS; \ - type 
CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_G; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \ - type 
CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_G; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET; \ 
- type CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \ - type CM_BLNDGAM_LUT_WRITE_EN_MASK; \ - type CM_BLNDGAM_LUT_WRITE_SEL; \ - type CM_BLNDGAM_CONFIG_STATUS; \ - type CM_BLNDGAM_LUT_INDEX; \ - type BLNDGAM_MEM_PWR_FORCE; \ - type CM_3DLUT_MODE; \ - type CM_3DLUT_SIZE; \ - type CM_3DLUT_INDEX; \ - type CM_3DLUT_DATA0; \ - type CM_3DLUT_DATA1; \ - type CM_3DLUT_DATA_30BIT; \ - type CM_3DLUT_WRITE_EN_MASK; \ - type CM_3DLUT_RAM_SEL; \ - type CM_3DLUT_30BIT_EN; \ - type CM_3DLUT_CONFIG_STATUS; \ - type CM_3DLUT_READ_SEL; \ - type CM_SHAPER_LUT_MODE; \ - type CM_SHAPER_RAMB_EXP_REGION_START_B; \ - type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B; \ - type CM_SHAPER_RAMB_EXP_REGION_START_G; \ - type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G; \ - type CM_SHAPER_RAMB_EXP_REGION_START_R; \ - type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R; \ - type CM_SHAPER_RAMB_EXP_REGION_END_B; \ - type CM_SHAPER_RAMB_EXP_REGION_END_BASE_B; \ - type CM_SHAPER_RAMB_EXP_REGION_END_G; \ - type CM_SHAPER_RAMB_EXP_REGION_END_BASE_G; \ - type CM_SHAPER_RAMB_EXP_REGION_END_R; \ - type CM_SHAPER_RAMB_EXP_REGION_END_BASE_R; \ - type CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET; \ - type 
CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION_START_B; \ - type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B; \ - type CM_SHAPER_RAMA_EXP_REGION_START_G; \ - type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G; \ - type CM_SHAPER_RAMA_EXP_REGION_START_R; \ - type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R; \ - type CM_SHAPER_RAMA_EXP_REGION_END_B; \ - type CM_SHAPER_RAMA_EXP_REGION_END_BASE_B; \ - type CM_SHAPER_RAMA_EXP_REGION_END_G; \ - type CM_SHAPER_RAMA_EXP_REGION_END_BASE_G; \ - type CM_SHAPER_RAMA_EXP_REGION_END_R; \ - type CM_SHAPER_RAMA_EXP_REGION_END_BASE_R; \ - type CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET; \ - type 
CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS; \ - type CM_SHAPER_LUT_WRITE_EN_MASK; \ - type CM_SHAPER_CONFIG_STATUS; \ - type CM_SHAPER_LUT_WRITE_SEL; \ - type CM_SHAPER_LUT_INDEX; \ - type CM_SHAPER_LUT_DATA; \ - type CM_DGAM_CONFIG_STATUS; \ - type CM_ICSC_MODE; \ - type CM_ICSC_C11; \ - type CM_ICSC_C12; \ - type CM_ICSC_C33; \ - type CM_ICSC_C34; \ - type CM_BNS_BIAS_R; \ - type CM_BNS_BIAS_G; \ - type CM_BNS_BIAS_B; \ - type CM_BNS_SCALE_R; \ - type CM_BNS_SCALE_G; \ - type CM_BNS_SCALE_B; \ - type CM_DGAM_RAMB_EXP_REGION_START_B; \ - type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; \ - type CM_DGAM_RAMB_EXP_REGION_START_G; \ - type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G; \ - type CM_DGAM_RAMB_EXP_REGION_START_R; \ - type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R; \ - type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \ - type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \ - type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \ - type CM_DGAM_RAMB_EXP_REGION_END_B; \ - type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; \ - type CM_DGAM_RAMB_EXP_REGION_END_BASE_B; \ - type CM_DGAM_RAMB_EXP_REGION_END_G; \ - type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G; \ - type CM_DGAM_RAMB_EXP_REGION_END_BASE_G; \ - type CM_DGAM_RAMB_EXP_REGION_END_R; \ - type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R; \ - type CM_DGAM_RAMB_EXP_REGION_END_BASE_R; \ - type CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET; \ - type CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \ - type CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET; \ - type CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \ - type CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET; \ - type CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \ - type CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET; \ - type CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \ - type CM_DGAM_RAMA_EXP_REGION_START_B; \ - type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B; \ - type CM_DGAM_RAMA_EXP_REGION_START_G; \ - type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G; \ - type CM_DGAM_RAMA_EXP_REGION_START_R; \ - type 
CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R; \ - type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \ - type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \ - type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \ - type CM_DGAM_RAMA_EXP_REGION_END_B; \ - type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B; \ - type CM_DGAM_RAMA_EXP_REGION_END_BASE_B; \ - type CM_DGAM_RAMA_EXP_REGION_END_G; \ - type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G; \ - type CM_DGAM_RAMA_EXP_REGION_END_BASE_G; \ - type CM_DGAM_RAMA_EXP_REGION_END_R; \ - type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R; \ - type CM_DGAM_RAMA_EXP_REGION_END_BASE_R; \ - type CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; \ - type CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \ - type CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; \ - type CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \ - type CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET; \ - type CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \ - type CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET; \ - type CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \ - type SHARED_MEM_PWR_DIS; \ - type CM_IGAM_LUT_FORMAT_R; \ - type CM_IGAM_LUT_FORMAT_G; \ - type CM_IGAM_LUT_FORMAT_B; \ - type CM_IGAM_LUT_HOST_EN; \ - type CM_IGAM_LUT_RW_MODE; \ - type CM_IGAM_LUT_WRITE_EN_MASK; \ - type CM_IGAM_LUT_SEL; \ - type CM_IGAM_LUT_SEQ_COLOR; \ - type CM_IGAM_DGAM_CONFIG_STATUS; \ - type CM_DGAM_LUT_WRITE_EN_MASK; \ - type CM_DGAM_LUT_WRITE_SEL; \ - type CM_DGAM_LUT_INDEX; \ - type CM_DGAM_LUT_DATA; \ - type CM_DGAM_LUT_MODE; \ - type CM_IGAM_LUT_MODE; \ - type CM_IGAM_INPUT_FORMAT; \ - type CM_IGAM_LUT_RW_INDEX; \ - type CM_BYPASS_EN; \ - type FORMAT_EXPANSION_MODE; \ - type CNVC_BYPASS; \ - type OUTPUT_FP; \ - type CNVC_SURFACE_PIXEL_FORMAT; \ - type CURSOR_MODE; \ - type CURSOR_PITCH; \ - type CURSOR_LINES_PER_CHUNK; \ - type CURSOR_ENABLE; \ - type CUR0_MODE; \ - type CUR0_EXPANSION_MODE; \ - type CUR0_ENABLE; \ - type CM_BYPASS; \ - type CM_TEST_DEBUG_INDEX; \ - type CM_TEST_DEBUG_DATA_ID9_ICSC_MODE; \ - type CM_TEST_DEBUG_DATA_ID9_OCSC_MODE;\ - type FORMAT_CONTROL__ALPHA_EN; \ - type CUR0_COLOR0; \ - type CUR0_COLOR1; \ - type DPPCLK_RATE_CONTROL; \ - type DPP_CLOCK_ENABLE; \ - type CM_HDR_MULT_COEF; \ - type CUR0_FP_BIAS; \ - type CUR0_FP_SCALE; - -struct dcn_dpp_shift { - TF_REG_FIELD_LIST(uint8_t) -}; - -struct dcn_dpp_mask { - TF_REG_FIELD_LIST(uint32_t) -}; - -#define DPP_COMMON_REG_VARIABLE_LIST \ - uint32_t DSCL_EXT_OVERSCAN_LEFT_RIGHT; \ - uint32_t DSCL_EXT_OVERSCAN_TOP_BOTTOM; \ - uint32_t OTG_H_BLANK; \ - uint32_t OTG_V_BLANK; \ - uint32_t DSCL_MEM_PWR_CTRL; \ - uint32_t DSCL_MEM_PWR_STATUS; \ - uint32_t SCL_MODE; \ - uint32_t LB_DATA_FORMAT; \ - uint32_t LB_MEMORY_CTRL; \ - uint32_t DSCL_AUTOCAL; \ - uint32_t DSCL_CONTROL; \ - uint32_t SCL_BLACK_OFFSET; \ - uint32_t SCL_TAP_CONTROL; \ - uint32_t SCL_COEF_RAM_TAP_SELECT; \ - uint32_t SCL_COEF_RAM_TAP_DATA; \ - uint32_t DSCL_2TAP_CONTROL; \ - uint32_t MPC_SIZE; \ - uint32_t SCL_HORZ_FILTER_SCALE_RATIO; \ - uint32_t SCL_VERT_FILTER_SCALE_RATIO; \ - uint32_t SCL_HORZ_FILTER_SCALE_RATIO_C; \ - uint32_t SCL_VERT_FILTER_SCALE_RATIO_C; \ - uint32_t SCL_HORZ_FILTER_INIT; \ - uint32_t SCL_HORZ_FILTER_INIT_C; \ - uint32_t SCL_VERT_FILTER_INIT; \ - uint32_t SCL_VERT_FILTER_INIT_BOT; \ - uint32_t SCL_VERT_FILTER_INIT_C; \ - uint32_t SCL_VERT_FILTER_INIT_BOT_C; \ - uint32_t RECOUT_START; \ - uint32_t RECOUT_SIZE; \ - uint32_t CM_GAMUT_REMAP_CONTROL; \ - uint32_t CM_GAMUT_REMAP_C11_C12; \ - uint32_t CM_GAMUT_REMAP_C13_C14; \ - uint32_t CM_GAMUT_REMAP_C21_C22; \ - uint32_t CM_GAMUT_REMAP_C23_C24; \ - uint32_t CM_GAMUT_REMAP_C31_C32; \ - uint32_t 
CM_GAMUT_REMAP_C33_C34; \ - uint32_t CM_COMA_C11_C12; \ - uint32_t CM_COMA_C33_C34; \ - uint32_t CM_COMB_C11_C12; \ - uint32_t CM_COMB_C33_C34; \ - uint32_t CM_OCSC_CONTROL; \ - uint32_t CM_OCSC_C11_C12; \ - uint32_t CM_OCSC_C33_C34; \ - uint32_t CM_MEM_PWR_CTRL; \ - uint32_t CM_RGAM_LUT_DATA; \ - uint32_t CM_RGAM_LUT_WRITE_EN_MASK; \ - uint32_t CM_RGAM_LUT_INDEX; \ - uint32_t CM_RGAM_RAMB_START_CNTL_B; \ - uint32_t CM_RGAM_RAMB_START_CNTL_G; \ - uint32_t CM_RGAM_RAMB_START_CNTL_R; \ - uint32_t CM_RGAM_RAMB_SLOPE_CNTL_B; \ - uint32_t CM_RGAM_RAMB_SLOPE_CNTL_G; \ - uint32_t CM_RGAM_RAMB_SLOPE_CNTL_R; \ - uint32_t CM_RGAM_RAMB_END_CNTL1_B; \ - uint32_t CM_RGAM_RAMB_END_CNTL2_B; \ - uint32_t CM_RGAM_RAMB_END_CNTL1_G; \ - uint32_t CM_RGAM_RAMB_END_CNTL2_G; \ - uint32_t CM_RGAM_RAMB_END_CNTL1_R; \ - uint32_t CM_RGAM_RAMB_END_CNTL2_R; \ - uint32_t CM_RGAM_RAMB_REGION_0_1; \ - uint32_t CM_RGAM_RAMB_REGION_32_33; \ - uint32_t CM_RGAM_RAMA_START_CNTL_B; \ - uint32_t CM_RGAM_RAMA_START_CNTL_G; \ - uint32_t CM_RGAM_RAMA_START_CNTL_R; \ - uint32_t CM_RGAM_RAMA_SLOPE_CNTL_B; \ - uint32_t CM_RGAM_RAMA_SLOPE_CNTL_G; \ - uint32_t CM_RGAM_RAMA_SLOPE_CNTL_R; \ - uint32_t CM_RGAM_RAMA_END_CNTL1_B; \ - uint32_t CM_RGAM_RAMA_END_CNTL2_B; \ - uint32_t CM_RGAM_RAMA_END_CNTL1_G; \ - uint32_t CM_RGAM_RAMA_END_CNTL2_G; \ - uint32_t CM_RGAM_RAMA_END_CNTL1_R; \ - uint32_t CM_RGAM_RAMA_END_CNTL2_R; \ - uint32_t CM_RGAM_RAMA_REGION_0_1; \ - uint32_t CM_RGAM_RAMA_REGION_32_33; \ - uint32_t CM_RGAM_CONTROL; \ - uint32_t CM_CMOUT_CONTROL; \ - uint32_t CM_BLNDGAM_LUT_WRITE_EN_MASK; \ - uint32_t CM_BLNDGAM_CONTROL; \ - uint32_t CM_BLNDGAM_RAMB_START_CNTL_B; \ - uint32_t CM_BLNDGAM_RAMB_START_CNTL_G; \ - uint32_t CM_BLNDGAM_RAMB_START_CNTL_R; \ - uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_B; \ - uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_G; \ - uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_R; \ - uint32_t CM_BLNDGAM_RAMB_END_CNTL1_B; \ - uint32_t CM_BLNDGAM_RAMB_END_CNTL2_B; \ - uint32_t CM_BLNDGAM_RAMB_END_CNTL1_G; \ - uint32_t CM_BLNDGAM_RAMB_END_CNTL2_G; \ - uint32_t CM_BLNDGAM_RAMB_END_CNTL1_R; \ - uint32_t CM_BLNDGAM_RAMB_END_CNTL2_R; \ - uint32_t CM_BLNDGAM_RAMB_REGION_0_1; \ - uint32_t CM_BLNDGAM_RAMB_REGION_2_3; \ - uint32_t CM_BLNDGAM_RAMB_REGION_4_5; \ - uint32_t CM_BLNDGAM_RAMB_REGION_6_7; \ - uint32_t CM_BLNDGAM_RAMB_REGION_8_9; \ - uint32_t CM_BLNDGAM_RAMB_REGION_10_11; \ - uint32_t CM_BLNDGAM_RAMB_REGION_12_13; \ - uint32_t CM_BLNDGAM_RAMB_REGION_14_15; \ - uint32_t CM_BLNDGAM_RAMB_REGION_16_17; \ - uint32_t CM_BLNDGAM_RAMB_REGION_18_19; \ - uint32_t CM_BLNDGAM_RAMB_REGION_20_21; \ - uint32_t CM_BLNDGAM_RAMB_REGION_22_23; \ - uint32_t CM_BLNDGAM_RAMB_REGION_24_25; \ - uint32_t CM_BLNDGAM_RAMB_REGION_26_27; \ - uint32_t CM_BLNDGAM_RAMB_REGION_28_29; \ - uint32_t CM_BLNDGAM_RAMB_REGION_30_31; \ - uint32_t CM_BLNDGAM_RAMB_REGION_32_33; \ - uint32_t CM_BLNDGAM_RAMA_START_CNTL_B; \ - uint32_t CM_BLNDGAM_RAMA_START_CNTL_G; \ - uint32_t CM_BLNDGAM_RAMA_START_CNTL_R; \ - uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_B; \ - uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_G; \ - uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_R; \ - uint32_t CM_BLNDGAM_RAMA_END_CNTL1_B; \ - uint32_t CM_BLNDGAM_RAMA_END_CNTL2_B; \ - uint32_t CM_BLNDGAM_RAMA_END_CNTL1_G; \ - uint32_t CM_BLNDGAM_RAMA_END_CNTL2_G; \ - uint32_t CM_BLNDGAM_RAMA_END_CNTL1_R; \ - uint32_t CM_BLNDGAM_RAMA_END_CNTL2_R; \ - uint32_t CM_BLNDGAM_RAMA_REGION_0_1; \ - uint32_t CM_BLNDGAM_RAMA_REGION_2_3; \ - uint32_t CM_BLNDGAM_RAMA_REGION_4_5; \ - uint32_t CM_BLNDGAM_RAMA_REGION_6_7; \ - uint32_t CM_BLNDGAM_RAMA_REGION_8_9; 
\ - uint32_t CM_BLNDGAM_RAMA_REGION_10_11; \ - uint32_t CM_BLNDGAM_RAMA_REGION_12_13; \ - uint32_t CM_BLNDGAM_RAMA_REGION_14_15; \ - uint32_t CM_BLNDGAM_RAMA_REGION_16_17; \ - uint32_t CM_BLNDGAM_RAMA_REGION_18_19; \ - uint32_t CM_BLNDGAM_RAMA_REGION_20_21; \ - uint32_t CM_BLNDGAM_RAMA_REGION_22_23; \ - uint32_t CM_BLNDGAM_RAMA_REGION_24_25; \ - uint32_t CM_BLNDGAM_RAMA_REGION_26_27; \ - uint32_t CM_BLNDGAM_RAMA_REGION_28_29; \ - uint32_t CM_BLNDGAM_RAMA_REGION_30_31; \ - uint32_t CM_BLNDGAM_RAMA_REGION_32_33; \ - uint32_t CM_BLNDGAM_LUT_INDEX; \ - uint32_t CM_3DLUT_MODE; \ - uint32_t CM_3DLUT_INDEX; \ - uint32_t CM_3DLUT_DATA; \ - uint32_t CM_3DLUT_DATA_30BIT; \ - uint32_t CM_3DLUT_READ_WRITE_CONTROL; \ - uint32_t CM_SHAPER_LUT_WRITE_EN_MASK; \ - uint32_t CM_SHAPER_CONTROL; \ - uint32_t CM_SHAPER_RAMB_START_CNTL_B; \ - uint32_t CM_SHAPER_RAMB_START_CNTL_G; \ - uint32_t CM_SHAPER_RAMB_START_CNTL_R; \ - uint32_t CM_SHAPER_RAMB_END_CNTL_B; \ - uint32_t CM_SHAPER_RAMB_END_CNTL_G; \ - uint32_t CM_SHAPER_RAMB_END_CNTL_R; \ - uint32_t CM_SHAPER_RAMB_REGION_0_1; \ - uint32_t CM_SHAPER_RAMB_REGION_2_3; \ - uint32_t CM_SHAPER_RAMB_REGION_4_5; \ - uint32_t CM_SHAPER_RAMB_REGION_6_7; \ - uint32_t CM_SHAPER_RAMB_REGION_8_9; \ - uint32_t CM_SHAPER_RAMB_REGION_10_11; \ - uint32_t CM_SHAPER_RAMB_REGION_12_13; \ - uint32_t CM_SHAPER_RAMB_REGION_14_15; \ - uint32_t CM_SHAPER_RAMB_REGION_16_17; \ - uint32_t CM_SHAPER_RAMB_REGION_18_19; \ - uint32_t CM_SHAPER_RAMB_REGION_20_21; \ - uint32_t CM_SHAPER_RAMB_REGION_22_23; \ - uint32_t CM_SHAPER_RAMB_REGION_24_25; \ - uint32_t CM_SHAPER_RAMB_REGION_26_27; \ - uint32_t CM_SHAPER_RAMB_REGION_28_29; \ - uint32_t CM_SHAPER_RAMB_REGION_30_31; \ - uint32_t CM_SHAPER_RAMB_REGION_32_33; \ - uint32_t CM_SHAPER_RAMA_START_CNTL_B; \ - uint32_t CM_SHAPER_RAMA_START_CNTL_G; \ - uint32_t CM_SHAPER_RAMA_START_CNTL_R; \ - uint32_t CM_SHAPER_RAMA_END_CNTL_B; \ - uint32_t CM_SHAPER_RAMA_END_CNTL_G; \ - uint32_t CM_SHAPER_RAMA_END_CNTL_R; \ - uint32_t CM_SHAPER_RAMA_REGION_0_1; \ - uint32_t CM_SHAPER_RAMA_REGION_2_3; \ - uint32_t CM_SHAPER_RAMA_REGION_4_5; \ - uint32_t CM_SHAPER_RAMA_REGION_6_7; \ - uint32_t CM_SHAPER_RAMA_REGION_8_9; \ - uint32_t CM_SHAPER_RAMA_REGION_10_11; \ - uint32_t CM_SHAPER_RAMA_REGION_12_13; \ - uint32_t CM_SHAPER_RAMA_REGION_14_15; \ - uint32_t CM_SHAPER_RAMA_REGION_16_17; \ - uint32_t CM_SHAPER_RAMA_REGION_18_19; \ - uint32_t CM_SHAPER_RAMA_REGION_20_21; \ - uint32_t CM_SHAPER_RAMA_REGION_22_23; \ - uint32_t CM_SHAPER_RAMA_REGION_24_25; \ - uint32_t CM_SHAPER_RAMA_REGION_26_27; \ - uint32_t CM_SHAPER_RAMA_REGION_28_29; \ - uint32_t CM_SHAPER_RAMA_REGION_30_31; \ - uint32_t CM_SHAPER_RAMA_REGION_32_33; \ - uint32_t CM_SHAPER_LUT_INDEX; \ - uint32_t CM_SHAPER_LUT_DATA; \ - uint32_t CM_ICSC_CONTROL; \ - uint32_t CM_ICSC_C11_C12; \ - uint32_t CM_ICSC_C33_C34; \ - uint32_t CM_BNS_VALUES_R; \ - uint32_t CM_BNS_VALUES_G; \ - uint32_t CM_BNS_VALUES_B; \ - uint32_t CM_DGAM_RAMB_START_CNTL_B; \ - uint32_t CM_DGAM_RAMB_START_CNTL_G; \ - uint32_t CM_DGAM_RAMB_START_CNTL_R; \ - uint32_t CM_DGAM_RAMB_SLOPE_CNTL_B; \ - uint32_t CM_DGAM_RAMB_SLOPE_CNTL_G; \ - uint32_t CM_DGAM_RAMB_SLOPE_CNTL_R; \ - uint32_t CM_DGAM_RAMB_END_CNTL1_B; \ - uint32_t CM_DGAM_RAMB_END_CNTL2_B; \ - uint32_t CM_DGAM_RAMB_END_CNTL1_G; \ - uint32_t CM_DGAM_RAMB_END_CNTL2_G; \ - uint32_t CM_DGAM_RAMB_END_CNTL1_R; \ - uint32_t CM_DGAM_RAMB_END_CNTL2_R; \ - uint32_t CM_DGAM_RAMB_REGION_0_1; \ - uint32_t CM_DGAM_RAMB_REGION_14_15; \ - uint32_t CM_DGAM_RAMA_START_CNTL_B; \ - uint32_t 
CM_DGAM_RAMA_START_CNTL_G; \ - uint32_t CM_DGAM_RAMA_START_CNTL_R; \ - uint32_t CM_DGAM_RAMA_SLOPE_CNTL_B; \ - uint32_t CM_DGAM_RAMA_SLOPE_CNTL_G; \ - uint32_t CM_DGAM_RAMA_SLOPE_CNTL_R; \ - uint32_t CM_DGAM_RAMA_END_CNTL1_B; \ - uint32_t CM_DGAM_RAMA_END_CNTL2_B; \ - uint32_t CM_DGAM_RAMA_END_CNTL1_G; \ - uint32_t CM_DGAM_RAMA_END_CNTL2_G; \ - uint32_t CM_DGAM_RAMA_END_CNTL1_R; \ - uint32_t CM_DGAM_RAMA_END_CNTL2_R; \ - uint32_t CM_DGAM_RAMA_REGION_0_1; \ - uint32_t CM_DGAM_RAMA_REGION_14_15; \ - uint32_t CM_DGAM_LUT_WRITE_EN_MASK; \ - uint32_t CM_DGAM_LUT_INDEX; \ - uint32_t CM_DGAM_LUT_DATA; \ - uint32_t CM_CONTROL; \ - uint32_t CM_DGAM_CONTROL; \ - uint32_t CM_IGAM_CONTROL; \ - uint32_t CM_IGAM_LUT_RW_CONTROL; \ - uint32_t CM_IGAM_LUT_RW_INDEX; \ - uint32_t CM_IGAM_LUT_SEQ_COLOR; \ - uint32_t CM_TEST_DEBUG_INDEX; \ - uint32_t CM_TEST_DEBUG_DATA; \ - uint32_t FORMAT_CONTROL; \ - uint32_t CNVC_SURFACE_PIXEL_FORMAT; \ - uint32_t CURSOR_CONTROL; \ - uint32_t CURSOR0_CONTROL; \ - uint32_t CURSOR0_COLOR0; \ - uint32_t CURSOR0_COLOR1; \ - uint32_t DPP_CONTROL; \ - uint32_t CM_HDR_MULT_COEF; \ - uint32_t CURSOR0_FP_SCALE_BIAS; - -struct dcn_dpp_registers { - DPP_COMMON_REG_VARIABLE_LIST -}; - -struct dcn10_dpp { - struct dpp base; - - const struct dcn_dpp_registers *tf_regs; - const struct dcn_dpp_shift *tf_shift; - const struct dcn_dpp_mask *tf_mask; - - const uint16_t *filter_v; - const uint16_t *filter_h; - const uint16_t *filter_v_c; - const uint16_t *filter_h_c; - int lb_pixel_depth_supported; - int lb_memory_size; - int lb_bits_per_entry; - bool is_write_to_ram_a_safe; - struct scaler_data scl_data; - struct pwl_params pwl_data; -}; - -enum dcn10_input_csc_select { - INPUT_CSC_SELECT_BYPASS = 0, - INPUT_CSC_SELECT_ICSC = 1, - INPUT_CSC_SELECT_COMA = 2 -}; - -void dpp1_set_cursor_attributes( - struct dpp *dpp_base, - struct dc_cursor_attributes *cursor_attributes); - -void dpp1_set_cursor_position( - struct dpp *dpp_base, - const struct dc_cursor_position *pos, - const struct dc_cursor_mi_param *param, - uint32_t width, - uint32_t height); - -void dpp1_cnv_set_optional_cursor_attributes( - struct dpp *dpp_base, - struct dpp_cursor_attributes *attr); - -bool dpp1_dscl_is_lb_conf_valid( - int ceil_vratio, - int num_partitions, - int vtaps); - -void dpp1_dscl_calc_lb_num_partitions( - const struct scaler_data *scl_data, - enum lb_memory_config lb_config, - int *num_part_y, - int *num_part_c); - -void dpp1_degamma_ram_select( - struct dpp *dpp_base, - bool use_ram_a); - -void dpp1_program_degamma_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params); - -void dpp1_program_degamma_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params); - -void dpp1_program_degamma_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num, - bool is_ram_a); - -void dpp1_power_on_degamma_lut( - struct dpp *dpp_base, - bool power_on); - -void dpp1_program_input_csc( - struct dpp *dpp_base, - enum dc_color_space color_space, - enum dcn10_input_csc_select select, - const struct out_csc_color_matrix *tbl_entry); - -void dpp1_program_bias_and_scale( - struct dpp *dpp_base, - struct dc_bias_and_scale *params); - -void dpp1_program_input_lut( - struct dpp *dpp_base, - const struct dc_gamma *gamma); - -void dpp1_full_bypass(struct dpp *dpp_base); - -void dpp1_set_degamma( - struct dpp *dpp_base, - enum ipp_degamma_mode mode); - -void dpp1_set_degamma_pwl(struct dpp *dpp_base, - const struct pwl_params *params); - - -void dpp_read_state(struct dpp *dpp_base, - 
struct dcn_dpp_state *s); - -void dpp_reset(struct dpp *dpp_base); - -void dpp1_cm_program_regamma_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num); - -void dpp1_cm_power_on_regamma_lut( - struct dpp *dpp_base, - bool power_on); - -void dpp1_cm_configure_regamma_lut( - struct dpp *dpp_base, - bool is_ram_a); - -/*program re gamma RAM A*/ -void dpp1_cm_program_regamma_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params); - -/*program re gamma RAM B*/ -void dpp1_cm_program_regamma_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params); -void dpp1_cm_set_output_csc_adjustment( - struct dpp *dpp_base, - const uint16_t *regval); - -void dpp1_cm_set_output_csc_default( - struct dpp *dpp_base, - enum dc_color_space colorspace); - -void dpp1_cm_set_gamut_remap( - struct dpp *dpp, - const struct dpp_grph_csc_adjustment *adjust); - -void dpp1_dscl_set_scaler_manual_scale( - struct dpp *dpp_base, - const struct scaler_data *scl_data); - -void dpp1_cnv_setup ( - struct dpp *dpp_base, - enum surface_pixel_format format, - enum expansion_mode mode, - struct dc_csc_transform input_csc_color_matrix, - enum dc_color_space input_color_space, - struct cnv_alpha_2bit_lut *alpha_2bit_lut); - -void dpp1_dppclk_control( - struct dpp *dpp_base, - bool dppclk_div, - bool enable); - -void dpp1_set_hdr_multiplier( - struct dpp *dpp_base, - uint32_t multiplier); - -bool dpp1_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps); - -void dpp1_construct(struct dcn10_dpp *dpp1, - struct dc_context *ctx, - uint32_t inst, - const struct dcn_dpp_registers *tf_regs, - const struct dcn_dpp_shift *tf_shift, - const struct dcn_dpp_mask *tf_mask); - -void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, - struct dpp_grph_csc_adjustment *adjust); -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c deleted file mode 100644 index 2f994a3a0b..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ /dev/null @@ -1,884 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "core_types.h" - -#include "reg_helper.h" -#include "dcn10_dpp.h" -#include "basics/conversion.h" -#include "dcn10_cm_common.h" - -#define NUM_PHASES 64 -#define HORZ_MAX_TAPS 8 -#define VERT_MAX_TAPS 8 - -#define BLACK_OFFSET_RGB_Y 0x0 -#define BLACK_OFFSET_CBCR 0x8000 - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - -#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) - - -enum dcn10_coef_filter_type_sel { - SCL_COEF_LUMA_VERT_FILTER = 0, - SCL_COEF_LUMA_HORZ_FILTER = 1, - SCL_COEF_CHROMA_VERT_FILTER = 2, - SCL_COEF_CHROMA_HORZ_FILTER = 3, - SCL_COEF_ALPHA_VERT_FILTER = 4, - SCL_COEF_ALPHA_HORZ_FILTER = 5 -}; - -enum dscl_autocal_mode { - AUTOCAL_MODE_OFF = 0, - - /* Autocal calculate the scaling ratio and initial phase and the - * DSCL_MODE_SEL must be set to 1 - */ - AUTOCAL_MODE_AUTOSCALE = 1, - /* Autocal perform auto centering without replication and the - * DSCL_MODE_SEL must be set to 0 - */ - AUTOCAL_MODE_AUTOCENTER = 2, - /* Autocal perform auto centering and auto replication and the - * DSCL_MODE_SEL must be set to 0 - */ - AUTOCAL_MODE_AUTOREPLICATE = 3 -}; - -enum dscl_mode_sel { - DSCL_MODE_SCALING_444_BYPASS = 0, - DSCL_MODE_SCALING_444_RGB_ENABLE = 1, - DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, - DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, - DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, - DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, - DSCL_MODE_DSCL_BYPASS = 6 -}; - -static void program_gamut_remap( - struct dcn10_dpp *dpp, - const uint16_t *regval, - enum gamut_remap_select select) -{ - uint16_t selection = 0; - struct color_matrices_reg gam_regs; - - if (regval == NULL || select == GAMUT_REMAP_BYPASS) { - REG_SET(CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, 0); - return; - } - switch (select) { - case GAMUT_REMAP_COEFF: - selection = 1; - break; - case GAMUT_REMAP_COMA_COEFF: - selection = 2; - break; - case GAMUT_REMAP_COMB_COEFF: - selection = 3; - break; - default: - break; - } - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; - - - if (select == GAMUT_REMAP_COEFF) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - } else if (select == GAMUT_REMAP_COMA_COEFF) { - - gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - } else { - - gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - } - - REG_SET( - CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, selection); - -} - -void dpp1_cm_set_gamut_remap( - struct dpp *dpp_base, - const struct dpp_grph_csc_adjustment *adjust) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - int i = 0; - - if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) - /* Bypass if type is bypass or hw */ - program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS); - else { - struct fixed31_32 arr_matrix[12]; - uint16_t arr_reg_val[12]; - - for (i = 0; i < 12; i++) - arr_matrix[i] = 
adjust->temperature_matrix[i]; - - convert_float_matrix( - arr_reg_val, arr_matrix, 12); - - program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF); - } -} - -static void read_gamut_remap(struct dcn10_dpp *dpp, - uint16_t *regval, - enum gamut_remap_select *select) -{ - struct color_matrices_reg gam_regs; - uint32_t selection; - - REG_GET(CM_GAMUT_REMAP_CONTROL, - CM_GAMUT_REMAP_MODE, &selection); - - *select = selection; - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; - - if (*select == GAMUT_REMAP_COEFF) { - - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); - - cm_helper_read_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - } else if (*select == GAMUT_REMAP_COMA_COEFF) { - - gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); - - cm_helper_read_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - } else if (*select == GAMUT_REMAP_COMB_COEFF) { - - gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); - - cm_helper_read_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - } -} - -void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, - struct dpp_grph_csc_adjustment *adjust) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - uint16_t arr_reg_val[12]; - enum gamut_remap_select select; - - read_gamut_remap(dpp, arr_reg_val, &select); - - if (select == GAMUT_REMAP_BYPASS) { - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; - return; - } - - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; - convert_hw_matrix(adjust->temperature_matrix, - arr_reg_val, ARRAY_SIZE(arr_reg_val)); -} - -static void dpp1_cm_program_color_matrix( - struct dcn10_dpp *dpp, - const uint16_t *regval) -{ - uint32_t ocsc_mode; - uint32_t cur_mode; - struct color_matrices_reg gam_regs; - - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - - /* determine which CSC matrix (ocsc or comb) we are using - * currently. 
select the alternate set to double buffer - * the CSC update so CSC is updated on frame boundary - */ - REG_SET(CM_TEST_DEBUG_INDEX, 0, - CM_TEST_DEBUG_INDEX, 9); - - REG_GET(CM_TEST_DEBUG_DATA, - CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode); - - if (cur_mode != 4) - ocsc_mode = 4; - else - ocsc_mode = 5; - - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12; - - if (ocsc_mode == 4) { - - gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34); - - } else { - - gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); - - } - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode); - -} - -void dpp1_cm_set_output_csc_default( - struct dpp *dpp_base, - enum dc_color_space colorspace) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - const uint16_t *regval = NULL; - int arr_size; - - regval = find_color_matrix(colorspace, &arr_size); - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - - dpp1_cm_program_color_matrix(dpp, regval); -} - -static void dpp1_cm_get_reg_field( - struct dcn10_dpp *dpp, - struct xfer_func_reg *reg) -{ - reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - - reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B; - reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B; - reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; - reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; - reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B; - reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B; - reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; - reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; - reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B; - reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B; - reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; - reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; -} - -static void dpp1_cm_get_degamma_reg_field( - struct dcn10_dpp *dpp, - struct xfer_func_reg *reg) -{ - reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - 
reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - - reg->shifts.field_region_end = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_B; - reg->masks.field_region_end = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_B; - reg->shifts.field_region_end_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; - reg->masks.field_region_end_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; - reg->shifts.field_region_end_base = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_BASE_B; - reg->masks.field_region_end_base = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_BASE_B; - reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; - reg->masks.field_region_linear_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; - reg->shifts.exp_region_start = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_B; - reg->masks.exp_region_start = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_B; - reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; - reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; -} -void dpp1_cm_set_output_csc_adjustment( - struct dpp *dpp_base, - const uint16_t *regval) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - dpp1_cm_program_color_matrix(dpp, regval); -} - -void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base, - bool power_on) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_SET(CM_MEM_PWR_CTRL, 0, - RGAM_MEM_PWR_FORCE, power_on == true ? 0:1); - -} - -void dpp1_cm_program_regamma_lut(struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num) -{ - uint32_t i; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_SEQ_START(); - - for (i = 0 ; i < num; i++) { - REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg); - REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg); - REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg); - - REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_red_reg); - REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_green_reg); - REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg); - - } - -} - -void dpp1_cm_configure_regamma_lut( - struct dpp *dpp_base, - bool is_ram_a) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK, - CM_RGAM_LUT_WRITE_EN_MASK, 7); - REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK, - CM_RGAM_LUT_WRITE_SEL, is_ram_a == true ? 
0:1); - REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0); -} - -/*program re gamma RAM A*/ -void dpp1_cm_program_regamma_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - struct xfer_func_reg gam_regs; - - dpp1_cm_get_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R); - gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1); - gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33); - - cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); - -} - -/*program re gamma RAM B*/ -void dpp1_cm_program_regamma_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - struct xfer_func_reg gam_regs; - - dpp1_cm_get_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R); - gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1); - gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33); - - cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -void dpp1_program_input_csc( - struct dpp *dpp_base, - enum dc_color_space color_space, - enum dcn10_input_csc_select input_select, - const struct out_csc_color_matrix *tbl_entry) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - int i; - int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); - const uint16_t *regval = NULL; - uint32_t cur_select = 0; - enum dcn10_input_csc_select select; - struct color_matrices_reg gam_regs; - - if (input_select == INPUT_CSC_SELECT_BYPASS) { - REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); - return; - } - - if (tbl_entry == NULL) { - for (i = 0; i < arr_size; i++) - if (dpp_input_csc_matrix[i].color_space == color_space) { - regval = dpp_input_csc_matrix[i].regval; - break; - } - - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - } else { - regval = tbl_entry->regval; - } - - /* determine which CSC matrix (icsc or coma) we are using - * currently. 
select the alternate set to double buffer - * the CSC update so CSC is updated on frame boundary - */ - REG_SET(CM_TEST_DEBUG_INDEX, 0, - CM_TEST_DEBUG_INDEX, 9); - - REG_GET(CM_TEST_DEBUG_DATA, - CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select); - - if (cur_select != INPUT_CSC_SELECT_ICSC) - select = INPUT_CSC_SELECT_ICSC; - else - select = INPUT_CSC_SELECT_COMA; - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; - - if (select == INPUT_CSC_SELECT_ICSC) { - - gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); - - } else { - - gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); - - } - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - REG_SET(CM_ICSC_CONTROL, 0, - CM_ICSC_MODE, select); -} - -//keep here for now, decide multi dce support later -void dpp1_program_bias_and_scale( - struct dpp *dpp_base, - struct dc_bias_and_scale *params) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_SET_2(CM_BNS_VALUES_R, 0, - CM_BNS_SCALE_R, params->scale_red, - CM_BNS_BIAS_R, params->bias_red); - - REG_SET_2(CM_BNS_VALUES_G, 0, - CM_BNS_SCALE_G, params->scale_green, - CM_BNS_BIAS_G, params->bias_green); - - REG_SET_2(CM_BNS_VALUES_B, 0, - CM_BNS_SCALE_B, params->scale_blue, - CM_BNS_BIAS_B, params->bias_blue); - -} - -/*program de gamma RAM B*/ -void dpp1_program_degamma_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - struct xfer_func_reg gam_regs; - - dpp1_cm_get_degamma_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R); - gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1); - gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15); - - - cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -/*program de gamma RAM A*/ -void dpp1_program_degamma_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - struct xfer_func_reg gam_regs; - - dpp1_cm_get_degamma_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G); - 
gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R); - gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1); - gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15); - - cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -void dpp1_power_on_degamma_lut( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_SET(CM_MEM_PWR_CTRL, 0, - SHARED_MEM_PWR_DIS, power_on ? 0:1); - -} - -static void dpp1_enable_cm_block( - struct dpp *dpp_base) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8); - REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0); -} - -void dpp1_set_degamma( - struct dpp *dpp_base, - enum ipp_degamma_mode mode) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - dpp1_enable_cm_block(dpp_base); - - switch (mode) { - case IPP_DEGAMMA_MODE_BYPASS: - /* Setting de gamma bypass for now */ - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0); - break; - case IPP_DEGAMMA_MODE_HW_sRGB: - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1); - break; - case IPP_DEGAMMA_MODE_HW_xvYCC: - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); - break; - case IPP_DEGAMMA_MODE_USER_PWL: - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); - break; - default: - BREAK_TO_DEBUGGER(); - break; - } - - REG_SEQ_SUBMIT(); - REG_SEQ_WAIT_DONE(); -} - -void dpp1_degamma_ram_select( - struct dpp *dpp_base, - bool use_ram_a) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - if (use_ram_a) - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); - else - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 4); - -} - -static bool dpp1_degamma_ram_inuse( - struct dpp *dpp_base, - bool *ram_a_inuse) -{ - bool ret = false; - uint32_t status_reg = 0; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, - &status_reg); - - if (status_reg == 9) { - *ram_a_inuse = true; - ret = true; - } else if (status_reg == 10) { - *ram_a_inuse = false; - ret = true; - } - return ret; -} - -void dpp1_program_degamma_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num, - bool is_ram_a) -{ - uint32_t i; - - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0); - REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, - CM_DGAM_LUT_WRITE_EN_MASK, 7); - REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, - is_ram_a == true ? 
0:1); - - REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0); - for (i = 0 ; i < num; i++) { - REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg); - - REG_SET(CM_DGAM_LUT_DATA, 0, - CM_DGAM_LUT_DATA, rgb[i].delta_red_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, - CM_DGAM_LUT_DATA, rgb[i].delta_green_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, - CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg); - } -} - -void dpp1_set_degamma_pwl(struct dpp *dpp_base, - const struct pwl_params *params) -{ - bool is_ram_a = true; - - dpp1_power_on_degamma_lut(dpp_base, true); - dpp1_enable_cm_block(dpp_base); - dpp1_degamma_ram_inuse(dpp_base, &is_ram_a); - if (is_ram_a == true) - dpp1_program_degamma_lutb_settings(dpp_base, params); - else - dpp1_program_degamma_luta_settings(dpp_base, params); - - dpp1_program_degamma_lut(dpp_base, params->rgb_resulted, - params->hw_points_num, !is_ram_a); - dpp1_degamma_ram_select(dpp_base, !is_ram_a); -} - -void dpp1_full_bypass(struct dpp *dpp_base) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - /* Input pixel format: ARGB8888 */ - REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, 0x8); - - /* Zero expansion */ - REG_SET_3(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_CONTROL__ALPHA_EN, 0, - FORMAT_EXPANSION_MODE, 0); - - /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */ - if (dpp->tf_mask->CM_BYPASS_EN) - REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1); - else - REG_SET(CM_CONTROL, 0, CM_BYPASS, 1); - - /* Setting degamma bypass for now */ - REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0); -} - -static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base, - bool *ram_a_inuse) -{ - bool in_use = false; - uint32_t status_reg = 0; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, - &status_reg); - - // 1 => IGAM_RAMA, 3 => IGAM_RAMA & DGAM_ROMA, 4 => IGAM_RAMA & DGAM_ROMB - if (status_reg == 1 || status_reg == 3 || status_reg == 4) { - *ram_a_inuse = true; - in_use = true; - // 2 => IGAM_RAMB, 5 => IGAM_RAMB & DGAM_ROMA, 6 => IGAM_RAMB & DGAM_ROMB - } else if (status_reg == 2 || status_reg == 5 || status_reg == 6) { - *ram_a_inuse = false; - in_use = true; - } - return in_use; -} - -/* - * Input gamma LUT currently supports 256 values only. This means input color - * can have a maximum of 8 bits per channel (= 256 possible values) in order to - * have a one-to-one mapping with the LUT. Truncation will occur with color - * values greater than 8 bits. - * - * In the future, this function should support additional input gamma methods, - * such as piecewise linear mapping, and input gamma bypass. - */ -void dpp1_program_input_lut( - struct dpp *dpp_base, - const struct dc_gamma *gamma) -{ - int i; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - bool rama_occupied = false; - uint32_t ram_num; - // Power on LUT memory. - REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1); - dpp1_enable_cm_block(dpp_base); - // Determine whether to use RAM A or RAM B - dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied); - if (!rama_occupied) - REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0); - else - REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1); - // RW mode is 256-entry LUT - REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0); - // IGAM Input format should be 8 bits per channel. 
- REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0); - // Do not mask any R,G,B values - REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7); - // LUT-256, unsigned, integer, new u0.12 format - REG_UPDATE_3( - CM_IGAM_CONTROL, - CM_IGAM_LUT_FORMAT_R, 3, - CM_IGAM_LUT_FORMAT_G, 3, - CM_IGAM_LUT_FORMAT_B, 3); - // Start at index 0 of IGAM LUT - REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0); - for (i = 0; i < gamma->num_entries; i++) { - REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, - dc_fixpt_round( - gamma->entries.red[i])); - REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, - dc_fixpt_round( - gamma->entries.green[i])); - REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, - dc_fixpt_round( - gamma->entries.blue[i])); - } - // Power off LUT memory - REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0); - // Enable IGAM LUT on ram we just wrote to. 2 => RAMA, 3 => RAMB - REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2); - REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num); -} - -void dpp1_set_hdr_multiplier( - struct dpp *dpp_base, - uint32_t multiplier) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c deleted file mode 100644 index 5ca9ab8a76..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ /dev/null @@ -1,696 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "core_types.h" - -#include "reg_helper.h" -#include "dcn10_dpp.h" -#include "basics/conversion.h" - - -#define NUM_PHASES 64 -#define HORZ_MAX_TAPS 8 -#define VERT_MAX_TAPS 8 - -#define BLACK_OFFSET_RGB_Y 0x0 -#define BLACK_OFFSET_CBCR 0x8000 - - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - -enum dcn10_coef_filter_type_sel { - SCL_COEF_LUMA_VERT_FILTER = 0, - SCL_COEF_LUMA_HORZ_FILTER = 1, - SCL_COEF_CHROMA_VERT_FILTER = 2, - SCL_COEF_CHROMA_HORZ_FILTER = 3, - SCL_COEF_ALPHA_VERT_FILTER = 4, - SCL_COEF_ALPHA_HORZ_FILTER = 5 -}; - -enum dscl_autocal_mode { - AUTOCAL_MODE_OFF = 0, - - /* Autocal calculate the scaling ratio and initial phase and the - * DSCL_MODE_SEL must be set to 1 - */ - AUTOCAL_MODE_AUTOSCALE = 1, - /* Autocal perform auto centering without replication and the - * DSCL_MODE_SEL must be set to 0 - */ - AUTOCAL_MODE_AUTOCENTER = 2, - /* Autocal perform auto centering and auto replication and the - * DSCL_MODE_SEL must be set to 0 - */ - AUTOCAL_MODE_AUTOREPLICATE = 3 -}; - -enum dscl_mode_sel { - DSCL_MODE_SCALING_444_BYPASS = 0, - DSCL_MODE_SCALING_444_RGB_ENABLE = 1, - DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, - DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, - DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, - DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, - DSCL_MODE_DSCL_BYPASS = 6 -}; - -static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth) -{ - if (depth == LB_PIXEL_DEPTH_30BPP) - return 0; /* 10 bpc */ - else if (depth == LB_PIXEL_DEPTH_24BPP) - return 1; /* 8 bpc */ - else if (depth == LB_PIXEL_DEPTH_18BPP) - return 2; /* 6 bpc */ - else if (depth == LB_PIXEL_DEPTH_36BPP) - return 3; /* 12 bpc */ - else { - ASSERT(0); - return -1; /* Unsupported */ - } -} - -static bool dpp1_dscl_is_video_format(enum pixel_format format) -{ - if (format >= PIXEL_FORMAT_VIDEO_BEGIN - && format <= PIXEL_FORMAT_VIDEO_END) - return true; - else - return false; -} - -static bool dpp1_dscl_is_420_format(enum pixel_format format) -{ - if (format == PIXEL_FORMAT_420BPP8 || - format == PIXEL_FORMAT_420BPP10) - return true; - else - return false; -} - -static enum dscl_mode_sel dpp1_dscl_get_dscl_mode( - struct dpp *dpp_base, - const struct scaler_data *data, - bool dbg_always_scale) -{ - const long long one = dc_fixpt_one.value; - - if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) { - /* DSCL is processing data in fixed format */ - if (data->format == PIXEL_FORMAT_FP16) - return DSCL_MODE_DSCL_BYPASS; - } - - if (data->ratios.horz.value == one - && data->ratios.vert.value == one - && data->ratios.horz_c.value == one - && data->ratios.vert_c.value == one - && !dbg_always_scale) - return DSCL_MODE_SCALING_444_BYPASS; - - if (!dpp1_dscl_is_420_format(data->format)) { - if (dpp1_dscl_is_video_format(data->format)) - return DSCL_MODE_SCALING_444_YCBCR_ENABLE; - else - return DSCL_MODE_SCALING_444_RGB_ENABLE; - } - if (data->ratios.horz.value == one && data->ratios.vert.value == one) - return DSCL_MODE_SCALING_420_LUMA_BYPASS; - if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one) - return DSCL_MODE_SCALING_420_CHROMA_BYPASS; - - return DSCL_MODE_SCALING_420_YCBCR_ENABLE; -} - -static void dpp1_power_on_dscl( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - if (dpp->tf_regs->DSCL_MEM_PWR_CTRL) { - if (power_on) 
{ - REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 0); - REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5); - } else { - if (dpp->base.ctx->dc->debug.enable_mem_low_power.bits.dscl) { - dpp->base.ctx->dc->optimized_required = true; - dpp->base.deferred_reg_writes.bits.disable_dscl = true; - } else { - REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3); - } - } - } -} - - -static void dpp1_dscl_set_lb( - struct dcn10_dpp *dpp, - const struct line_buffer_params *lb_params, - enum lb_memory_config mem_size_config) -{ - uint32_t max_partitions = 63; /* Currently hardcoded on all ASICs before DCN 3.2 */ - - /* LB */ - if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) { - /* DSCL caps: pixel data processed in fixed format */ - uint32_t pixel_depth = dpp1_dscl_get_pixel_depth_val(lb_params->depth); - uint32_t dyn_pix_depth = lb_params->dynamic_pixel_depth; - - REG_SET_7(LB_DATA_FORMAT, 0, - PIXEL_DEPTH, pixel_depth, /* Pixel depth stored in LB */ - PIXEL_EXPAN_MODE, lb_params->pixel_expan_mode, /* Pixel expansion mode */ - PIXEL_REDUCE_MODE, 1, /* Pixel reduction mode: Rounding */ - DYNAMIC_PIXEL_DEPTH, dyn_pix_depth, /* Dynamic expansion pixel depth */ - DITHER_EN, 0, /* Dithering enable: Disabled */ - INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ - LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ - } else { - /* DSCL caps: pixel data processed in float format */ - REG_SET_2(LB_DATA_FORMAT, 0, - INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ - LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ - } - - if (dpp->base.caps->max_lb_partitions == 31) - max_partitions = 31; - - REG_SET_2(LB_MEMORY_CTRL, 0, - MEMORY_CONFIG, mem_size_config, - LB_MAX_PARTITIONS, max_partitions); -} - -static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio) -{ - if (taps == 8) - return get_filter_8tap_64p(ratio); - else if (taps == 7) - return get_filter_7tap_64p(ratio); - else if (taps == 6) - return get_filter_6tap_64p(ratio); - else if (taps == 5) - return get_filter_5tap_64p(ratio); - else if (taps == 4) - return get_filter_4tap_64p(ratio); - else if (taps == 3) - return get_filter_3tap_64p(ratio); - else if (taps == 2) - return get_filter_2tap_64p(); - else if (taps == 1) - return NULL; - else { - /* should never happen, bug */ - BREAK_TO_DEBUGGER(); - return NULL; - } -} - -static void dpp1_dscl_set_scaler_filter( - struct dcn10_dpp *dpp, - uint32_t taps, - enum dcn10_coef_filter_type_sel filter_type, - const uint16_t *filter) -{ - const int tap_pairs = (taps + 1) / 2; - int phase; - int pair; - uint16_t odd_coef, even_coef; - - REG_SET_3(SCL_COEF_RAM_TAP_SELECT, 0, - SCL_COEF_RAM_TAP_PAIR_IDX, 0, - SCL_COEF_RAM_PHASE, 0, - SCL_COEF_RAM_FILTER_TYPE, filter_type); - - for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) { - for (pair = 0; pair < tap_pairs; pair++) { - even_coef = filter[phase * taps + 2 * pair]; - if ((pair * 2 + 1) < taps) - odd_coef = filter[phase * taps + 2 * pair + 1]; - else - odd_coef = 0; - - REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0, - /* Even tap coefficient (bits 1:0 fixed to 0) */ - SCL_COEF_RAM_EVEN_TAP_COEF, even_coef, - /* Write/read control for even coefficient */ - SCL_COEF_RAM_EVEN_TAP_COEF_EN, 1, - /* Odd tap coefficient (bits 1:0 fixed to 0) */ - SCL_COEF_RAM_ODD_TAP_COEF, odd_coef, - /* Write/read control for odd coefficient */ - SCL_COEF_RAM_ODD_TAP_COEF_EN, 1); - } - } - -} - -static void dpp1_dscl_set_scl_filter( - struct 
dcn10_dpp *dpp, - const struct scaler_data *scl_data, - bool chroma_coef_mode) -{ - bool h_2tap_hardcode_coef_en = false; - bool v_2tap_hardcode_coef_en = false; - bool h_2tap_sharp_en = false; - bool v_2tap_sharp_en = false; - uint32_t h_2tap_sharp_factor = scl_data->sharpness.horz; - uint32_t v_2tap_sharp_factor = scl_data->sharpness.vert; - bool coef_ram_current; - const uint16_t *filter_h = NULL; - const uint16_t *filter_v = NULL; - const uint16_t *filter_h_c = NULL; - const uint16_t *filter_v_c = NULL; - - h_2tap_hardcode_coef_en = scl_data->taps.h_taps < 3 - && scl_data->taps.h_taps_c < 3 - && (scl_data->taps.h_taps > 1 && scl_data->taps.h_taps_c > 1); - v_2tap_hardcode_coef_en = scl_data->taps.v_taps < 3 - && scl_data->taps.v_taps_c < 3 - && (scl_data->taps.v_taps > 1 && scl_data->taps.v_taps_c > 1); - - h_2tap_sharp_en = h_2tap_hardcode_coef_en && h_2tap_sharp_factor != 0; - v_2tap_sharp_en = v_2tap_hardcode_coef_en && v_2tap_sharp_factor != 0; - - REG_UPDATE_6(DSCL_2TAP_CONTROL, - SCL_H_2TAP_HARDCODE_COEF_EN, h_2tap_hardcode_coef_en, - SCL_H_2TAP_SHARP_EN, h_2tap_sharp_en, - SCL_H_2TAP_SHARP_FACTOR, h_2tap_sharp_factor, - SCL_V_2TAP_HARDCODE_COEF_EN, v_2tap_hardcode_coef_en, - SCL_V_2TAP_SHARP_EN, v_2tap_sharp_en, - SCL_V_2TAP_SHARP_FACTOR, v_2tap_sharp_factor); - - if (!v_2tap_hardcode_coef_en || !h_2tap_hardcode_coef_en) { - bool filter_updated = false; - - filter_h = dpp1_dscl_get_filter_coeffs_64p( - scl_data->taps.h_taps, scl_data->ratios.horz); - filter_v = dpp1_dscl_get_filter_coeffs_64p( - scl_data->taps.v_taps, scl_data->ratios.vert); - - filter_updated = (filter_h && (filter_h != dpp->filter_h)) - || (filter_v && (filter_v != dpp->filter_v)); - - if (chroma_coef_mode) { - filter_h_c = dpp1_dscl_get_filter_coeffs_64p( - scl_data->taps.h_taps_c, scl_data->ratios.horz_c); - filter_v_c = dpp1_dscl_get_filter_coeffs_64p( - scl_data->taps.v_taps_c, scl_data->ratios.vert_c); - filter_updated = filter_updated || (filter_h_c && (filter_h_c != dpp->filter_h_c)) - || (filter_v_c && (filter_v_c != dpp->filter_v_c)); - } - - if (filter_updated) { - uint32_t scl_mode = REG_READ(SCL_MODE); - - if (!h_2tap_hardcode_coef_en && filter_h) { - dpp1_dscl_set_scaler_filter( - dpp, scl_data->taps.h_taps, - SCL_COEF_LUMA_HORZ_FILTER, filter_h); - } - dpp->filter_h = filter_h; - if (!v_2tap_hardcode_coef_en && filter_v) { - dpp1_dscl_set_scaler_filter( - dpp, scl_data->taps.v_taps, - SCL_COEF_LUMA_VERT_FILTER, filter_v); - } - dpp->filter_v = filter_v; - if (chroma_coef_mode) { - if (!h_2tap_hardcode_coef_en && filter_h_c) { - dpp1_dscl_set_scaler_filter( - dpp, scl_data->taps.h_taps_c, - SCL_COEF_CHROMA_HORZ_FILTER, filter_h_c); - } - if (!v_2tap_hardcode_coef_en && filter_v_c) { - dpp1_dscl_set_scaler_filter( - dpp, scl_data->taps.v_taps_c, - SCL_COEF_CHROMA_VERT_FILTER, filter_v_c); - } - } - dpp->filter_h_c = filter_h_c; - dpp->filter_v_c = filter_v_c; - - coef_ram_current = get_reg_field_value_ex( - scl_mode, dpp->tf_mask->SCL_COEF_RAM_SELECT_CURRENT, - dpp->tf_shift->SCL_COEF_RAM_SELECT_CURRENT); - - /* Swap coefficient RAM and set chroma coefficient mode */ - REG_SET_2(SCL_MODE, scl_mode, - SCL_COEF_RAM_SELECT, !coef_ram_current, - SCL_CHROMA_COEF_MODE, chroma_coef_mode); - } - } -} - -static int dpp1_dscl_get_lb_depth_bpc(enum lb_pixel_depth depth) -{ - if (depth == LB_PIXEL_DEPTH_30BPP) - return 10; - else if (depth == LB_PIXEL_DEPTH_24BPP) - return 8; - else if (depth == LB_PIXEL_DEPTH_18BPP) - return 6; - else if (depth == LB_PIXEL_DEPTH_36BPP) - return 12; - else { - 
BREAK_TO_DEBUGGER(); - return -1; /* Unsupported */ - } -} - -void dpp1_dscl_calc_lb_num_partitions( - const struct scaler_data *scl_data, - enum lb_memory_config lb_config, - int *num_part_y, - int *num_part_c) -{ - int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a, - lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a; - - int line_size = scl_data->viewport.width < scl_data->recout.width ? - scl_data->viewport.width : scl_data->recout.width; - int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? - scl_data->viewport_c.width : scl_data->recout.width; - - if (line_size == 0) - line_size = 1; - - if (line_size_c == 0) - line_size_c = 1; - - - lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth); - memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */ - memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */ - memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ - - if (lb_config == LB_MEMORY_CONFIG_1) { - lb_memory_size = 816; - lb_memory_size_c = 816; - lb_memory_size_a = 984; - } else if (lb_config == LB_MEMORY_CONFIG_2) { - lb_memory_size = 1088; - lb_memory_size_c = 1088; - lb_memory_size_a = 1312; - } else if (lb_config == LB_MEMORY_CONFIG_3) { - /* 420 mode: using 3rd mem from Y, Cr and Cb */ - lb_memory_size = 816 + 1088 + 848 + 848 + 848; - lb_memory_size_c = 816 + 1088; - lb_memory_size_a = 984 + 1312 + 456; - } else { - lb_memory_size = 816 + 1088 + 848; - lb_memory_size_c = 816 + 1088 + 848; - lb_memory_size_a = 984 + 1312 + 456; - } - *num_part_y = lb_memory_size / memory_line_size_y; - *num_part_c = lb_memory_size_c / memory_line_size_c; - num_partitions_a = lb_memory_size_a / memory_line_size_a; - - if (scl_data->lb_params.alpha_en - && (num_partitions_a < *num_part_y)) - *num_part_y = num_partitions_a; - - if (*num_part_y > 64) - *num_part_y = 64; - if (*num_part_c > 64) - *num_part_c = 64; - -} - -bool dpp1_dscl_is_lb_conf_valid(int ceil_vratio, int num_partitions, int vtaps) -{ - if (ceil_vratio > 2) - return vtaps <= (num_partitions - ceil_vratio + 2); - else - return vtaps <= num_partitions; -} - -/*find first match configuration which meets the min required lb size*/ -static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *dpp, - const struct scaler_data *scl_data) -{ - int num_part_y, num_part_c; - int vtaps = scl_data->taps.v_taps; - int vtaps_c = scl_data->taps.v_taps_c; - int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert); - int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c); - - if (dpp->base.ctx->dc->debug.use_max_lb) { - if (scl_data->format == PIXEL_FORMAT_420BPP8 - || scl_data->format == PIXEL_FORMAT_420BPP10) - return LB_MEMORY_CONFIG_3; - return LB_MEMORY_CONFIG_0; - } - - dpp->base.caps->dscl_calc_lb_num_partitions( - scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c); - - if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) - && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)) - return LB_MEMORY_CONFIG_1; - - dpp->base.caps->dscl_calc_lb_num_partitions( - scl_data, LB_MEMORY_CONFIG_2, &num_part_y, &num_part_c); - - if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) - && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)) - return LB_MEMORY_CONFIG_2; - - if (scl_data->format == PIXEL_FORMAT_420BPP8 - || scl_data->format == PIXEL_FORMAT_420BPP10) { - dpp->base.caps->dscl_calc_lb_num_partitions( - scl_data, LB_MEMORY_CONFIG_3, &num_part_y, &num_part_c); - - if 
(dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) - && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)) - return LB_MEMORY_CONFIG_3; - } - - dpp->base.caps->dscl_calc_lb_num_partitions( - scl_data, LB_MEMORY_CONFIG_0, &num_part_y, &num_part_c); - - /*Ensure we can support the requested number of vtaps*/ - ASSERT(dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) - && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)); - - return LB_MEMORY_CONFIG_0; -} - - -static void dpp1_dscl_set_manual_ratio_init( - struct dcn10_dpp *dpp, const struct scaler_data *data) -{ - uint32_t init_frac = 0; - uint32_t init_int = 0; - - REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0, - SCL_H_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.horz) << 5); - - REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0, - SCL_V_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.vert) << 5); - - REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0, - SCL_H_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.horz_c) << 5); - - REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0, - SCL_V_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.vert_c) << 5); - - /* - * 0.24 format for fraction, first five bits zeroed - */ - init_frac = dc_fixpt_u0d19(data->inits.h) << 5; - init_int = dc_fixpt_floor(data->inits.h); - REG_SET_2(SCL_HORZ_FILTER_INIT, 0, - SCL_H_INIT_FRAC, init_frac, - SCL_H_INIT_INT, init_int); - - init_frac = dc_fixpt_u0d19(data->inits.h_c) << 5; - init_int = dc_fixpt_floor(data->inits.h_c); - REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0, - SCL_H_INIT_FRAC_C, init_frac, - SCL_H_INIT_INT_C, init_int); - - init_frac = dc_fixpt_u0d19(data->inits.v) << 5; - init_int = dc_fixpt_floor(data->inits.v); - REG_SET_2(SCL_VERT_FILTER_INIT, 0, - SCL_V_INIT_FRAC, init_frac, - SCL_V_INIT_INT, init_int); - - if (REG(SCL_VERT_FILTER_INIT_BOT)) { - struct fixed31_32 bot = dc_fixpt_add(data->inits.v, data->ratios.vert); - - init_frac = dc_fixpt_u0d19(bot) << 5; - init_int = dc_fixpt_floor(bot); - REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0, - SCL_V_INIT_FRAC_BOT, init_frac, - SCL_V_INIT_INT_BOT, init_int); - } - - init_frac = dc_fixpt_u0d19(data->inits.v_c) << 5; - init_int = dc_fixpt_floor(data->inits.v_c); - REG_SET_2(SCL_VERT_FILTER_INIT_C, 0, - SCL_V_INIT_FRAC_C, init_frac, - SCL_V_INIT_INT_C, init_int); - - if (REG(SCL_VERT_FILTER_INIT_BOT_C)) { - struct fixed31_32 bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); - - init_frac = dc_fixpt_u0d19(bot) << 5; - init_int = dc_fixpt_floor(bot); - REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0, - SCL_V_INIT_FRAC_BOT_C, init_frac, - SCL_V_INIT_INT_BOT_C, init_int); - } -} - -/** - * dpp1_dscl_set_recout - Set the first pixel of RECOUT in the OTG active area - * - * @dpp: DPP data struct - * @recout: Rectangle information - * - * This function sets the MPC RECOUT_START and RECOUT_SIZE registers based on - * the values specified in the recount parameter. - * - * Note: This function only have effect if AutoCal is disabled. 
- */ -static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, - const struct rect *recout) -{ - REG_SET_2(RECOUT_START, 0, - /* First pixel of RECOUT in the active OTG area */ - RECOUT_START_X, recout->x, - /* First line of RECOUT in the active OTG area */ - RECOUT_START_Y, recout->y); - - REG_SET_2(RECOUT_SIZE, 0, - /* Number of RECOUT horizontal pixels */ - RECOUT_WIDTH, recout->width, - /* Number of RECOUT vertical lines */ - RECOUT_HEIGHT, recout->height); -} - -/** - * dpp1_dscl_set_scaler_manual_scale - Manually program scaler and line buffer - * - * @dpp_base: High level DPP struct - * @scl_data: scalaer_data info - * - * This is the primary function to program scaler and line buffer in manual - * scaling mode. To execute the required operations for manual scale, we need - * to disable AutoCal first. - */ -void dpp1_dscl_set_scaler_manual_scale(struct dpp *dpp_base, - const struct scaler_data *scl_data) -{ - enum lb_memory_config lb_config; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode( - dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale); - bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN - && scl_data->format <= PIXEL_FORMAT_VIDEO_END; - - if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0) - return; - - PERF_TRACE(); - - dpp->scl_data = *scl_data; - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) { - if (dscl_mode != DSCL_MODE_DSCL_BYPASS) - dpp1_power_on_dscl(dpp_base, true); - } - - /* Autocal off */ - REG_SET_3(DSCL_AUTOCAL, 0, - AUTOCAL_MODE, AUTOCAL_MODE_OFF, - AUTOCAL_NUM_PIPE, 0, - AUTOCAL_PIPE_ID, 0); - - /*clean scaler boundary mode when Autocal off*/ - REG_SET(DSCL_CONTROL, 0, - SCL_BOUNDARY_MODE, 0); - - /* Recout */ - dpp1_dscl_set_recout(dpp, &scl_data->recout); - - /* MPC Size */ - REG_SET_2(MPC_SIZE, 0, - /* Number of horizontal pixels of MPC */ - MPC_WIDTH, scl_data->h_active, - /* Number of vertical lines of MPC */ - MPC_HEIGHT, scl_data->v_active); - - /* SCL mode */ - REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode); - - if (dscl_mode == DSCL_MODE_DSCL_BYPASS) { - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) - dpp1_power_on_dscl(dpp_base, false); - return; - } - - /* LB */ - lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data); - dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config); - - if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) - return; - - /* Black offsets */ - if (REG(SCL_BLACK_OFFSET)) { - if (ycbcr) - REG_SET_2(SCL_BLACK_OFFSET, 0, - SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, - SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR); - else - - REG_SET_2(SCL_BLACK_OFFSET, 0, - SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, - SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y); - } - - /* Manually calculate scale ratio and init values */ - dpp1_dscl_set_manual_ratio_init(dpp, scl_data); - - /* HTaps/VTaps */ - REG_SET_4(SCL_TAP_CONTROL, 0, - SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1, - SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1, - SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1, - SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1); - - dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr); - PERF_TRACE(); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index d51f1ce028..6dd355a030 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -130,7 +130,7 @@ bool hubbub1_verify_allow_pstate_change_high( static unsigned int 
max_sampled_pstate_wait_us; /* data collection */ static bool forced_pstate_allow; /* help with revert wa */ - unsigned int debug_data; + unsigned int debug_data = 0; unsigned int i; if (forced_pstate_allow) { @@ -242,7 +242,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub) bool hubbub1_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -356,7 +356,7 @@ bool hubbub1_program_urgent_watermarks( bool hubbub1_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -501,7 +501,7 @@ bool hubbub1_program_stutter_watermarks( bool hubbub1_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -582,7 +582,7 @@ bool hubbub1_program_pstate_watermarks( bool hubbub1_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index 4201b76270..d1f9e63944 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -409,7 +409,7 @@ struct dcn10_hubbub { const struct dcn_hubbub_shift *shifts; const struct dcn_hubbub_mask *masks; unsigned int debug_test_index_pstate; - struct dcn_watermark_set watermarks; + union dcn_watermark_set watermarks; }; void hubbub1_update_dchub( @@ -423,7 +423,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub); bool hubbub1_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); @@ -446,17 +446,17 @@ void hubbub1_construct(struct hubbub *hubbub, bool hubbub1_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub1_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub1_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 09784222cc..69119b2fdc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -692,6 +692,7 @@ struct dcn_hubp_state { uint32_t primary_meta_addr_hi; uint32_t uclk_pstate_force; uint32_t hubp_cntl; + uint32_t flip_control; }; struct dcn10_hubp { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index 9033b39e0e..c51b717e56 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -392,7 +392,7 @@ static unsigned int dcn10_get_mpcc_states(struct dc *dc, char *pBuf, unsigned in remaining_buffer -= chars_printed; pBuf += chars_printed; - for 
(i = 0; i < pool->pipe_count; i++) { + for (i = 0; i < pool->mpcc_count; i++) { struct mpcc_state s = {0}; pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index 377f1ba1a8..4d0eed7598 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -1439,7 +1439,6 @@ enum signal_type dcn10_get_dig_mode( default: return SIGNAL_TYPE_NONE; } - return SIGNAL_TYPE_NONE; } void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index d980e6bd6c..b7a89c39f4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -167,7 +167,6 @@ struct dcn10_link_enc_registers { uint32_t DIO_LINKD_CNTL; uint32_t DIO_LINKE_CNTL; uint32_t DIO_LINKF_CNTL; - uint32_t DIG_FIFO_CTRL0; uint32_t DIO_CLK_CNTL; uint32_t DIG_BE_CLK_CNTL; }; @@ -475,9 +474,6 @@ struct dcn10_link_enc_registers { type HPO_DP_ENC_SEL;\ type HPO_HDMI_ENC_SEL -#define DCN32_LINK_ENCODER_REG_FIELD_LIST(type) \ - type DIG_FIFO_OUTPUT_PIXEL_MODE - #define DCN35_LINK_ENCODER_REG_FIELD_LIST(type) \ type DIG_BE_ENABLE;\ type DIG_RB_SWITCH_EN;\ @@ -512,7 +508,6 @@ struct dcn10_link_enc_shift { DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t); DCN30_LINK_ENCODER_REG_FIELD_LIST(uint8_t); DCN31_LINK_ENCODER_REG_FIELD_LIST(uint8_t); - DCN32_LINK_ENCODER_REG_FIELD_LIST(uint8_t); DCN35_LINK_ENCODER_REG_FIELD_LIST(uint8_t); }; @@ -521,7 +516,6 @@ struct dcn10_link_enc_mask { DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t); DCN30_LINK_ENCODER_REG_FIELD_LIST(uint32_t); DCN31_LINK_ENCODER_REG_FIELD_LIST(uint32_t); - DCN32_LINK_ENCODER_REG_FIELD_LIST(uint32_t); DCN35_LINK_ENCODER_REG_FIELD_LIST(uint32_t); }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 5838a11efd..71e9288d60 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -168,6 +168,10 @@ static void opp1_set_pixel_encoding( case PIXEL_ENCODING_RGB: case PIXEL_ENCODING_YCBCR444: + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 0, + FMT_SUBSAMPLING_MODE, 0, + FMT_CBCR_BIT_REDUCTION_BYPASS, 0); REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0); break; case PIXEL_ENCODING_YCBCR422: @@ -177,7 +181,10 @@ static void opp1_set_pixel_encoding( FMT_CBCR_BIT_REDUCTION_BYPASS, 0); break; case PIXEL_ENCODING_YCBCR420: - REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2); + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 2, + FMT_SUBSAMPLING_MODE, 2, + FMT_CBCR_BIT_REDUCTION_BYPASS, 1); break; default: break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h index 2c0ecfa5a6..c87de68a50 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h @@ -79,6 +79,8 @@ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \ OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh), \ OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, 
mask_sh), \ OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \ OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index c429590f12..1b96972b9d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -127,7 +127,6 @@ struct dcn10_stream_enc_registers { uint32_t AFMT_60958_1; uint32_t AFMT_60958_2; uint32_t DIG_FE_CNTL; - uint32_t DIG_FE_CNTL2; uint32_t DIG_FIFO_STATUS; uint32_t DP_MSE_RATE_CNTL; uint32_t DP_MSE_RATE_UPDATE; @@ -570,7 +569,7 @@ struct dcn10_stream_enc_registers { type DP_SEC_GSP11_ENABLE;\ type DP_SEC_GSP11_LINE_NUM -#define SE_REG_FIELD_LIST_DCN3_2(type) \ +#define SE_REG_FIELD_LIST_DCN3_1_COMMON(type) \ type DIG_FIFO_OUTPUT_PIXEL_MODE;\ type DP_PIXEL_PER_CYCLE_PROCESSING_MODE;\ type DIG_SYMCLK_FE_ON;\ @@ -599,7 +598,7 @@ struct dcn10_stream_encoder_shift { uint8_t HDMI_ACP_SEND; SE_REG_FIELD_LIST_DCN2_0(uint8_t); SE_REG_FIELD_LIST_DCN3_0(uint8_t); - SE_REG_FIELD_LIST_DCN3_2(uint8_t); + SE_REG_FIELD_LIST_DCN3_1_COMMON(uint8_t); SE_REG_FIELD_LIST_DCN3_5_COMMON(uint8_t); }; @@ -608,7 +607,7 @@ struct dcn10_stream_encoder_mask { uint32_t HDMI_ACP_SEND; SE_REG_FIELD_LIST_DCN2_0(uint32_t); SE_REG_FIELD_LIST_DCN3_0(uint32_t); - SE_REG_FIELD_LIST_DCN3_2(uint32_t); + SE_REG_FIELD_LIST_DCN3_1_COMMON(uint32_t); SE_REG_FIELD_LIST_DCN3_5_COMMON(uint32_t); }; @@ -667,9 +666,6 @@ void enc1_stream_encoder_send_immediate_sdp_message( void enc1_stream_encoder_stop_dp_info_packets( struct stream_encoder *enc); -void enc1_stream_encoder_reset_fifo( - struct stream_encoder *enc); - void enc1_stream_encoder_dp_blank( struct dc_link *link, struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index 3dae3943b0..9b6070c997 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -2,7 +2,7 @@ # # Makefile for DCN. -DCN20 = dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ +DCN20 = dcn20_hubp.o \ dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \ dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \ dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c deleted file mode 100644 index 1516c0a487..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ /dev/null @@ -1,435 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "core_types.h" - -#include "reg_helper.h" -#include "dcn20_dpp.h" -#include "basics/conversion.h" - -#define NUM_PHASES 64 -#define HORZ_MAX_TAPS 8 -#define VERT_MAX_TAPS 8 - -#define BLACK_OFFSET_RGB_Y 0x0 -#define BLACK_OFFSET_CBCR 0x8000 - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - -void dpp20_read_state(struct dpp *dpp_base, - struct dcn_dpp_state *s) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_GET(DPP_CONTROL, - DPP_CLOCK_ENABLE, &s->is_enabled); - - // Degamma LUT (RAM) - REG_GET(CM_DGAM_CONTROL, - CM_DGAM_LUT_MODE, &s->dgam_lut_mode); - - // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) - REG_GET(CM_SHAPER_CONTROL, - CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); - REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL, - CM_3DLUT_CONFIG_STATUS, &s->lut3d_mode, - CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); - REG_GET(CM_3DLUT_MODE, - CM_3DLUT_SIZE, &s->lut3d_size); - - // Blend/Out Gamma (RAM) - REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK, - CM_BLNDGAM_CONFIG_STATUS, &s->rgam_lut_mode); -} - -void dpp2_power_on_obuf( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, power_on == true ? 1:0); - - REG_UPDATE(OBUF_MEM_PWR_CTRL, - OBUF_MEM_PWR_FORCE, power_on == true ? 0:1); - - REG_UPDATE(DSCL_MEM_PWR_CTRL, - LUT_MEM_PWR_FORCE, power_on == true ? 0:1); -} - -void dpp2_dummy_program_input_lut( - struct dpp *dpp_base, - const struct dc_gamma *gamma) -{} - -static void dpp2_cnv_setup ( - struct dpp *dpp_base, - enum surface_pixel_format format, - enum expansion_mode mode, - struct dc_csc_transform input_csc_color_matrix, - enum dc_color_space input_color_space, - struct cnv_alpha_2bit_lut *alpha_2bit_lut) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - uint32_t pixel_format = 0; - uint32_t alpha_en = 1; - enum dc_color_space color_space = COLOR_SPACE_SRGB; - enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS; - bool force_disable_cursor = false; - struct out_csc_color_matrix tbl_entry; - uint32_t is_2bit = 0; - int i = 0; - - REG_SET_2(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_EXPANSION_MODE, mode); - - //hardcode default - //FORMAT_CONTROL. FORMAT_CNV16 default 0: U0.16/S.1.15; 1: U1.15/ S.1.14 - //FORMAT_CONTROL. CNVC_BYPASS_MSB_ALIGN default 0: disabled 1: enabled - //FORMAT_CONTROL. CLAMP_POSITIVE default 0: disabled 1: enabled - //FORMAT_CONTROL. 
CLAMP_POSITIVE_C default 0: disabled 1: enabled - REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); - REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); - REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); - REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); - - switch (format) { - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - pixel_format = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - pixel_format = 3; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - pixel_format = 8; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - pixel_format = 10; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - force_disable_cursor = false; - pixel_format = 65; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: - force_disable_cursor = true; - pixel_format = 64; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - force_disable_cursor = true; - pixel_format = 67; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - force_disable_cursor = true; - pixel_format = 66; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: - pixel_format = 26; /* ARGB16161616_UNORM */ - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - pixel_format = 24; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - pixel_format = 25; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: - pixel_format = 12; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: - pixel_format = 112; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: - pixel_format = 113; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: - pixel_format = 114; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: - pixel_format = 115; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: - pixel_format = 118; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: - pixel_format = 119; - alpha_en = 0; - break; - default: - break; - } - - /* Set default color space based on format if none is given. */ - color_space = input_color_space ? 
input_color_space : color_space; - - if (is_2bit == 1 && alpha_2bit_lut != NULL) { - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); - } - - REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, pixel_format); - REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); - - // if input adjustments exist, program icsc with those values - if (input_csc_color_matrix.enable_adjustment - == true) { - for (i = 0; i < 12; i++) - tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; - - tbl_entry.color_space = input_color_space; - - if (color_space >= COLOR_SPACE_YCBCR601) - select = DCN2_ICSC_SELECT_ICSC_A; - else - select = DCN2_ICSC_SELECT_BYPASS; - - dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry); - } else - dpp2_program_input_csc(dpp_base, color_space, select, NULL); - - if (force_disable_cursor) { - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, 0); - REG_UPDATE(CURSOR0_CONTROL, - CUR0_ENABLE, 0); - - } - dpp2_power_on_obuf(dpp_base, true); - -} - -/*compute the maximum number of lines that we can fit in the line buffer*/ -void dscl2_calc_lb_num_partitions( - const struct scaler_data *scl_data, - enum lb_memory_config lb_config, - int *num_part_y, - int *num_part_c) -{ - int memory_line_size_y, memory_line_size_c, memory_line_size_a, - lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a; - - int line_size = scl_data->viewport.width < scl_data->recout.width ? - scl_data->viewport.width : scl_data->recout.width; - int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? 
- scl_data->viewport_c.width : scl_data->recout.width; - - if (line_size == 0) - line_size = 1; - - if (line_size_c == 0) - line_size_c = 1; - - memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */ - memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */ - memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ - - if (lb_config == LB_MEMORY_CONFIG_1) { - lb_memory_size = 970; - lb_memory_size_c = 970; - lb_memory_size_a = 970; - } else if (lb_config == LB_MEMORY_CONFIG_2) { - lb_memory_size = 1290; - lb_memory_size_c = 1290; - lb_memory_size_a = 1290; - } else if (lb_config == LB_MEMORY_CONFIG_3) { - /* 420 mode: using 3rd mem from Y, Cr and Cb */ - lb_memory_size = 970 + 1290 + 484 + 484 + 484; - lb_memory_size_c = 970 + 1290; - lb_memory_size_a = 970 + 1290 + 484; - } else { - lb_memory_size = 970 + 1290 + 484; - lb_memory_size_c = 970 + 1290 + 484; - lb_memory_size_a = 970 + 1290 + 484; - } - *num_part_y = lb_memory_size / memory_line_size_y; - *num_part_c = lb_memory_size_c / memory_line_size_c; - num_partitions_a = lb_memory_size_a / memory_line_size_a; - - if (scl_data->lb_params.alpha_en - && (num_partitions_a < *num_part_y)) - *num_part_y = num_partitions_a; - - if (*num_part_y > 64) - *num_part_y = 64; - if (*num_part_c > 64) - *num_part_c = 64; -} - -void dpp2_cnv_set_alpha_keyer( - struct dpp *dpp_base, - struct cnv_color_keyer_params *color_keyer) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(COLOR_KEYER_CONTROL, COLOR_KEYER_EN, color_keyer->color_keyer_en); - - REG_UPDATE(COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, color_keyer->color_keyer_mode); - - REG_UPDATE(COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, color_keyer->color_keyer_alpha_low); - REG_UPDATE(COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, color_keyer->color_keyer_alpha_high); - - REG_UPDATE(COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, color_keyer->color_keyer_red_low); - REG_UPDATE(COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, color_keyer->color_keyer_red_high); - - REG_UPDATE(COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, color_keyer->color_keyer_green_low); - REG_UPDATE(COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, color_keyer->color_keyer_green_high); - - REG_UPDATE(COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, color_keyer->color_keyer_blue_low); - REG_UPDATE(COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, color_keyer->color_keyer_blue_high); -} - -void dpp2_set_cursor_attributes( - struct dpp *dpp_base, - struct dc_cursor_attributes *cursor_attributes) -{ - enum dc_cursor_color_format color_format = cursor_attributes->color_format; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - int cur_rom_en = 0; - - if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || - color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { - if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { - cur_rom_en = 1; - } - } - - REG_UPDATE_3(CURSOR0_CONTROL, - CUR0_MODE, color_format, - CUR0_EXPANSION_MODE, 0, - CUR0_ROM_EN, cur_rom_en); - - if (color_format == CURSOR_MODE_MONO) { - /* todo: clarify what to program these to */ - REG_UPDATE(CURSOR0_COLOR0, - CUR0_COLOR0, 0x00000000); - REG_UPDATE(CURSOR0_COLOR1, - CUR0_COLOR1, 0xFFFFFFFF); - } -} - -void oppn20_dummy_program_regamma_pwl( - struct dpp *dpp, - const struct pwl_params *params, - enum opp_regamma mode) -{} - -static struct dpp_funcs dcn20_dpp_funcs = { - .dpp_read_state = dpp20_read_state, - .dpp_reset = dpp_reset, - .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, - .dpp_set_gamut_remap 
= dpp2_cm_set_gamut_remap, - .dpp_set_csc_adjustment = NULL, - .dpp_set_csc_default = NULL, - .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl, - .dpp_set_degamma = dpp2_set_degamma, - .dpp_program_input_lut = dpp2_dummy_program_input_lut, - .dpp_full_bypass = dpp1_full_bypass, - .dpp_setup = dpp2_cnv_setup, - .dpp_program_degamma_pwl = dpp2_set_degamma_pwl, - .dpp_program_blnd_lut = dpp20_program_blnd_lut, - .dpp_program_shaper_lut = dpp20_program_shaper, - .dpp_program_3dlut = dpp20_program_3dlut, - .dpp_program_bias_and_scale = NULL, - .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, - .set_cursor_attributes = dpp2_set_cursor_attributes, - .set_cursor_position = dpp1_set_cursor_position, - .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, - .dpp_dppclk_control = dpp1_dppclk_control, - .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, - .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap, -}; - -static struct dpp_caps dcn20_dpp_cap = { - .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, - .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, -}; - -bool dpp2_construct( - struct dcn20_dpp *dpp, - struct dc_context *ctx, - uint32_t inst, - const struct dcn2_dpp_registers *tf_regs, - const struct dcn2_dpp_shift *tf_shift, - const struct dcn2_dpp_mask *tf_mask) -{ - dpp->base.ctx = ctx; - - dpp->base.inst = inst; - dpp->base.funcs = &dcn20_dpp_funcs; - dpp->base.caps = &dcn20_dpp_cap; - - dpp->tf_regs = tf_regs; - dpp->tf_shift = tf_shift; - dpp->tf_mask = tf_mask; - - dpp->lb_pixel_depth_supported = - LB_PIXEL_DEPTH_18BPP | - LB_PIXEL_DEPTH_24BPP | - LB_PIXEL_DEPTH_30BPP | - LB_PIXEL_DEPTH_36BPP; - - dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; - dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/ - - return true; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h deleted file mode 100644 index 672cde46c4..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h +++ /dev/null @@ -1,781 +0,0 @@ -/* Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DCN20_DPP_H__ -#define __DCN20_DPP_H__ - -#include "dcn10/dcn10_dpp.h" - -#define TO_DCN20_DPP(dpp)\ - container_of(dpp, struct dcn20_dpp, base) - -#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \ - SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id) - -#define TF_REG_LIST_DCN20_COMMON(id) \ - SRI(CM_BLNDGAM_CONTROL, CM, id), \ - SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \ - SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \ - SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \ - SRI(CM_BLNDGAM_RAMB_END_CNTL2_G, CM, id), \ - SRI(CM_BLNDGAM_RAMB_END_CNTL1_R, CM, id), \ - SRI(CM_BLNDGAM_RAMB_END_CNTL2_R, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_0_1, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_2_3, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_4_5, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_6_7, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_8_9, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_10_11, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_12_13, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_14_15, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_16_17, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_18_19, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_20_21, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_22_23, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_24_25, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_26_27, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_28_29, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_30_31, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_32_33, CM, id), \ - SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \ - SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \ - SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \ - SRI(CM_BLNDGAM_RAMA_END_CNTL2_G, CM, id), \ - SRI(CM_BLNDGAM_RAMA_END_CNTL1_R, CM, id), \ - SRI(CM_BLNDGAM_RAMA_END_CNTL2_R, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_0_1, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_2_3, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_4_5, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_6_7, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_8_9, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_10_11, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_12_13, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_14_15, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_16_17, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_18_19, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_20_21, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_22_23, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_24_25, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_26_27, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_28_29, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_30_31, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_32_33, CM, id), \ - SRI(CM_BLNDGAM_LUT_INDEX, CM, id), \ - SRI(CM_BLNDGAM_LUT_DATA, CM, id), \ - SRI(CM_3DLUT_MODE, CM, id), \ - SRI(CM_3DLUT_INDEX, CM, id), \ - SRI(CM_3DLUT_DATA, CM, id), \ - SRI(CM_3DLUT_DATA_30BIT, CM, id), \ - SRI(CM_3DLUT_READ_WRITE_CONTROL, CM, id), \ - SRI(CM_SHAPER_LUT_WRITE_EN_MASK, CM, id), \ - SRI(CM_SHAPER_CONTROL, CM, id), \ - SRI(CM_SHAPER_RAMB_START_CNTL_B, CM, id), \ - SRI(CM_SHAPER_RAMB_START_CNTL_G, CM, id), \ - SRI(CM_SHAPER_RAMB_START_CNTL_R, CM, id), \ - 
SRI(CM_SHAPER_RAMB_END_CNTL_B, CM, id), \ - SRI(CM_SHAPER_RAMB_END_CNTL_G, CM, id), \ - SRI(CM_SHAPER_RAMB_END_CNTL_R, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_0_1, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_2_3, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_4_5, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_6_7, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_8_9, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_10_11, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_12_13, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_14_15, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_16_17, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_18_19, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_20_21, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_22_23, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_24_25, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_26_27, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_28_29, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_30_31, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_32_33, CM, id), \ - SRI(CM_SHAPER_RAMA_START_CNTL_B, CM, id), \ - SRI(CM_SHAPER_RAMA_START_CNTL_G, CM, id), \ - SRI(CM_SHAPER_RAMA_START_CNTL_R, CM, id), \ - SRI(CM_SHAPER_RAMA_END_CNTL_B, CM, id), \ - SRI(CM_SHAPER_RAMA_END_CNTL_G, CM, id), \ - SRI(CM_SHAPER_RAMA_END_CNTL_R, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_0_1, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_2_3, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_4_5, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_6_7, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_8_9, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_10_11, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_12_13, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_14_15, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_16_17, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_18_19, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_20_21, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_22_23, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_24_25, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_26_27, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \ - SRI(CM_SHAPER_LUT_INDEX, CM, id) - -#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \ - SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\ - SRI(CM_ICSC_B_C11_C12, CM, id), \ - SRI(CM_ICSC_B_C33_C34, CM, id) - -#define TF_REG_LIST_DCN20(id) \ - TF_REG_LIST_DCN(id), \ - TF_REG_LIST_DCN20_COMMON(id), \ - TF_REG_LIST_DCN20_COMMON_UPDATED(id), \ - SRI(CURSOR_CONTROL, CURSOR0_, id), \ - SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \ - SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \ - SRI(FCNV_FP_BIAS_G, CNVC_CFG, id), \ - SRI(FCNV_FP_BIAS_B, CNVC_CFG, id), \ - SRI(FCNV_FP_SCALE_R, CNVC_CFG, id), \ - SRI(FCNV_FP_SCALE_G, CNVC_CFG, id), \ - SRI(FCNV_FP_SCALE_B, CNVC_CFG, id), \ - SRI(COLOR_KEYER_CONTROL, CNVC_CFG, id), \ - SRI(COLOR_KEYER_ALPHA, CNVC_CFG, id), \ - SRI(COLOR_KEYER_RED, CNVC_CFG, id), \ - SRI(COLOR_KEYER_GREEN, CNVC_CFG, id), \ - SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \ - SRI(CM_SHAPER_LUT_DATA, CM, id), \ - SRI(CURSOR_CONTROL, CURSOR0_, id),\ - SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\ - SRI(DSCL_MEM_PWR_CTRL, DSCL, id) - - -#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, 
CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh) - - -#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\ - TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, 
CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, 
CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh), 
\ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh), \ - 
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \ - TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \ - TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \ - TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_SIZE, mask_sh), \ - TF_SF(CM0_CM_3DLUT_INDEX, CM_3DLUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_3DLUT_DATA, CM_3DLUT_DATA0, mask_sh), \ - TF_SF(CM0_CM_3DLUT_DATA, CM_3DLUT_DATA1, mask_sh), \ - TF_SF(CM0_CM_3DLUT_DATA_30BIT, CM_3DLUT_DATA_30BIT, mask_sh), \ - TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \ - TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \ - TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \ - TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_END_B, mask_sh), \ - 
TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, 
CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_START_G, mask_sh), \ - 
TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), 
\ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, 
CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh) - - -#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\ - TF_REG_LIST_SH_MASK_DCN(mask_sh), \ - TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \ - TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \ - TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \ - TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CNV16, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE_C, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_EN, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\ - TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ - TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh) - -/* DPP CM debug status register: - * - * Status index including current ICSC, Gamut Remap Mode is 9 - * ICSC Mode: [4..3] - * Gamut Remap Mode: [10..9] - */ -#define CM_TEST_DEBUG_DATA_STATUS_IDX 9 - -#define TF_DEBUG_REG_LIST_SH_DCN20 \ - TF_DEBUG_REG_LIST_SH_DCN10, \ - .CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \ - .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9 - -#define TF_DEBUG_REG_LIST_MASK_DCN20 \ - TF_DEBUG_REG_LIST_MASK_DCN10, \ - .CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \ - .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600 - -#define TF_REG_FIELD_LIST_DCN2_0(type) \ - TF_REG_FIELD_LIST(type) \ - type CM_BLNDGAM_LUT_DATA; \ - type CM_TEST_DEBUG_DATA_ICSC_MODE; \ - type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \ - type FORMAT_CNV16; \ - type 
CNVC_BYPASS_MSB_ALIGN; \ - type CLAMP_POSITIVE; \ - type CLAMP_POSITIVE_C; \ - type ALPHA_2BIT_LUT0; \ - type ALPHA_2BIT_LUT1; \ - type ALPHA_2BIT_LUT2; \ - type ALPHA_2BIT_LUT3; \ - type FCNV_FP_BIAS_R; \ - type FCNV_FP_BIAS_G; \ - type FCNV_FP_BIAS_B; \ - type FCNV_FP_SCALE_R; \ - type FCNV_FP_SCALE_G; \ - type FCNV_FP_SCALE_B; \ - type COLOR_KEYER_EN; \ - type COLOR_KEYER_MODE; \ - type COLOR_KEYER_ALPHA_LOW; \ - type COLOR_KEYER_ALPHA_HIGH; \ - type COLOR_KEYER_RED_LOW; \ - type COLOR_KEYER_RED_HIGH; \ - type COLOR_KEYER_GREEN_LOW; \ - type COLOR_KEYER_GREEN_HIGH; \ - type COLOR_KEYER_BLUE_LOW; \ - type COLOR_KEYER_BLUE_HIGH; \ - type CUR0_PIX_INV_MODE; \ - type CUR0_PIXEL_ALPHA_MOD_EN; \ - type CUR0_ROM_EN;\ - type OBUF_MEM_PWR_FORCE - - -struct dcn2_dpp_shift { - TF_REG_FIELD_LIST_DCN2_0(uint8_t); -}; - -struct dcn2_dpp_mask { - TF_REG_FIELD_LIST_DCN2_0(uint32_t); -}; - -#define DPP_DCN2_REG_VARIABLE_LIST \ - DPP_COMMON_REG_VARIABLE_LIST \ - uint32_t CM_BLNDGAM_LUT_DATA; \ - uint32_t ALPHA_2BIT_LUT; \ - uint32_t FCNV_FP_BIAS_R; \ - uint32_t FCNV_FP_BIAS_G; \ - uint32_t FCNV_FP_BIAS_B; \ - uint32_t FCNV_FP_SCALE_R; \ - uint32_t FCNV_FP_SCALE_G; \ - uint32_t FCNV_FP_SCALE_B; \ - uint32_t COLOR_KEYER_CONTROL; \ - uint32_t COLOR_KEYER_ALPHA; \ - uint32_t COLOR_KEYER_RED; \ - uint32_t COLOR_KEYER_GREEN; \ - uint32_t COLOR_KEYER_BLUE; \ - uint32_t OBUF_MEM_PWR_CTRL - -#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \ - uint32_t CM_GAMUT_REMAP_B_C11_C12; \ - uint32_t CM_GAMUT_REMAP_B_C13_C14; \ - uint32_t CM_GAMUT_REMAP_B_C21_C22; \ - uint32_t CM_GAMUT_REMAP_B_C23_C24; \ - uint32_t CM_GAMUT_REMAP_B_C31_C32; \ - uint32_t CM_GAMUT_REMAP_B_C33_C34; \ - uint32_t CM_ICSC_B_C11_C12; \ - uint32_t CM_ICSC_B_C33_C34 - -struct dcn2_dpp_registers { - DPP_DCN2_REG_VARIABLE_LIST; - DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND; -}; - -struct dcn20_dpp { - struct dpp base; - - const struct dcn2_dpp_registers *tf_regs; - const struct dcn2_dpp_shift *tf_shift; - const struct dcn2_dpp_mask *tf_mask; - - const uint16_t *filter_v; - const uint16_t *filter_h; - const uint16_t *filter_v_c; - const uint16_t *filter_h_c; - int lb_pixel_depth_supported; - int lb_memory_size; - int lb_bits_per_entry; - bool is_write_to_ram_a_safe; - struct scaler_data scl_data; - struct pwl_params pwl_data; -}; - -enum dcn20_input_csc_select { - DCN2_ICSC_SELECT_BYPASS = 0, - DCN2_ICSC_SELECT_ICSC_A = 1, - DCN2_ICSC_SELECT_ICSC_B = 2 -}; - -enum dcn20_gamut_remap_select { - DCN2_GAMUT_REMAP_BYPASS = 0, - DCN2_GAMUT_REMAP_COEF_A = 1, - DCN2_GAMUT_REMAP_COEF_B = 2 -}; - -void dpp20_read_state(struct dpp *dpp_base, - struct dcn_dpp_state *s); - -void dpp2_set_degamma_pwl( - struct dpp *dpp_base, - const struct pwl_params *params); - -void dpp2_set_degamma( - struct dpp *dpp_base, - enum ipp_degamma_mode mode); - -void dpp2_cm_set_gamut_remap( - struct dpp *dpp_base, - const struct dpp_grph_csc_adjustment *adjust); - -void dpp2_program_input_csc( - struct dpp *dpp_base, - enum dc_color_space color_space, - enum dcn20_input_csc_select input_select, - const struct out_csc_color_matrix *tbl_entry); - -bool dpp20_program_blnd_lut( - struct dpp *dpp_base, const struct pwl_params *params); - -bool dpp20_program_shaper( - struct dpp *dpp_base, - const struct pwl_params *params); - -bool dpp20_program_3dlut( - struct dpp *dpp_base, - struct tetrahedral_params *params); - -void dpp2_cnv_set_alpha_keyer( - struct dpp *dpp_base, - struct cnv_color_keyer_params *color_keyer); - -void dscl2_calc_lb_num_partitions( - const struct scaler_data *scl_data, - enum 
lb_memory_config lb_config, - int *num_part_y, - int *num_part_c); - -void dpp2_set_cursor_attributes( - struct dpp *dpp_base, - struct dc_cursor_attributes *cursor_attributes); - -void dpp2_dummy_program_input_lut( - struct dpp *dpp_base, - const struct dc_gamma *gamma); - -void oppn20_dummy_program_regamma_pwl( - struct dpp *dpp, - const struct pwl_params *params, - enum opp_regamma mode); - -void dpp2_set_hdr_multiplier( - struct dpp *dpp_base, - uint32_t multiplier); - -bool dpp2_construct(struct dcn20_dpp *dpp2, - struct dc_context *ctx, - uint32_t inst, - const struct dcn2_dpp_registers *tf_regs, - const struct dcn2_dpp_shift *tf_shift, - const struct dcn2_dpp_mask *tf_mask); - -void dpp2_power_on_obuf( - struct dpp *dpp_base, - bool power_on); - -void dpp2_cm_get_gamut_remap(struct dpp *dpp_base, - struct dpp_grph_csc_adjustment *adjust); -#endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c deleted file mode 100644 index 58dc69926e..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c +++ /dev/null @@ -1,1202 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "core_types.h" - -#include "reg_helper.h" -#include "dcn20_dpp.h" -#include "basics/conversion.h" - -#include "dcn10/dcn10_cm_common.h" - -#define REG(reg)\ - dpp->tf_regs->reg - -#define IND_REG(index) \ - (index) - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - - -static void dpp2_enable_cm_block( - struct dpp *dpp_base) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - unsigned int cm_bypass_mode = 0; - //Temp, put CM in bypass mode - if (dpp_base->ctx->dc->debug.cm_in_bypass) - cm_bypass_mode = 1; - - REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode); -} - - -static bool dpp2_degamma_ram_inuse( - struct dpp *dpp_base, - bool *ram_a_inuse) -{ - bool ret = false; - uint32_t status_reg = 0; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_GET(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, - &status_reg); - - if (status_reg == 3) { - *ram_a_inuse = true; - ret = true; - } else if (status_reg == 4) { - *ram_a_inuse = false; - ret = true; - } - return ret; -} - -static void dpp2_program_degamma_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num, - bool is_ram_a) -{ - uint32_t i; - - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, - CM_DGAM_LUT_WRITE_EN_MASK, 7); - REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, - is_ram_a == true ? 0:1); - - REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0); - for (i = 0 ; i < num; i++) { - REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg); - - REG_SET(CM_DGAM_LUT_DATA, 0, - CM_DGAM_LUT_DATA, rgb[i].delta_red_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, - CM_DGAM_LUT_DATA, rgb[i].delta_green_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, - CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg); - - } - -} - -void dpp2_set_degamma_pwl( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - bool is_ram_a = true; - - dpp1_power_on_degamma_lut(dpp_base, true); - dpp2_enable_cm_block(dpp_base); - dpp2_degamma_ram_inuse(dpp_base, &is_ram_a); - if (is_ram_a == true) - dpp1_program_degamma_lutb_settings(dpp_base, params); - else - dpp1_program_degamma_luta_settings(dpp_base, params); - - dpp2_program_degamma_lut(dpp_base, params->rgb_resulted, params->hw_points_num, !is_ram_a); - dpp1_degamma_ram_select(dpp_base, !is_ram_a); -} - -void dpp2_set_degamma( - struct dpp *dpp_base, - enum ipp_degamma_mode mode) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - dpp2_enable_cm_block(dpp_base); - - switch (mode) { - case IPP_DEGAMMA_MODE_BYPASS: - /* Setting de gamma bypass for now */ - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0); - break; - case IPP_DEGAMMA_MODE_HW_sRGB: - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1); - break; - case IPP_DEGAMMA_MODE_HW_xvYCC: - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); - break; - case IPP_DEGAMMA_MODE_USER_PWL: - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); - break; - default: - BREAK_TO_DEBUGGER(); - break; - } -} - -static void program_gamut_remap( - struct dcn20_dpp *dpp, - const uint16_t *regval, - enum dcn20_gamut_remap_select select) -{ - uint32_t cur_select = 0; - struct color_matrices_reg gam_regs; - - if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) { - REG_SET(CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, 0); 
- return; - } - - /* determine which gamut_remap coefficients (A or B) we are using - * currently. select the alternate set to double buffer - * the update so gamut_remap is updated on frame boundary - */ - IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, - CM_TEST_DEBUG_DATA_STATUS_IDX, - CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select); - - /* value stored in dbg reg will be 1 greater than mode we want */ - if (cur_select != DCN2_GAMUT_REMAP_COEF_A) - select = DCN2_GAMUT_REMAP_COEF_A; - else - select = DCN2_GAMUT_REMAP_COEF_B; - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; - - if (select == DCN2_GAMUT_REMAP_COEF_A) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); - } else { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); - } - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - REG_SET( - CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, select); - -} - -void dpp2_cm_set_gamut_remap( - struct dpp *dpp_base, - const struct dpp_grph_csc_adjustment *adjust) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - int i = 0; - - if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) - /* Bypass if type is bypass or hw */ - program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS); - else { - struct fixed31_32 arr_matrix[12]; - uint16_t arr_reg_val[12]; - - for (i = 0; i < 12; i++) - arr_matrix[i] = adjust->temperature_matrix[i]; - - convert_float_matrix( - arr_reg_val, arr_matrix, 12); - - program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A); - } -} - -static void read_gamut_remap(struct dcn20_dpp *dpp, - uint16_t *regval, - enum dcn20_gamut_remap_select *select) -{ - struct color_matrices_reg gam_regs; - uint32_t selection; - - IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, - CM_TEST_DEBUG_DATA_STATUS_IDX, - CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &selection); - - *select = selection; - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; - - if (*select == DCN2_GAMUT_REMAP_COEF_A) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); - - cm_helper_read_color_matrices(dpp->base.ctx, - regval, - &gam_regs); - - } else if (*select == DCN2_GAMUT_REMAP_COEF_B) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); - - cm_helper_read_color_matrices(dpp->base.ctx, - regval, - &gam_regs); - } -} - -void dpp2_cm_get_gamut_remap(struct dpp *dpp_base, - struct dpp_grph_csc_adjustment *adjust) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - uint16_t arr_reg_val[12]; - enum dcn20_gamut_remap_select select; - - read_gamut_remap(dpp, arr_reg_val, &select); - - if (select == DCN2_GAMUT_REMAP_BYPASS) { - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; - return; - } - - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; - convert_hw_matrix(adjust->temperature_matrix, - arr_reg_val, ARRAY_SIZE(arr_reg_val)); -} - -void dpp2_program_input_csc( - struct dpp *dpp_base, - enum dc_color_space color_space, - 
enum dcn20_input_csc_select input_select, - const struct out_csc_color_matrix *tbl_entry) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - int i; - int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); - const uint16_t *regval = NULL; - uint32_t cur_select = 0; - enum dcn20_input_csc_select select; - struct color_matrices_reg icsc_regs; - - if (input_select == DCN2_ICSC_SELECT_BYPASS) { - REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); - return; - } - - if (tbl_entry == NULL) { - for (i = 0; i < arr_size; i++) - if (dpp_input_csc_matrix[i].color_space == color_space) { - regval = dpp_input_csc_matrix[i].regval; - break; - } - - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - } else { - regval = tbl_entry->regval; - } - - /* determine which CSC coefficients (A or B) we are using - * currently. select the alternate set to double buffer - * the CSC update so CSC is updated on frame boundary - */ - IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, - CM_TEST_DEBUG_DATA_STATUS_IDX, - CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select); - - if (cur_select != DCN2_ICSC_SELECT_ICSC_A) - select = DCN2_ICSC_SELECT_ICSC_A; - else - select = DCN2_ICSC_SELECT_ICSC_B; - - icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; - icsc_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; - icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; - icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; - - if (select == DCN2_ICSC_SELECT_ICSC_A) { - - icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); - icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); - - } else { - - icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12); - icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34); - - } - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &icsc_regs); - - REG_SET(CM_ICSC_CONTROL, 0, - CM_ICSC_MODE, select); -} - -static void dpp20_power_on_blnd_lut( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_SET(CM_MEM_PWR_CTRL, 0, - BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0:1); - -} - -static void dpp20_configure_blnd_lut( - struct dpp *dpp_base, - bool is_ram_a) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(CM_BLNDGAM_LUT_WRITE_EN_MASK, - CM_BLNDGAM_LUT_WRITE_EN_MASK, 7); - REG_UPDATE(CM_BLNDGAM_LUT_WRITE_EN_MASK, - CM_BLNDGAM_LUT_WRITE_SEL, is_ram_a == true ? 
0:1); - REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); -} - -static void dpp20_program_blnd_pwl( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num) -{ - uint32_t i; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - for (i = 0 ; i < num; i++) { - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg); - - REG_SET(CM_BLNDGAM_LUT_DATA, 0, - CM_BLNDGAM_LUT_DATA, rgb[i].delta_red_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, - CM_BLNDGAM_LUT_DATA, rgb[i].delta_green_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, - CM_BLNDGAM_LUT_DATA, rgb[i].delta_blue_reg); - - } - -} - -static void dcn20_dpp_cm_get_reg_field( - struct dcn20_dpp *dpp, - struct xfer_func_reg *reg) -{ - reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - - reg->shifts.field_region_end = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_B; - reg->masks.field_region_end = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_B; - reg->shifts.field_region_end_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->masks.field_region_end_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->shifts.field_region_end_base = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; - reg->masks.field_region_end_base = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; - reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; - reg->masks.field_region_linear_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; - reg->shifts.exp_region_start = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_B; - reg->masks.exp_region_start = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_B; - reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; - reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; -} - -/*program blnd lut RAM A*/ -static void dpp20_program_blnd_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - struct xfer_func_reg gam_regs; - - dcn20_dpp_cm_get_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMA_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMA_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMA_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMA_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMA_END_CNTL2_B); - gam_regs.start_end_cntl1_g = 
REG(CM_BLNDGAM_RAMA_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMA_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMA_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMA_END_CNTL2_R); - gam_regs.region_start = REG(CM_BLNDGAM_RAMA_REGION_0_1); - gam_regs.region_end = REG(CM_BLNDGAM_RAMA_REGION_32_33); - - cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -/*program blnd lut RAM B*/ -static void dpp20_program_blnd_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - struct xfer_func_reg gam_regs; - - dcn20_dpp_cm_get_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMB_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMB_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMB_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMB_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMB_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMB_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMB_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMB_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMB_END_CNTL2_R); - gam_regs.region_start = REG(CM_BLNDGAM_RAMB_REGION_0_1); - gam_regs.region_end = REG(CM_BLNDGAM_RAMB_REGION_32_33); - - cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -static enum dc_lut_mode dpp20_get_blndgam_current(struct dpp *dpp_base) -{ - enum dc_lut_mode mode; - uint32_t state_mode; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, &state_mode); - - switch (state_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 1: - mode = LUT_RAM_A; - break; - case 2: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - - return mode; -} - -bool dpp20_program_blnd_lut( - struct dpp *dpp_base, const struct pwl_params *params) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - if (params == NULL) { - REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_LUT_MODE, 0); - return false; - } - current_mode = dpp20_get_blndgam_current(dpp_base); - if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) - next_mode = LUT_RAM_B; - else - next_mode = LUT_RAM_A; - - dpp20_power_on_blnd_lut(dpp_base, true); - dpp20_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A); - - if (next_mode == LUT_RAM_A) - dpp20_program_blnd_luta_settings(dpp_base, params); - else - dpp20_program_blnd_lutb_settings(dpp_base, params); - - dpp20_program_blnd_pwl( - dpp_base, params->rgb_resulted, params->hw_points_num); - - REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_LUT_MODE, - next_mode == LUT_RAM_A ? 
1:2); - - return true; -} - - -static void dpp20_program_shaper_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num) -{ - uint32_t i, red, green, blue; - uint32_t red_delta, green_delta, blue_delta; - uint32_t red_value, green_value, blue_value; - - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - for (i = 0 ; i < num; i++) { - - red = rgb[i].red_reg; - green = rgb[i].green_reg; - blue = rgb[i].blue_reg; - - red_delta = rgb[i].delta_red_reg; - green_delta = rgb[i].delta_green_reg; - blue_delta = rgb[i].delta_blue_reg; - - red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff); - green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff); - blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff); - - REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, red_value); - REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, green_value); - REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, blue_value); - } - -} - -static enum dc_lut_mode dpp20_get_shaper_current(struct dpp *dpp_base) -{ - enum dc_lut_mode mode; - uint32_t state_mode; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_GET(CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, &state_mode); - - switch (state_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 1: - mode = LUT_RAM_A; - break; - case 2: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - - return mode; -} - -static void dpp20_configure_shaper_lut( - struct dpp *dpp_base, - bool is_ram_a) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, - CM_SHAPER_LUT_WRITE_EN_MASK, 7); - REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, - CM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1); - REG_SET(CM_SHAPER_LUT_INDEX, 0, CM_SHAPER_LUT_INDEX, 0); -} - -/*program shaper RAM A*/ - -static void dpp20_program_shaper_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - const struct gamma_curve *curve; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_SET_2(CM_SHAPER_RAMA_START_CNTL_B, 0, - CM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(CM_SHAPER_RAMA_START_CNTL_G, 0, - CM_SHAPER_RAMA_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, 0); - REG_SET_2(CM_SHAPER_RAMA_START_CNTL_R, 0, - CM_SHAPER_RAMA_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, 0); - - REG_SET_2(CM_SHAPER_RAMA_END_CNTL_B, 0, - CM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMA_END_CNTL_G, 0, - CM_SHAPER_RAMA_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMA_END_CNTL_R, 0, - CM_SHAPER_RAMA_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); - - curve = params->arr_curve_points; - REG_SET_4(CM_SHAPER_RAMA_REGION_0_1, 0, - CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - 
REG_SET_4(CM_SHAPER_RAMA_REGION_2_3, 0, - CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_4_5, 0, - CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_6_7, 0, - CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_8_9, 0, - CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_10_11, 0, - CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_12_13, 0, - CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_14_15, 0, - CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_16_17, 0, - CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_18_19, 0, - CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_20_21, 0, - CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_22_23, 0, - CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_24_25, 0, - CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, curve[0].offset, - 
CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_26_27, 0, - CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_28_29, 0, - CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_30_31, 0, - CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_32_33, 0, - CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); -} - -/*program shaper RAM B*/ -static void dpp20_program_shaper_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - const struct gamma_curve *curve; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_SET_2(CM_SHAPER_RAMB_START_CNTL_B, 0, - CM_SHAPER_RAMB_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(CM_SHAPER_RAMB_START_CNTL_G, 0, - CM_SHAPER_RAMB_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, 0); - REG_SET_2(CM_SHAPER_RAMB_START_CNTL_R, 0, - CM_SHAPER_RAMB_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, 0); - - REG_SET_2(CM_SHAPER_RAMB_END_CNTL_B, 0, - CM_SHAPER_RAMB_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMB_END_CNTL_G, 0, - CM_SHAPER_RAMB_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMB_END_CNTL_R, 0, - CM_SHAPER_RAMB_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); - - curve = params->arr_curve_points; - REG_SET_4(CM_SHAPER_RAMB_REGION_0_1, 0, - CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_2_3, 0, - CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_4_5, 0, - 
CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_6_7, 0, - CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_8_9, 0, - CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_10_11, 0, - CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_12_13, 0, - CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_14_15, 0, - CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_16_17, 0, - CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_18_19, 0, - CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_20_21, 0, - CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_22_23, 0, - CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_24_25, 0, - CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_26_27, 0, - CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, - 
CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_28_29, 0, - CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_30_31, 0, - CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_32_33, 0, - CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); - -} - - -bool dpp20_program_shaper( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - if (params == NULL) { - REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0); - return false; - } - current_mode = dpp20_get_shaper_current(dpp_base); - - if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) - next_mode = LUT_RAM_B; - else - next_mode = LUT_RAM_A; - - dpp20_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A); - - if (next_mode == LUT_RAM_A) - dpp20_program_shaper_luta_settings(dpp_base, params); - else - dpp20_program_shaper_lutb_settings(dpp_base, params); - - dpp20_program_shaper_lut( - dpp_base, params->rgb_resulted, params->hw_points_num); - - REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2); - - return true; - -} - -static enum dc_lut_mode get3dlut_config( - struct dpp *dpp_base, - bool *is_17x17x17, - bool *is_12bits_color_channel) -{ - uint32_t i_mode, i_enable_10bits, lut_size; - enum dc_lut_mode mode; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL, - CM_3DLUT_CONFIG_STATUS, &i_mode, - CM_3DLUT_30BIT_EN, &i_enable_10bits); - - switch (i_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 1: - mode = LUT_RAM_A; - break; - case 2: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - if (i_enable_10bits > 0) - *is_12bits_color_channel = false; - else - *is_12bits_color_channel = true; - - REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &lut_size); - - if (lut_size == 0) - *is_17x17x17 = true; - else - *is_17x17x17 = false; - - return mode; -} -/* - * select ramA or ramB, or bypass - * select color channel size 10 or 12 bits - * select 3dlut size 17x17x17 or 9x9x9 - */ -static void dpp20_set_3dlut_mode( - struct dpp *dpp_base, - enum dc_lut_mode mode, - bool is_color_channel_12bits, - bool is_lut_size17x17x17) -{ - uint32_t lut_mode; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - if (mode == LUT_BYPASS) - lut_mode = 0; - else if (mode == LUT_RAM_A) - lut_mode = 1; - else - lut_mode = 2; - - REG_UPDATE_2(CM_3DLUT_MODE, - CM_3DLUT_MODE, lut_mode, - CM_3DLUT_SIZE, is_lut_size17x17x17 == true ? 
0 : 1); -} - -static void dpp20_select_3dlut_ram( - struct dpp *dpp_base, - enum dc_lut_mode mode, - bool is_color_channel_12bits) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE_2(CM_3DLUT_READ_WRITE_CONTROL, - CM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1, - CM_3DLUT_30BIT_EN, - is_color_channel_12bits == true ? 0:1); -} - - - -static void dpp20_set3dlut_ram12( - struct dpp *dpp_base, - const struct dc_rgb *lut, - uint32_t entries) -{ - uint32_t i, red, green, blue, red1, green1, blue1; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - for (i = 0 ; i < entries; i += 2) { - red = lut[i].red<<4; - green = lut[i].green<<4; - blue = lut[i].blue<<4; - red1 = lut[i+1].red<<4; - green1 = lut[i+1].green<<4; - blue1 = lut[i+1].blue<<4; - - REG_SET_2(CM_3DLUT_DATA, 0, - CM_3DLUT_DATA0, red, - CM_3DLUT_DATA1, red1); - - REG_SET_2(CM_3DLUT_DATA, 0, - CM_3DLUT_DATA0, green, - CM_3DLUT_DATA1, green1); - - REG_SET_2(CM_3DLUT_DATA, 0, - CM_3DLUT_DATA0, blue, - CM_3DLUT_DATA1, blue1); - - } -} - -/* - * load selected lut with 10 bits color channels - */ -static void dpp20_set3dlut_ram10( - struct dpp *dpp_base, - const struct dc_rgb *lut, - uint32_t entries) -{ - uint32_t i, red, green, blue, value; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - for (i = 0; i < entries; i++) { - red = lut[i].red; - green = lut[i].green; - blue = lut[i].blue; - - value = (red<<20) | (green<<10) | blue; - - REG_SET(CM_3DLUT_DATA_30BIT, 0, CM_3DLUT_DATA_30BIT, value); - } - -} - - -static void dpp20_select_3dlut_ram_mask( - struct dpp *dpp_base, - uint32_t ram_selection_mask) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, - ram_selection_mask); - REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0); -} - -bool dpp20_program_3dlut( - struct dpp *dpp_base, - struct tetrahedral_params *params) -{ - enum dc_lut_mode mode; - bool is_17x17x17; - bool is_12bits_color_channel; - struct dc_rgb *lut0; - struct dc_rgb *lut1; - struct dc_rgb *lut2; - struct dc_rgb *lut3; - int lut_size0; - int lut_size; - - if (params == NULL) { - dpp20_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false); - return false; - } - mode = get3dlut_config(dpp_base, &is_17x17x17, &is_12bits_color_channel); - - if (mode == LUT_BYPASS || mode == LUT_RAM_B) - mode = LUT_RAM_A; - else - mode = LUT_RAM_B; - - is_17x17x17 = !params->use_tetrahedral_9; - is_12bits_color_channel = params->use_12bits; - if (is_17x17x17) { - lut0 = params->tetrahedral_17.lut0; - lut1 = params->tetrahedral_17.lut1; - lut2 = params->tetrahedral_17.lut2; - lut3 = params->tetrahedral_17.lut3; - lut_size0 = sizeof(params->tetrahedral_17.lut0)/ - sizeof(params->tetrahedral_17.lut0[0]); - lut_size = sizeof(params->tetrahedral_17.lut1)/ - sizeof(params->tetrahedral_17.lut1[0]); - } else { - lut0 = params->tetrahedral_9.lut0; - lut1 = params->tetrahedral_9.lut1; - lut2 = params->tetrahedral_9.lut2; - lut3 = params->tetrahedral_9.lut3; - lut_size0 = sizeof(params->tetrahedral_9.lut0)/ - sizeof(params->tetrahedral_9.lut0[0]); - lut_size = sizeof(params->tetrahedral_9.lut1)/ - sizeof(params->tetrahedral_9.lut1[0]); - } - - dpp20_select_3dlut_ram(dpp_base, mode, - is_12bits_color_channel); - dpp20_select_3dlut_ram_mask(dpp_base, 0x1); - if (is_12bits_color_channel) - dpp20_set3dlut_ram12(dpp_base, lut0, lut_size0); - else - dpp20_set3dlut_ram10(dpp_base, lut0, lut_size0); - - dpp20_select_3dlut_ram_mask(dpp_base, 0x2); - if (is_12bits_color_channel) - dpp20_set3dlut_ram12(dpp_base, lut1, lut_size); - else 
- dpp20_set3dlut_ram10(dpp_base, lut1, lut_size); - - dpp20_select_3dlut_ram_mask(dpp_base, 0x4); - if (is_12bits_color_channel) - dpp20_set3dlut_ram12(dpp_base, lut2, lut_size); - else - dpp20_set3dlut_ram10(dpp_base, lut2, lut_size); - - dpp20_select_3dlut_ram_mask(dpp_base, 0x8); - if (is_12bits_color_channel) - dpp20_set3dlut_ram12(dpp_base, lut3, lut_size); - else - dpp20_set3dlut_ram10(dpp_base, lut3, lut_size); - - - dpp20_set_3dlut_mode(dpp_base, mode, is_12bits_color_channel, - is_17x17x17); - - return true; -} - -void dpp2_set_hdr_multiplier( - struct dpp *dpp_base, - uint32_t multiplier) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c index f8667be570..80779e85e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c @@ -299,6 +299,17 @@ void dwb2_set_scaler(struct dwbc *dwbc, struct dc_dwb_params *params) } } + + if (dwbc20->dwbc_mask->WBSCL_COEF_RAM_SEL) { + /* Swap double buffered coefficient set */ + uint32_t wbscl_mode = REG_READ(WBSCL_MODE); + bool coef_ram_current = get_reg_field_value_ex( + wbscl_mode, dwbc20->dwbc_mask->WBSCL_COEF_RAM_SEL_CURRENT, + dwbc20->dwbc_shift->WBSCL_COEF_RAM_SEL_CURRENT); + + REG_UPDATE(WBSCL_MODE, WBSCL_COEF_RAM_SEL, !coef_ram_current); + } + } static const struct dwbc_funcs dcn20_dwbc_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index 6eebcb22e3..c6f859871d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -570,7 +570,7 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub, static bool hubbub2_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h index 2f6146bf1d..24a9c45988 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h @@ -85,7 +85,7 @@ struct dcn20_hubbub { const struct dcn_hubbub_shift *shifts; const struct dcn_hubbub_mask *masks; unsigned int debug_test_index_pstate; - struct dcn_watermark_set watermarks; + union dcn_watermark_set watermarks; int num_vmid; struct dcn20_vmid vmid[16]; unsigned int detile_buf_size; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 89c3bf0fe0..6bba020ad6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -1331,6 +1331,12 @@ void hubp2_read_state(struct hubp *hubp) SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height, PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear); + if (REG(DCHUBP_CNTL)) + s->hubp_cntl = REG_READ(DCHUBP_CNTL); + + if (REG(DCSURF_FLIP_CONTROL)) + s->flip_control = REG_READ(DCSURF_FLIP_CONTROL); + } static void hubp2_validate_dml_output(struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index efa2adf4f8..8da3084d93 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -147,7 +147,7 
@@ uint32_t DCN_CUR1_TTU_CNTL1;\ uint32_t VMID_SETTINGS_0 - +/*shared with dcn3.x*/ #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \ DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \ uint32_t FLIP_PARAMETERS_3;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index b2b266953d..c34e04cac9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -147,7 +147,8 @@ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\ - LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh) + LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh) #define DPCS_DCN2_MASK_SH_LIST(mask_sh)\ DPCS_MASK_SH_LIST(mask_sh),\ @@ -231,6 +232,8 @@ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \ SRI(DPCSTX_TX_CNTL, DPCSTX, id), \ + SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \ + SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \ SR(RDPCSTX0_RDPCSTX_SCRATCH) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 16b5ff208d..ea73473b97 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -395,9 +395,12 @@ static void mpc20_program_ogam_pwl( MPCC_OGAM_LUT_DATA, rgb[i].delta_green_reg); REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].delta_blue_reg); - } + REG_SEQ_SUBMIT(); + PERF_TRACE(); + REG_SEQ_WAIT_DONE(); + PERF_TRACE(); } static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id, @@ -501,11 +504,6 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id) ASSERT(!mpc_disabled); ASSERT(!mpc_idle); } - - REG_SEQ_SUBMIT(); - PERF_TRACE(); - REG_SEQ_WAIT_DONE(); - PERF_TRACE(); } static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile index 2b0b4f32e1..3880db59e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile @@ -2,7 +2,7 @@ # # Makefile for DCN. DCN201 = dcn201_hubbub.o\ - dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_dpp.o \ + dcn201_mpc.o dcn201_hubp.o dcn201_opp.o \ dcn201_dccg.o dcn201_link_encoder.o AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c deleted file mode 100644 index f809a7d210..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c +++ /dev/null @@ -1,313 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "core_types.h" - -#include "reg_helper.h" -#include "dcn201_dpp.h" -#include "basics/conversion.h" - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - -static void dpp201_cnv_setup( - struct dpp *dpp_base, - enum surface_pixel_format format, - enum expansion_mode mode, - struct dc_csc_transform input_csc_color_matrix, - enum dc_color_space input_color_space, - struct cnv_alpha_2bit_lut *alpha_2bit_lut) -{ - struct dcn201_dpp *dpp = TO_DCN201_DPP(dpp_base); - uint32_t pixel_format = 0; - uint32_t alpha_en = 1; - enum dc_color_space color_space = COLOR_SPACE_SRGB; - enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS; - bool force_disable_cursor = false; - uint32_t is_2bit = 0; - - REG_SET_2(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_EXPANSION_MODE, mode); - - REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); - REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); - REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); - REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); - - switch (format) { - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - pixel_format = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - pixel_format = 3; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - pixel_format = 8; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - pixel_format = 10; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - force_disable_cursor = false; - pixel_format = 65; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: - force_disable_cursor = true; - pixel_format = 64; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - force_disable_cursor = true; - pixel_format = 67; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - force_disable_cursor = true; - pixel_format = 66; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - pixel_format = 22; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - pixel_format = 24; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - pixel_format = 25; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: - pixel_format = 12; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: - pixel_format = 112; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: - pixel_format = 113; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: - pixel_format = 114; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - 
is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: - pixel_format = 115; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: - pixel_format = 118; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: - pixel_format = 119; - alpha_en = 0; - break; - default: - break; - } - - /* Set default color space based on format if none is given. */ - color_space = input_color_space ? input_color_space : color_space; - - if (is_2bit == 1 && alpha_2bit_lut != NULL) { - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); - } - - REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, pixel_format); - REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); - - dpp1_program_input_csc(dpp_base, color_space, select, NULL); - - if (force_disable_cursor) { - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, 0); - REG_UPDATE(CURSOR0_CONTROL, - CUR0_ENABLE, 0); - } - dpp2_power_on_obuf(dpp_base, true); -} - -#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19)) - -static bool dpp201_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps) -{ - if (scl_data->viewport.width != scl_data->h_active && - scl_data->viewport.height != scl_data->v_active && - dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && - scl_data->format == PIXEL_FORMAT_FP16) - return false; - - if (scl_data->viewport.width > scl_data->h_active && - dpp->ctx->dc->debug.max_downscale_src_width != 0 && - scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) - return false; - - if (scl_data->ratios.horz.value == (8ll << 32)) - scl_data->ratios.horz.value--; - if (scl_data->ratios.vert.value == (8ll << 32)) - scl_data->ratios.vert.value--; - if (scl_data->ratios.horz_c.value == (8ll << 32)) - scl_data->ratios.horz_c.value--; - if (scl_data->ratios.vert_c.value == (8ll << 32)) - scl_data->ratios.vert_c.value--; - - if (in_taps->h_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz) > 4) - scl_data->taps.h_taps = 8; - else - scl_data->taps.h_taps = 4; - } else - scl_data->taps.h_taps = in_taps->h_taps; - - if (in_taps->v_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert) > 4) - scl_data->taps.v_taps = 8; - else - scl_data->taps.v_taps = 4; - } else - scl_data->taps.v_taps = in_taps->v_taps; - if (in_taps->v_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4) - scl_data->taps.v_taps_c = 4; - else - scl_data->taps.v_taps_c = 2; - } else - scl_data->taps.v_taps_c = in_taps->v_taps_c; - if (in_taps->h_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4) - scl_data->taps.h_taps_c = 4; - else - scl_data->taps.h_taps_c = 2; - } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) - scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; - else - scl_data->taps.h_taps_c = in_taps->h_taps_c; - - if (!dpp->ctx->dc->debug.always_scale) { - if (IDENTITY_RATIO(scl_data->ratios.horz)) - scl_data->taps.h_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert)) - scl_data->taps.v_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.horz_c)) - scl_data->taps.h_taps_c = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert_c)) - scl_data->taps.v_taps_c = 1; - } - - return 
true; -} - -static struct dpp_funcs dcn201_dpp_funcs = { - .dpp_read_state = dpp20_read_state, - .dpp_reset = dpp_reset, - .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp201_get_optimal_number_of_taps, - .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, - .dpp_set_csc_adjustment = NULL, - .dpp_set_csc_default = NULL, - .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl, - .dpp_set_degamma = dpp2_set_degamma, - .dpp_program_input_lut = dpp2_dummy_program_input_lut, - .dpp_full_bypass = dpp1_full_bypass, - .dpp_setup = dpp201_cnv_setup, - .dpp_program_degamma_pwl = dpp2_set_degamma_pwl, - .dpp_program_blnd_lut = dpp20_program_blnd_lut, - .dpp_program_shaper_lut = dpp20_program_shaper, - .dpp_program_3dlut = dpp20_program_3dlut, - .dpp_program_bias_and_scale = NULL, - .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, - .set_cursor_attributes = dpp2_set_cursor_attributes, - .set_cursor_position = dpp1_set_cursor_position, - .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, - .dpp_dppclk_control = dpp1_dppclk_control, - .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, - .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap, -}; - -static struct dpp_caps dcn201_dpp_cap = { - .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, - .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, -}; - -bool dpp201_construct( - struct dcn201_dpp *dpp, - struct dc_context *ctx, - uint32_t inst, - const struct dcn201_dpp_registers *tf_regs, - const struct dcn201_dpp_shift *tf_shift, - const struct dcn201_dpp_mask *tf_mask) -{ - dpp->base.ctx = ctx; - - dpp->base.inst = inst; - dpp->base.funcs = &dcn201_dpp_funcs; - dpp->base.caps = &dcn201_dpp_cap; - - dpp->tf_regs = tf_regs; - dpp->tf_shift = tf_shift; - dpp->tf_mask = tf_mask; - - dpp->lb_pixel_depth_supported = - LB_PIXEL_DEPTH_18BPP | - LB_PIXEL_DEPTH_24BPP | - LB_PIXEL_DEPTH_30BPP; - - dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; - dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; - - return true; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h deleted file mode 100644 index cbd5b47b4a..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h +++ /dev/null @@ -1,83 +0,0 @@ -/* Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DCN201_DPP_H__ -#define __DCN201_DPP_H__ - -#include "dcn20/dcn20_dpp.h" - -#define TO_DCN201_DPP(dpp)\ - container_of(dpp, struct dcn201_dpp, base) - -#define TF_REG_LIST_DCN201(id) \ - TF_REG_LIST_DCN20(id) - -#define TF_REG_LIST_SH_MASK_DCN201(mask_sh)\ - TF_REG_LIST_SH_MASK_DCN20(mask_sh) - -#define TF_REG_FIELD_LIST_DCN201(type) \ - TF_REG_FIELD_LIST_DCN2_0(type) - -struct dcn201_dpp_shift { - TF_REG_FIELD_LIST_DCN201(uint8_t); -}; - -struct dcn201_dpp_mask { - TF_REG_FIELD_LIST_DCN201(uint32_t); -}; - -#define DPP_DCN201_REG_VARIABLE_LIST \ - DPP_DCN2_REG_VARIABLE_LIST - -struct dcn201_dpp_registers { - DPP_DCN201_REG_VARIABLE_LIST; -}; - -struct dcn201_dpp { - struct dpp base; - - const struct dcn201_dpp_registers *tf_regs; - const struct dcn201_dpp_shift *tf_shift; - const struct dcn201_dpp_mask *tf_mask; - - const uint16_t *filter_v; - const uint16_t *filter_h; - const uint16_t *filter_v_c; - const uint16_t *filter_h_c; - int lb_pixel_depth_supported; - int lb_memory_size; - int lb_bits_per_entry; - bool is_write_to_ram_a_safe; - struct scaler_data scl_data; - struct pwl_params pwl_data; -}; - -bool dpp201_construct(struct dcn201_dpp *dpp2, - struct dc_context *ctx, - uint32_t inst, - const struct dcn201_dpp_registers *tf_regs, - const struct dcn201_dpp_shift *tf_shift, - const struct dcn201_dpp_mask *tf_mask); - -#endif /* __DC_HWSS_DCN201_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c index 037d265431..63798132ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c @@ -52,7 +52,7 @@ static bool hubbub201_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -103,5 +103,5 @@ void hubbub201_construct(struct dcn20_hubbub *hubbub, hubbub->masks = hubbub_mask; hubbub->debug_test_index_pstate = 0xB; - hubbub->detile_buf_size = 164 * 1024; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c index 35dd4bac24..cd2bfcc512 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c @@ -77,6 +77,7 @@ static void hubp201_program_requestor(struct hubp *hubp, MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode, CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode); + /* no need to program PTE */ REG_SET_5(DCHUBP_REQ_SIZE_CONFIG, 0, CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size, MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size, @@ -99,6 +100,10 @@ static void hubp201_setup( struct _vcs_dpi_display_rq_regs_st *rq_regs, struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest) { + /* + * otg is locked when this func is called. Register are double buffered. 
+ * disable the requestors is not needed + */ hubp2_vready_at_or_After_vsync(hubp, pipe_dest); hubp201_program_requestor(hubp, rq_regs); hubp201_program_deadline(hubp, dlg_attr, ttu_attr); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h index 8b95ef2513..be25e8dc06 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h @@ -30,6 +30,10 @@ #define DPCS_DCN201_MASK_SH_LIST(mask_sh)\ DPCS_MASK_SH_LIST(mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD, mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD_EN, mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD, mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD_EN, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DP4, mask_sh),\ @@ -44,7 +48,15 @@ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_EN, mask_sh) #define DPCS_DCN201_REG_LIST(id) \ - DPCS_DCN2_CMN_REG_LIST(id) + DPCS_DCN2_CMN_REG_LIST(id), \ + SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id) void dcn201_link_encoder_construct( struct dcn20_link_encoder *enc20, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index aeb0e0d9b7..2546224b32 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -140,7 +140,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub, bool hubbub21_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -334,7 +334,7 @@ bool hubbub21_program_urgent_watermarks( bool hubbub21_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -487,7 +487,7 @@ bool hubbub21_program_stutter_watermarks( bool hubbub21_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -573,7 +573,7 @@ bool hubbub21_program_pstate_watermarks( bool hubbub21_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h index d8eb2bb728..ab2ce03135 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h @@ -127,22 +127,22 @@ int hubbub21_init_dchub(struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config); bool 
hubbub21_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub21_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub21_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub21_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile index b5b2aa3b37..c6ca70f3c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile @@ -25,13 +25,11 @@ DCN30 := dcn30_hubbub.o \ dcn30_hubp.o \ - dcn30_dpp.o \ dcn30_dccg.o \ dcn30_mpc.o dcn30_vpg.o \ dcn30_afmt.o \ dcn30_dio_stream_encoder.o \ dcn30_dwb.o \ - dcn30_dpp_cm.o \ dcn30_dwb_cm.o \ dcn30_cm_common.o \ dcn30_mmhubbub.o \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c index ddb344056d..b8327237ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c @@ -26,7 +26,7 @@ #include "dm_services.h" #include "core_types.h" #include "reg_helper.h" -#include "dcn30_dpp.h" +#include "dcn30/dcn30_dpp.h" #include "basics/conversion.h" #include "dcn30_cm_common.h" #include "custom_float.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h index 35a613bb08..3f1da7f3a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h @@ -29,15 +29,9 @@ #include "dcn20/dcn20_dccg.h" -#define DCCG_REG_LIST_DCN3AG() \ - DCCG_COMMON_REG_LIST_DCN_BASE(),\ - SR(PHYASYMCLK_CLOCK_CNTL),\ - SR(PHYBSYMCLK_CLOCK_CNTL),\ - SR(PHYCSYMCLK_CLOCK_CNTL) - - #define DCCG_REG_LIST_DCN30() \ DCCG_REG_LIST_DCN2(),\ + DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0),\ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3),\ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 4),\ @@ -46,19 +40,10 @@ SR(PHYBSYMCLK_CLOCK_CNTL),\ SR(PHYCSYMCLK_CLOCK_CNTL) -#define DCCG_MASK_SH_LIST_DCN3AG(mask_sh) \ - DCCG_MASK_SH_LIST_DCN2_1(mask_sh),\ - DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\ - DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\ - DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\ - DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\ - DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\ - DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\ - DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\ - DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh) - #define DCCG_MASK_SH_LIST_DCN3(mask_sh) \ DCCG_MASK_SH_LIST_DCN2(mask_sh),\ + DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\ + DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\ diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c index 1fb8fd7afc..b8e31b5ea1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c @@ -30,8 +30,6 @@ #include "dcn30_dio_link_encoder.h" #include "stream_encoder.h" #include "dc_bios_types.h" -/* #include "dcn3ag/dcn3ag_phy_fw.h" */ - #include "gpio_service_interface.h" #define CTX \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h index f2d90f2b8b..5b6177c2ae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h @@ -55,7 +55,8 @@ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id) #define LINK_ENCODER_MASK_SH_LIST_DCN30(mask_sh) \ - LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh) + LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\ + LE_SF(DIG0_TMDS_DCBALANCER_CONTROL, TMDS_SYNC_DCBAL_EN, mask_sh) #define DPCS_DCN3_MASK_SH_LIST(mask_sh)\ DPCS_DCN2_MASK_SH_LIST(mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 005dbe099a..425b830b88 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -29,9 +29,6 @@ #include "reg_helper.h" #include "hw_shared.h" #include "dc.h" -#include "core_types.h" -#include - #define DC_LOGGER \ enc1->base.ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c deleted file mode 100644 index a3a769aad0..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ /dev/null @@ -1,1527 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "core_types.h" -#include "reg_helper.h" -#include "dcn30_dpp.h" -#include "basics/conversion.h" -#include "dcn30_cm_common.h" - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - - -void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - uint32_t gamcor_lut_mode, rgam_lut_mode; - - REG_GET(DPP_CONTROL, - DPP_CLOCK_ENABLE, &s->is_enabled); - - // Pre-degamma (ROM) - REG_GET_2(PRE_DEGAM, - PRE_DEGAM_MODE, &s->pre_dgam_mode, - PRE_DEGAM_SELECT, &s->pre_dgam_select); - - // Gamma Correction (RAM) - REG_GET(CM_GAMCOR_CONTROL, - CM_GAMCOR_MODE_CURRENT, &s->gamcor_mode); - if (s->gamcor_mode) { - REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &gamcor_lut_mode); - if (!gamcor_lut_mode) - s->gamcor_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B - } - - // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) - REG_GET(CM_SHAPER_CONTROL, - CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); - REG_GET(CM_3DLUT_MODE, - CM_3DLUT_MODE_CURRENT, &s->lut3d_mode); - REG_GET(CM_3DLUT_READ_WRITE_CONTROL, - CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); - REG_GET(CM_3DLUT_MODE, - CM_3DLUT_SIZE, &s->lut3d_size); - - // Blend/Out Gamma (RAM) - REG_GET(CM_BLNDGAM_CONTROL, - CM_BLNDGAM_MODE_CURRENT, &s->rgam_lut_mode); - if (s->rgam_lut_mode){ - REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &rgam_lut_mode); - if (!rgam_lut_mode) - s->rgam_lut_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B - } -} - -/*program post scaler scs block in dpp CM*/ -void dpp3_program_post_csc( - struct dpp *dpp_base, - enum dc_color_space color_space, - enum dcn10_input_csc_select input_select, - const struct out_csc_color_matrix *tbl_entry) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - int i; - int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); - const uint16_t *regval = NULL; - uint32_t cur_select = 0; - enum dcn10_input_csc_select select; - struct color_matrices_reg gam_regs; - - if (input_select == INPUT_CSC_SELECT_BYPASS) { - REG_SET(CM_POST_CSC_CONTROL, 0, CM_POST_CSC_MODE, 0); - return; - } - - if (tbl_entry == NULL) { - for (i = 0; i < arr_size; i++) - if (dpp_input_csc_matrix[i].color_space == color_space) { - regval = dpp_input_csc_matrix[i].regval; - break; - } - - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - } else { - regval = tbl_entry->regval; - } - - /* determine which CSC matrix (icsc or coma) we are using - * currently. 
select the alternate set to double buffer - * the CSC update so CSC is updated on frame boundary - */ - REG_GET(CM_POST_CSC_CONTROL, - CM_POST_CSC_MODE_CURRENT, &cur_select); - - if (cur_select != INPUT_CSC_SELECT_ICSC) - select = INPUT_CSC_SELECT_ICSC; - else - select = INPUT_CSC_SELECT_COMA; - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_POST_CSC_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_POST_CSC_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_POST_CSC_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_POST_CSC_C12; - - if (select == INPUT_CSC_SELECT_ICSC) { - - gam_regs.csc_c11_c12 = REG(CM_POST_CSC_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_POST_CSC_C33_C34); - - } else { - - gam_regs.csc_c11_c12 = REG(CM_POST_CSC_B_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_POST_CSC_B_C33_C34); - - } - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - REG_SET(CM_POST_CSC_CONTROL, 0, - CM_POST_CSC_MODE, select); -} - - -/*CNVC degam unit has read only LUTs*/ -void dpp3_set_pre_degam(struct dpp *dpp_base, enum dc_transfer_func_predefined tr) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - int pre_degam_en = 1; - int degamma_lut_selection = 0; - - switch (tr) { - case TRANSFER_FUNCTION_LINEAR: - case TRANSFER_FUNCTION_UNITY: - pre_degam_en = 0; //bypass - break; - case TRANSFER_FUNCTION_SRGB: - degamma_lut_selection = 0; - break; - case TRANSFER_FUNCTION_BT709: - degamma_lut_selection = 4; - break; - case TRANSFER_FUNCTION_PQ: - degamma_lut_selection = 5; - break; - case TRANSFER_FUNCTION_HLG: - degamma_lut_selection = 6; - break; - case TRANSFER_FUNCTION_GAMMA22: - degamma_lut_selection = 1; - break; - case TRANSFER_FUNCTION_GAMMA24: - degamma_lut_selection = 2; - break; - case TRANSFER_FUNCTION_GAMMA26: - degamma_lut_selection = 3; - break; - default: - pre_degam_en = 0; - break; - } - - REG_SET_2(PRE_DEGAM, 0, - PRE_DEGAM_MODE, pre_degam_en, - PRE_DEGAM_SELECT, degamma_lut_selection); -} - -void dpp3_cnv_setup ( - struct dpp *dpp_base, - enum surface_pixel_format format, - enum expansion_mode mode, - struct dc_csc_transform input_csc_color_matrix, - enum dc_color_space input_color_space, - struct cnv_alpha_2bit_lut *alpha_2bit_lut) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - uint32_t pixel_format = 0; - uint32_t alpha_en = 1; - enum dc_color_space color_space = COLOR_SPACE_SRGB; - enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS; - bool force_disable_cursor = false; - uint32_t is_2bit = 0; - uint32_t alpha_plane_enable = 0; - uint32_t dealpha_en = 0, dealpha_ablnd_en = 0; - uint32_t realpha_en = 0, realpha_ablnd_en = 0; - uint32_t program_prealpha_dealpha = 0; - struct out_csc_color_matrix tbl_entry; - int i; - - REG_SET_2(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_EXPANSION_MODE, mode); - - REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); - REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); - REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); - REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); - - REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_R, 0); - REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_G, 1); - REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_B, 2); - - switch (format) { - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - pixel_format = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - pixel_format = 3; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - pixel_format = 8; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - 
pixel_format = 10; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - force_disable_cursor = false; - pixel_format = 65; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: - force_disable_cursor = true; - pixel_format = 64; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - force_disable_cursor = true; - pixel_format = 67; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - force_disable_cursor = true; - pixel_format = 66; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: - pixel_format = 26; /* ARGB16161616_UNORM */ - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - pixel_format = 24; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - pixel_format = 25; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: - pixel_format = 12; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: - pixel_format = 112; - break; - case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: - pixel_format = 113; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: - pixel_format = 114; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: - pixel_format = 115; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGBE: - pixel_format = 116; - alpha_plane_enable = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: - pixel_format = 116; - alpha_plane_enable = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: - pixel_format = 118; - break; - case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: - pixel_format = 119; - break; - default: - break; - } - - /* Set default color space based on format if none is given. */ - color_space = input_color_space ? input_color_space : color_space; - - if (is_2bit == 1 && alpha_2bit_lut != NULL) { - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); - } - - REG_SET_2(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, pixel_format, - CNVC_ALPHA_PLANE_ENABLE, alpha_plane_enable); - REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); - - if (program_prealpha_dealpha) { - dealpha_en = 1; - realpha_en = 1; - } - REG_SET_2(PRE_DEALPHA, 0, - PRE_DEALPHA_EN, dealpha_en, - PRE_DEALPHA_ABLND_EN, dealpha_ablnd_en); - REG_SET_2(PRE_REALPHA, 0, - PRE_REALPHA_EN, realpha_en, - PRE_REALPHA_ABLND_EN, realpha_ablnd_en); - - /* If input adjustment exists, program the ICSC with those values. 
*/ - if (input_csc_color_matrix.enable_adjustment == true) { - for (i = 0; i < 12; i++) - tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; - - tbl_entry.color_space = input_color_space; - - if (color_space >= COLOR_SPACE_YCBCR601) - select = INPUT_CSC_SELECT_ICSC; - else - select = INPUT_CSC_SELECT_BYPASS; - - dpp3_program_post_csc(dpp_base, color_space, select, - &tbl_entry); - } else { - dpp3_program_post_csc(dpp_base, color_space, select, NULL); - } - - if (force_disable_cursor) { - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, 0); - REG_UPDATE(CURSOR0_CONTROL, - CUR0_ENABLE, 0); - } -} - -#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19)) - -void dpp3_set_cursor_attributes( - struct dpp *dpp_base, - struct dc_cursor_attributes *cursor_attributes) -{ - enum dc_cursor_color_format color_format = cursor_attributes->color_format; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - int cur_rom_en = 0; - - if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || - color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { - if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { - cur_rom_en = 1; - } - } - - REG_UPDATE_3(CURSOR0_CONTROL, - CUR0_MODE, color_format, - CUR0_EXPANSION_MODE, 0, - CUR0_ROM_EN, cur_rom_en); - - if (color_format == CURSOR_MODE_MONO) { - /* todo: clarify what to program these to */ - REG_UPDATE(CURSOR0_COLOR0, - CUR0_COLOR0, 0x00000000); - REG_UPDATE(CURSOR0_COLOR1, - CUR0_COLOR1, 0xFFFFFFFF); - } - - dpp_base->att.cur0_ctl.bits.expansion_mode = 0; - dpp_base->att.cur0_ctl.bits.cur0_rom_en = cur_rom_en; - dpp_base->att.cur0_ctl.bits.mode = color_format; -} - - -bool dpp3_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps) -{ - int num_part_y, num_part_c; - int max_taps_y, max_taps_c; - int min_taps_y, min_taps_c; - enum lb_memory_config lb_config; - - if (scl_data->viewport.width > scl_data->h_active && - dpp->ctx->dc->debug.max_downscale_src_width != 0 && - scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) - return false; - - /* - * Set default taps if none are provided - * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling - * taps = 4 for upscaling - */ - if (in_taps->h_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz) > 1) - scl_data->taps.h_taps = min(2 * dc_fixpt_ceil(scl_data->ratios.horz), 8); - else - scl_data->taps.h_taps = 4; - } else - scl_data->taps.h_taps = in_taps->h_taps; - if (in_taps->v_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert) > 1) - scl_data->taps.v_taps = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert, 2)), 8); - else - scl_data->taps.v_taps = 4; - } else - scl_data->taps.v_taps = in_taps->v_taps; - if (in_taps->v_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 1) - scl_data->taps.v_taps_c = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert_c, 2)), 8); - else - scl_data->taps.v_taps_c = 4; - } else - scl_data->taps.v_taps_c = in_taps->v_taps_c; - if (in_taps->h_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 1) - scl_data->taps.h_taps_c = min(2 * dc_fixpt_ceil(scl_data->ratios.horz_c), 8); - else - scl_data->taps.h_taps_c = 4; - } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) - /* Only 1 and even h_taps_c are supported by hw */ - scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; - else - scl_data->taps.h_taps_c = in_taps->h_taps_c; - - /*Ensure we can support the requested number of vtaps*/ - min_taps_y = 
dc_fixpt_ceil(scl_data->ratios.vert); - min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c); - - /* Use LB_MEMORY_CONFIG_3 for 4:2:0 */ - if ((scl_data->format == PIXEL_FORMAT_420BPP8) || (scl_data->format == PIXEL_FORMAT_420BPP10)) - lb_config = LB_MEMORY_CONFIG_3; - else - lb_config = LB_MEMORY_CONFIG_0; - - dpp->caps->dscl_calc_lb_num_partitions( - scl_data, lb_config, &num_part_y, &num_part_c); - - /* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */ - if (dc_fixpt_ceil(scl_data->ratios.vert) > 2) - max_taps_y = num_part_y - (dc_fixpt_ceil(scl_data->ratios.vert) - 2); - else - max_taps_y = num_part_y; - - if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 2) - max_taps_c = num_part_c - (dc_fixpt_ceil(scl_data->ratios.vert_c) - 2); - else - max_taps_c = num_part_c; - - if (max_taps_y < min_taps_y) - return false; - else if (max_taps_c < min_taps_c) - return false; - - if (scl_data->taps.v_taps > max_taps_y) - scl_data->taps.v_taps = max_taps_y; - - if (scl_data->taps.v_taps_c > max_taps_c) - scl_data->taps.v_taps_c = max_taps_c; - - if (!dpp->ctx->dc->debug.always_scale) { - if (IDENTITY_RATIO(scl_data->ratios.horz)) - scl_data->taps.h_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert)) - scl_data->taps.v_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.horz_c)) - scl_data->taps.h_taps_c = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert_c)) - scl_data->taps.v_taps_c = 1; - } - - return true; -} - -static void dpp3_deferred_update(struct dpp *dpp_base) -{ - int bypass_state; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (dpp_base->deferred_reg_writes.bits.disable_dscl) { - REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3); - dpp_base->deferred_reg_writes.bits.disable_dscl = false; - } - - if (dpp_base->deferred_reg_writes.bits.disable_gamcor) { - REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &bypass_state); - if (bypass_state == 0) { // only program if bypass was latched - REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 3); - } else - ASSERT(0); // LUT select was updated again before vupdate - dpp_base->deferred_reg_writes.bits.disable_gamcor = false; - } - - if (dpp_base->deferred_reg_writes.bits.disable_blnd_lut) { - REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &bypass_state); - if (bypass_state == 0) { // only program if bypass was latched - REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 3); - } else - ASSERT(0); // LUT select was updated again before vupdate - dpp_base->deferred_reg_writes.bits.disable_blnd_lut = false; - } - - if (dpp_base->deferred_reg_writes.bits.disable_3dlut) { - REG_GET(CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, &bypass_state); - if (bypass_state == 0) { // only program if bypass was latched - REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 3); - } else - ASSERT(0); // LUT select was updated again before vupdate - dpp_base->deferred_reg_writes.bits.disable_3dlut = false; - } - - if (dpp_base->deferred_reg_writes.bits.disable_shaper) { - REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &bypass_state); - if (bypass_state == 0) { // only program if bypass was latched - REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 3); - } else - ASSERT(0); // LUT select was updated again before vupdate - dpp_base->deferred_reg_writes.bits.disable_shaper = false; - } -} - -static void dpp3_power_on_blnd_lut( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { - if (power_on) { - REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 
0); - REG_WAIT(CM_MEM_PWR_STATUS, BLNDGAM_MEM_PWR_STATE, 0, 1, 5); - } else { - dpp_base->ctx->dc->optimized_required = true; - dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true; - } - } else { - REG_SET(CM_MEM_PWR_CTRL, 0, - BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1); - } -} - -static void dpp3_power_on_hdr3dlut( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { - if (power_on) { - REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 0); - REG_WAIT(CM_MEM_PWR_STATUS2, HDR3DLUT_MEM_PWR_STATE, 0, 1, 5); - } else { - dpp_base->ctx->dc->optimized_required = true; - dpp_base->deferred_reg_writes.bits.disable_3dlut = true; - } - } -} - -static void dpp3_power_on_shaper( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { - if (power_on) { - REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 0); - REG_WAIT(CM_MEM_PWR_STATUS2, SHAPER_MEM_PWR_STATE, 0, 1, 5); - } else { - dpp_base->ctx->dc->optimized_required = true; - dpp_base->deferred_reg_writes.bits.disable_shaper = true; - } - } -} - -static void dpp3_configure_blnd_lut( - struct dpp *dpp_base, - bool is_ram_a) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE_2(CM_BLNDGAM_LUT_CONTROL, - CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 7, - CM_BLNDGAM_LUT_HOST_SEL, is_ram_a == true ? 0 : 1); - - REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); -} - -static void dpp3_program_blnd_pwl( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num) -{ - uint32_t i; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg; - uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg; - uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg; - - if (is_rgb_equal(rgb, num)) { - for (i = 0 ; i < num; i++) - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); - } else { - REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); - REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 4); - for (i = 0 ; i < num; i++) - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); - - REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); - REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 2); - for (i = 0 ; i < num; i++) - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_green); - - REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); - REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 1); - for (i = 0 ; i < num; i++) - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_blue); - } -} - -static void dcn3_dpp_cm_get_reg_field( - struct dcn3_dpp *dpp, - struct dcn3_xfer_func_reg *reg) -{ - reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - 
reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - - reg->shifts.field_region_end = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_B; - reg->masks.field_region_end = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_B; - reg->shifts.field_region_end_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->masks.field_region_end_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->shifts.field_region_end_base = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; - reg->masks.field_region_end_base = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; - reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; - reg->masks.field_region_linear_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; - reg->shifts.exp_region_start = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_B; - reg->masks.exp_region_start = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_B; - reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; - reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; -} - -/*program blnd lut RAM A*/ -static void dpp3_program_blnd_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - struct dcn3_xfer_func_reg gam_regs; - - dcn3_dpp_cm_get_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMA_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMA_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMA_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMA_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMA_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMA_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMA_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMA_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMA_END_CNTL2_R); - gam_regs.region_start = REG(CM_BLNDGAM_RAMA_REGION_0_1); - gam_regs.region_end = REG(CM_BLNDGAM_RAMA_REGION_32_33); - - cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -/*program blnd lut RAM B*/ -static void dpp3_program_blnd_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - struct dcn3_xfer_func_reg gam_regs; - - dcn3_dpp_cm_get_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMB_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMB_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMB_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = 
REG(CM_BLNDGAM_RAMB_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMB_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMB_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMB_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMB_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMB_END_CNTL2_R); - gam_regs.region_start = REG(CM_BLNDGAM_RAMB_REGION_0_1); - gam_regs.region_end = REG(CM_BLNDGAM_RAMB_REGION_32_33); - - cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -static enum dc_lut_mode dpp3_get_blndgam_current(struct dpp *dpp_base) -{ - enum dc_lut_mode mode; - uint32_t mode_current = 0; - uint32_t in_use = 0; - - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &mode_current); - REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &in_use); - - switch (mode_current) { - case 0: - case 1: - mode = LUT_BYPASS; - break; - - case 2: - if (in_use == 0) - mode = LUT_RAM_A; - else - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - - return mode; -} - -static bool dpp3_program_blnd_lut(struct dpp *dpp_base, - const struct pwl_params *params) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (params == NULL) { - REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_MODE, 0); - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_blnd_lut(dpp_base, false); - return false; - } - - current_mode = dpp3_get_blndgam_current(dpp_base); - if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_B) - next_mode = LUT_RAM_A; - else - next_mode = LUT_RAM_B; - - dpp3_power_on_blnd_lut(dpp_base, true); - dpp3_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A); - - if (next_mode == LUT_RAM_A) - dpp3_program_blnd_luta_settings(dpp_base, params); - else - dpp3_program_blnd_lutb_settings(dpp_base, params); - - dpp3_program_blnd_pwl( - dpp_base, params->rgb_resulted, params->hw_points_num); - - REG_UPDATE_2(CM_BLNDGAM_CONTROL, - CM_BLNDGAM_MODE, 2, - CM_BLNDGAM_SELECT, next_mode == LUT_RAM_A ? 
0 : 1); - - return true; -} - - -static void dpp3_program_shaper_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num) -{ - uint32_t i, red, green, blue; - uint32_t red_delta, green_delta, blue_delta; - uint32_t red_value, green_value, blue_value; - - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - for (i = 0 ; i < num; i++) { - - red = rgb[i].red_reg; - green = rgb[i].green_reg; - blue = rgb[i].blue_reg; - - red_delta = rgb[i].delta_red_reg; - green_delta = rgb[i].delta_green_reg; - blue_delta = rgb[i].delta_blue_reg; - - red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff); - green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff); - blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff); - - REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, red_value); - REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, green_value); - REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, blue_value); - } - -} - -static enum dc_lut_mode dpp3_get_shaper_current(struct dpp *dpp_base) -{ - enum dc_lut_mode mode; - uint32_t state_mode; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &state_mode); - - switch (state_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 1: - mode = LUT_RAM_A; - break; - case 2: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - - return mode; -} - -static void dpp3_configure_shaper_lut( - struct dpp *dpp_base, - bool is_ram_a) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, - CM_SHAPER_LUT_WRITE_EN_MASK, 7); - REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, - CM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1); - REG_SET(CM_SHAPER_LUT_INDEX, 0, CM_SHAPER_LUT_INDEX, 0); -} - -/*program shaper RAM A*/ - -static void dpp3_program_shaper_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - const struct gamma_curve *curve; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_SET_2(CM_SHAPER_RAMA_START_CNTL_B, 0, - CM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(CM_SHAPER_RAMA_START_CNTL_G, 0, - CM_SHAPER_RAMA_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, 0); - REG_SET_2(CM_SHAPER_RAMA_START_CNTL_R, 0, - CM_SHAPER_RAMA_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, 0); - - REG_SET_2(CM_SHAPER_RAMA_END_CNTL_B, 0, - CM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMA_END_CNTL_G, 0, - CM_SHAPER_RAMA_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMA_END_CNTL_R, 0, - CM_SHAPER_RAMA_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); - - curve = params->arr_curve_points; - REG_SET_4(CM_SHAPER_RAMA_REGION_0_1, 0, - CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_2_3, 0, 
- CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_4_5, 0, - CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_6_7, 0, - CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_8_9, 0, - CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_10_11, 0, - CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_12_13, 0, - CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_14_15, 0, - CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_16_17, 0, - CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_18_19, 0, - CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_20_21, 0, - CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_22_23, 0, - CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_24_25, 0, - CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, - 
CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_26_27, 0, - CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_28_29, 0, - CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_30_31, 0, - CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_32_33, 0, - CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); -} - -/*program shaper RAM B*/ -static void dpp3_program_shaper_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - const struct gamma_curve *curve; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_SET_2(CM_SHAPER_RAMB_START_CNTL_B, 0, - CM_SHAPER_RAMB_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(CM_SHAPER_RAMB_START_CNTL_G, 0, - CM_SHAPER_RAMB_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, 0); - REG_SET_2(CM_SHAPER_RAMB_START_CNTL_R, 0, - CM_SHAPER_RAMB_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, 0); - - REG_SET_2(CM_SHAPER_RAMB_END_CNTL_B, 0, - CM_SHAPER_RAMB_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMB_END_CNTL_G, 0, - CM_SHAPER_RAMB_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMB_END_CNTL_R, 0, - CM_SHAPER_RAMB_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); - - curve = params->arr_curve_points; - REG_SET_4(CM_SHAPER_RAMB_REGION_0_1, 0, - CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_2_3, 0, - CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_4_5, 0, - CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, curve[0].offset, - 
CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_6_7, 0, - CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_8_9, 0, - CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_10_11, 0, - CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_12_13, 0, - CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_14_15, 0, - CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_16_17, 0, - CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_18_19, 0, - CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_20_21, 0, - CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_22_23, 0, - CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_24_25, 0, - CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_26_27, 0, - CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, curve[1].offset, - 
CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_28_29, 0, - CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_30_31, 0, - CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_32_33, 0, - CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); - -} - - -static bool dpp3_program_shaper(struct dpp *dpp_base, - const struct pwl_params *params) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (params == NULL) { - REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0); - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_shaper(dpp_base, false); - return false; - } - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_shaper(dpp_base, true); - - current_mode = dpp3_get_shaper_current(dpp_base); - - if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) - next_mode = LUT_RAM_B; - else - next_mode = LUT_RAM_A; - - dpp3_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A); - - if (next_mode == LUT_RAM_A) - dpp3_program_shaper_luta_settings(dpp_base, params); - else - dpp3_program_shaper_lutb_settings(dpp_base, params); - - dpp3_program_shaper_lut( - dpp_base, params->rgb_resulted, params->hw_points_num); - - REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2); - - return true; - -} - -static enum dc_lut_mode get3dlut_config( - struct dpp *dpp_base, - bool *is_17x17x17, - bool *is_12bits_color_channel) -{ - uint32_t i_mode, i_enable_10bits, lut_size; - enum dc_lut_mode mode; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_GET(CM_3DLUT_READ_WRITE_CONTROL, - CM_3DLUT_30BIT_EN, &i_enable_10bits); - REG_GET(CM_3DLUT_MODE, - CM_3DLUT_MODE_CURRENT, &i_mode); - - switch (i_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 1: - mode = LUT_RAM_A; - break; - case 2: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - if (i_enable_10bits > 0) - *is_12bits_color_channel = false; - else - *is_12bits_color_channel = true; - - REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &lut_size); - - if (lut_size == 0) - *is_17x17x17 = true; - else - *is_17x17x17 = false; - - return mode; -} -/* - * select ramA or ramB, or bypass - * select color channel size 10 or 12 bits - * select 3dlut size 17x17x17 or 9x9x9 - */ -static void dpp3_set_3dlut_mode( - struct dpp *dpp_base, - enum dc_lut_mode mode, - bool is_color_channel_12bits, - bool is_lut_size17x17x17) -{ - uint32_t lut_mode; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (mode == LUT_BYPASS) - lut_mode = 0; - else if (mode == LUT_RAM_A) - lut_mode = 1; - else - lut_mode = 2; - - REG_UPDATE_2(CM_3DLUT_MODE, - CM_3DLUT_MODE, lut_mode, - CM_3DLUT_SIZE, is_lut_size17x17x17 == true ? 
0 : 1); -} - -static void dpp3_select_3dlut_ram( - struct dpp *dpp_base, - enum dc_lut_mode mode, - bool is_color_channel_12bits) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE_2(CM_3DLUT_READ_WRITE_CONTROL, - CM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1, - CM_3DLUT_30BIT_EN, - is_color_channel_12bits == true ? 0:1); -} - - - -static void dpp3_set3dlut_ram12( - struct dpp *dpp_base, - const struct dc_rgb *lut, - uint32_t entries) -{ - uint32_t i, red, green, blue, red1, green1, blue1; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - for (i = 0 ; i < entries; i += 2) { - red = lut[i].red<<4; - green = lut[i].green<<4; - blue = lut[i].blue<<4; - red1 = lut[i+1].red<<4; - green1 = lut[i+1].green<<4; - blue1 = lut[i+1].blue<<4; - - REG_SET_2(CM_3DLUT_DATA, 0, - CM_3DLUT_DATA0, red, - CM_3DLUT_DATA1, red1); - - REG_SET_2(CM_3DLUT_DATA, 0, - CM_3DLUT_DATA0, green, - CM_3DLUT_DATA1, green1); - - REG_SET_2(CM_3DLUT_DATA, 0, - CM_3DLUT_DATA0, blue, - CM_3DLUT_DATA1, blue1); - - } -} - -/* - * load selected lut with 10 bits color channels - */ -static void dpp3_set3dlut_ram10( - struct dpp *dpp_base, - const struct dc_rgb *lut, - uint32_t entries) -{ - uint32_t i, red, green, blue, value; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - for (i = 0; i < entries; i++) { - red = lut[i].red; - green = lut[i].green; - blue = lut[i].blue; - - value = (red<<20) | (green<<10) | blue; - - REG_SET(CM_3DLUT_DATA_30BIT, 0, CM_3DLUT_DATA_30BIT, value); - } - -} - - -static void dpp3_select_3dlut_ram_mask( - struct dpp *dpp_base, - uint32_t ram_selection_mask) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, - ram_selection_mask); - REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0); -} - -static bool dpp3_program_3dlut(struct dpp *dpp_base, - struct tetrahedral_params *params) -{ - enum dc_lut_mode mode; - bool is_17x17x17; - bool is_12bits_color_channel; - struct dc_rgb *lut0; - struct dc_rgb *lut1; - struct dc_rgb *lut2; - struct dc_rgb *lut3; - int lut_size0; - int lut_size; - - if (params == NULL) { - dpp3_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false); - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_hdr3dlut(dpp_base, false); - return false; - } - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_hdr3dlut(dpp_base, true); - - mode = get3dlut_config(dpp_base, &is_17x17x17, &is_12bits_color_channel); - - if (mode == LUT_BYPASS || mode == LUT_RAM_B) - mode = LUT_RAM_A; - else - mode = LUT_RAM_B; - - is_17x17x17 = !params->use_tetrahedral_9; - is_12bits_color_channel = params->use_12bits; - if (is_17x17x17) { - lut0 = params->tetrahedral_17.lut0; - lut1 = params->tetrahedral_17.lut1; - lut2 = params->tetrahedral_17.lut2; - lut3 = params->tetrahedral_17.lut3; - lut_size0 = sizeof(params->tetrahedral_17.lut0)/ - sizeof(params->tetrahedral_17.lut0[0]); - lut_size = sizeof(params->tetrahedral_17.lut1)/ - sizeof(params->tetrahedral_17.lut1[0]); - } else { - lut0 = params->tetrahedral_9.lut0; - lut1 = params->tetrahedral_9.lut1; - lut2 = params->tetrahedral_9.lut2; - lut3 = params->tetrahedral_9.lut3; - lut_size0 = sizeof(params->tetrahedral_9.lut0)/ - sizeof(params->tetrahedral_9.lut0[0]); - lut_size = sizeof(params->tetrahedral_9.lut1)/ - sizeof(params->tetrahedral_9.lut1[0]); - } - - dpp3_select_3dlut_ram(dpp_base, mode, - is_12bits_color_channel); - dpp3_select_3dlut_ram_mask(dpp_base, 0x1); - if (is_12bits_color_channel) - dpp3_set3dlut_ram12(dpp_base, lut0, 
lut_size0); - else - dpp3_set3dlut_ram10(dpp_base, lut0, lut_size0); - - dpp3_select_3dlut_ram_mask(dpp_base, 0x2); - if (is_12bits_color_channel) - dpp3_set3dlut_ram12(dpp_base, lut1, lut_size); - else - dpp3_set3dlut_ram10(dpp_base, lut1, lut_size); - - dpp3_select_3dlut_ram_mask(dpp_base, 0x4); - if (is_12bits_color_channel) - dpp3_set3dlut_ram12(dpp_base, lut2, lut_size); - else - dpp3_set3dlut_ram10(dpp_base, lut2, lut_size); - - dpp3_select_3dlut_ram_mask(dpp_base, 0x8); - if (is_12bits_color_channel) - dpp3_set3dlut_ram12(dpp_base, lut3, lut_size); - else - dpp3_set3dlut_ram10(dpp_base, lut3, lut_size); - - - dpp3_set_3dlut_mode(dpp_base, mode, is_12bits_color_channel, - is_17x17x17); - - return true; -} -static struct dpp_funcs dcn30_dpp_funcs = { - .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, - .dpp_read_state = dpp30_read_state, - .dpp_reset = dpp_reset, - .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, - .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap, - .dpp_set_csc_adjustment = NULL, - .dpp_set_csc_default = NULL, - .dpp_program_regamma_pwl = NULL, - .dpp_set_pre_degam = dpp3_set_pre_degam, - .dpp_program_input_lut = NULL, - .dpp_full_bypass = dpp1_full_bypass, - .dpp_setup = dpp3_cnv_setup, - .dpp_program_degamma_pwl = NULL, - .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, - .dpp_program_cm_bias = dpp3_program_cm_bias, - .dpp_program_blnd_lut = dpp3_program_blnd_lut, - .dpp_program_shaper_lut = dpp3_program_shaper, - .dpp_program_3dlut = dpp3_program_3dlut, - .dpp_deferred_update = dpp3_deferred_update, - .dpp_program_bias_and_scale = NULL, - .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, - .set_cursor_attributes = dpp3_set_cursor_attributes, - .set_cursor_position = dpp1_set_cursor_position, - .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, - .dpp_dppclk_control = dpp1_dppclk_control, - .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, - .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, -}; - - -static struct dpp_caps dcn30_dpp_cap = { - .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, - .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, -}; - -bool dpp3_construct( - struct dcn3_dpp *dpp, - struct dc_context *ctx, - uint32_t inst, - const struct dcn3_dpp_registers *tf_regs, - const struct dcn3_dpp_shift *tf_shift, - const struct dcn3_dpp_mask *tf_mask) -{ - dpp->base.ctx = ctx; - - dpp->base.inst = inst; - dpp->base.funcs = &dcn30_dpp_funcs; - dpp->base.caps = &dcn30_dpp_cap; - - dpp->tf_regs = tf_regs; - dpp->tf_shift = tf_shift; - dpp->tf_mask = tf_mask; - - return true; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h deleted file mode 100644 index 2ac8045a87..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h +++ /dev/null @@ -1,642 +0,0 @@ -/* Copyright 2020 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DCN30_DPP_H__ -#define __DCN30_DPP_H__ - -#include "dcn20/dcn20_dpp.h" - -#define TO_DCN30_DPP(dpp)\ - container_of(dpp, struct dcn3_dpp, base) - -#define DPP_REG_LIST_DCN30_COMMON(id)\ - SRI(CM_DEALPHA, CM, id),\ - SRI(CM_MEM_PWR_STATUS, CM, id),\ - SRI(CM_BIAS_CR_R, CM, id),\ - SRI(CM_BIAS_Y_G_CB_B, CM, id),\ - SRI(PRE_DEGAM, CNVC_CFG, id),\ - SRI(CM_GAMCOR_CONTROL, CM, id),\ - SRI(CM_GAMCOR_LUT_CONTROL, CM, id),\ - SRI(CM_GAMCOR_LUT_INDEX, CM, id),\ - SRI(CM_GAMCOR_LUT_INDEX, CM, id),\ - SRI(CM_GAMCOR_LUT_DATA, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_CNTL_B, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_CNTL_G, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_CNTL_R, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R, CM, id),\ - SRI(CM_GAMCOR_RAMB_END_CNTL1_B, CM, id),\ - SRI(CM_GAMCOR_RAMB_END_CNTL2_B, CM, id),\ - SRI(CM_GAMCOR_RAMB_END_CNTL1_G, CM, id),\ - SRI(CM_GAMCOR_RAMB_END_CNTL2_G, CM, id),\ - SRI(CM_GAMCOR_RAMB_END_CNTL1_R, CM, id),\ - SRI(CM_GAMCOR_RAMB_END_CNTL2_R, CM, id),\ - SRI(CM_GAMCOR_RAMB_REGION_0_1, CM, id),\ - SRI(CM_GAMCOR_RAMB_REGION_32_33, CM, id),\ - SRI(CM_GAMCOR_RAMB_OFFSET_B, CM, id),\ - SRI(CM_GAMCOR_RAMB_OFFSET_G, CM, id),\ - SRI(CM_GAMCOR_RAMB_OFFSET_R, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_B, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_G, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_R, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_CNTL_B, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_CNTL_G, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_CNTL_R, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R, CM, id),\ - SRI(CM_GAMCOR_RAMA_END_CNTL1_B, CM, id),\ - SRI(CM_GAMCOR_RAMA_END_CNTL2_B, CM, id),\ - SRI(CM_GAMCOR_RAMA_END_CNTL1_G, CM, id),\ - SRI(CM_GAMCOR_RAMA_END_CNTL2_G, CM, id),\ - SRI(CM_GAMCOR_RAMA_END_CNTL1_R, CM, id),\ - SRI(CM_GAMCOR_RAMA_END_CNTL2_R, CM, id),\ - SRI(CM_GAMCOR_RAMA_REGION_0_1, CM, id),\ - SRI(CM_GAMCOR_RAMA_REGION_32_33, CM, id),\ - SRI(CM_GAMCOR_RAMA_OFFSET_B, CM, id),\ - SRI(CM_GAMCOR_RAMA_OFFSET_G, CM, id),\ - SRI(CM_GAMCOR_RAMA_OFFSET_R, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_G, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_R, CM, id),\ - SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\ - 
SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\ - SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\ - SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\ - SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\ - SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\ - SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\ - SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \ - SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \ - SRI(OTG_H_BLANK, DSCL, id), \ - SRI(OTG_V_BLANK, DSCL, id), \ - SRI(SCL_MODE, DSCL, id), \ - SRI(LB_DATA_FORMAT, DSCL, id), \ - SRI(LB_MEMORY_CTRL, DSCL, id), \ - SRI(DSCL_AUTOCAL, DSCL, id), \ - SRI(DSCL_CONTROL, DSCL, id), \ - SRI(SCL_TAP_CONTROL, DSCL, id), \ - SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \ - SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \ - SRI(DSCL_2TAP_CONTROL, DSCL, id), \ - SRI(MPC_SIZE, DSCL, id), \ - SRI(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \ - SRI(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \ - SRI(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \ - SRI(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \ - SRI(SCL_HORZ_FILTER_INIT, DSCL, id), \ - SRI(SCL_HORZ_FILTER_INIT_C, DSCL, id), \ - SRI(SCL_VERT_FILTER_INIT, DSCL, id), \ - SRI(SCL_VERT_FILTER_INIT_C, DSCL, id), \ - SRI(RECOUT_START, DSCL, id), \ - SRI(RECOUT_SIZE, DSCL, id), \ - SRI(PRE_DEALPHA, CNVC_CFG, id), \ - SRI(PRE_REALPHA, CNVC_CFG, id), \ - SRI(PRE_CSC_MODE, CNVC_CFG, id), \ - SRI(PRE_CSC_C11_C12, CNVC_CFG, id), \ - SRI(PRE_CSC_C33_C34, CNVC_CFG, id), \ - SRI(PRE_CSC_B_C11_C12, CNVC_CFG, id), \ - SRI(PRE_CSC_B_C33_C34, CNVC_CFG, id), \ - SRI(CM_POST_CSC_CONTROL, CM, id), \ - SRI(CM_POST_CSC_C11_C12, CM, id), \ - SRI(CM_POST_CSC_C33_C34, CM, id), \ - SRI(CM_POST_CSC_B_C11_C12, CM, id), \ - SRI(CM_POST_CSC_B_C33_C34, CM, id), \ - SRI(CM_MEM_PWR_CTRL, CM, id), \ - SRI(CM_CONTROL, CM, id), \ - SRI(FORMAT_CONTROL, CNVC_CFG, id), \ - SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ - SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ - SRI(CURSOR0_COLOR0, CNVC_CUR, id), \ - SRI(CURSOR0_COLOR1, CNVC_CUR, id), \ - SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \ - SRI(DPP_CONTROL, DPP_TOP, id), \ - SRI(CM_HDR_MULT_COEF, CM, id), \ - SRI(CURSOR_CONTROL, CURSOR0_, id), \ - SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \ - SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \ - SRI(FCNV_FP_BIAS_G, CNVC_CFG, id), \ - SRI(FCNV_FP_BIAS_B, CNVC_CFG, id), \ - SRI(FCNV_FP_SCALE_R, CNVC_CFG, id), \ - SRI(FCNV_FP_SCALE_G, CNVC_CFG, id), \ - SRI(FCNV_FP_SCALE_B, CNVC_CFG, id), \ - SRI(COLOR_KEYER_CONTROL, CNVC_CFG, id), \ - SRI(COLOR_KEYER_ALPHA, CNVC_CFG, id), \ - SRI(COLOR_KEYER_RED, CNVC_CFG, id), \ - SRI(COLOR_KEYER_GREEN, CNVC_CFG, id), \ - SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \ - SRI(CURSOR_CONTROL, CURSOR0_, id),\ - SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\ - SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \ - SRI(DSCL_MEM_PWR_CTRL, DSCL, id) - -#define DPP_REG_LIST_DCN30(id)\ - DPP_REG_LIST_DCN30_COMMON(id), \ - TF_REG_LIST_DCN20_COMMON(id), \ - SRI(CM_BLNDGAM_CONTROL, CM, id), \ - SRI(CM_SHAPER_LUT_DATA, CM, id),\ - SRI(CM_MEM_PWR_CTRL2, CM, id), \ - SRI(CM_MEM_PWR_STATUS2, CM, id), \ - SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM, id),\ - SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM, id),\ - SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM, id),\ - SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B, CM, id),\ - SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G, CM, id),\ - SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM, id),\ - SRI(CM_BLNDGAM_LUT_CONTROL, 
CM, id) - - - -#define DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh)\ - TF_SF(CM0_CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, mask_sh),\ - TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_EN, mask_sh),\ - TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_ABLND, mask_sh),\ - TF_SF(CM0_CM_BIAS_CR_R, CM_BIAS_CR_R, mask_sh),\ - TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_Y_G, mask_sh),\ - TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_CB_B, mask_sh),\ - TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_DIS, mask_sh),\ - TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, mask_sh),\ - TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_MODE, mask_sh),\ - TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_SELECT, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_PWL_DISABLE, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_LUT_INDEX, CM_GAMCOR_LUT_INDEX, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_LUT_DATA, CM_GAMCOR_LUT_DATA, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_WRITE_COLOR_MASK, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_READ_COLOR_SEL, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_HOST_SEL, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_CONFIG_MODE, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL1_B, CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_OFFSET_B, CM_GAMCOR_RAMA_OFFSET_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\ - 
TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\ - TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\ - TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\ - TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\ - TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\ - TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\ - TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\ - TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\ - TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\ - TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\ - TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\ - TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\ - TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\ - TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\ - TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\ - TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\ - TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\ - TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_EN, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_ABLND_EN, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_EN, mask_sh), \ - 
TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_ABLND_EN, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE_CURRENT, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C11, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C12, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C33, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C34, mask_sh), \ - TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE, mask_sh), \ - TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE_CURRENT, mask_sh), \ - TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C11, mask_sh), \ - TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C12, mask_sh), \ - TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C33, mask_sh), \ - TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C34, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \ - TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \ - TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \ - TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_ALPHA_PLANE_ENABLE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \ - TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \ - TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh), \ - TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CNV16, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE_C, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_R, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_G, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_B, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_EN, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, 
COLOR_KEYER_GREEN_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\ - TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ - TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh),\ - TF_SF(DSCL0_DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, mask_sh) - -#define DPP_REG_LIST_SH_MASK_DCN30_UPDATED(mask_sh)\ - TF_SF(CM0_CM_MEM_PWR_STATUS, BLNDGAM_MEM_PWR_STATE, mask_sh), \ - TF_SF(CM0_CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, mask_sh),\ - TF_SF(CM0_CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, mask_sh),\ - TF_SF(CM0_CM_MEM_PWR_STATUS2, HDR3DLUT_MEM_PWR_STATE, mask_sh),\ - TF_SF(CM0_CM_MEM_PWR_STATUS2, SHAPER_MEM_PWR_STATE, mask_sh),\ - TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_HOST_SEL, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_CONFIG_MODE, mask_sh), \ - TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, mask_sh), \ - TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, mask_sh) - - -#define DPP_REG_LIST_SH_MASK_DCN30(mask_sh)\ - DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \ - TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \ - DPP_REG_LIST_SH_MASK_DCN30_UPDATED(mask_sh) - -#define DPP_REG_FIELD_LIST_DCN3(type) \ - TF_REG_FIELD_LIST_DCN2_0(type); \ - type FORMAT_CROSSBAR_R; \ - type FORMAT_CROSSBAR_G; \ - type FORMAT_CROSSBAR_B; \ - type CM_DEALPHA_EN;\ - type CM_DEALPHA_ABLND;\ - type CM_BIAS_Y_G;\ - type CM_BIAS_CB_B;\ - type CM_BIAS_CR_R;\ - type 
GAMCOR_MEM_PWR_DIS; \ - type GAMCOR_MEM_PWR_FORCE; \ - type HDR3DLUT_MEM_PWR_FORCE; \ - type SHAPER_MEM_PWR_FORCE; \ - type PRE_DEGAM_MODE;\ - type PRE_DEGAM_SELECT;\ - type CNVC_ALPHA_PLANE_ENABLE; \ - type PRE_DEALPHA_EN; \ - type PRE_DEALPHA_ABLND_EN; \ - type PRE_REALPHA_EN; \ - type PRE_REALPHA_ABLND_EN; \ - type PRE_CSC_MODE; \ - type PRE_CSC_MODE_CURRENT; \ - type PRE_CSC_C11; \ - type PRE_CSC_C12; \ - type PRE_CSC_C33; \ - type PRE_CSC_C34; \ - type CM_POST_CSC_MODE; \ - type CM_POST_CSC_MODE_CURRENT; \ - type CM_POST_CSC_C11; \ - type CM_POST_CSC_C12; \ - type CM_POST_CSC_C33; \ - type CM_POST_CSC_C34; \ - type CM_GAMCOR_MODE; \ - type CM_GAMCOR_SELECT; \ - type CM_GAMCOR_PWL_DISABLE; \ - type CM_GAMCOR_MODE_CURRENT; \ - type CM_GAMCOR_SELECT_CURRENT; \ - type CM_GAMCOR_LUT_INDEX; \ - type CM_GAMCOR_LUT_DATA; \ - type CM_GAMCOR_LUT_WRITE_COLOR_MASK; \ - type CM_GAMCOR_LUT_READ_COLOR_SEL; \ - type CM_GAMCOR_LUT_HOST_SEL; \ - type CM_GAMCOR_LUT_CONFIG_MODE; \ - type CM_GAMCOR_LUT_STATUS; \ - type CM_GAMCOR_RAMA_EXP_REGION_START_B; \ - type CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; \ - type CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; \ - type CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; \ - type CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; \ - type CM_GAMCOR_RAMA_EXP_REGION_END_B; \ - type CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; \ - type CM_GAMCOR_RAMA_OFFSET_B; \ - type CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; \ - type CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; \ - type CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; \ - type CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;\ - type CM_GAMUT_REMAP_MODE_CURRENT;\ - type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_G; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_R; \ - type CM_BLNDGAM_LUT_WRITE_COLOR_MASK; \ - type CM_BLNDGAM_LUT_HOST_SEL; \ - type CM_BLNDGAM_LUT_CONFIG_MODE; \ - type CM_3DLUT_MODE_CURRENT; \ - type CM_SHAPER_MODE_CURRENT; \ - type CM_BLNDGAM_MODE; \ - type CM_BLNDGAM_MODE_CURRENT; \ - type CM_BLNDGAM_SELECT_CURRENT; \ - type CM_BLNDGAM_SELECT; \ - type GAMCOR_MEM_PWR_STATE; \ - type BLNDGAM_MEM_PWR_STATE; \ - type HDR3DLUT_MEM_PWR_STATE; \ - type SHAPER_MEM_PWR_STATE - -struct dcn3_dpp_shift { - DPP_REG_FIELD_LIST_DCN3(uint8_t); -}; - -struct dcn3_dpp_mask { - DPP_REG_FIELD_LIST_DCN3(uint32_t); -}; - -#define DPP_DCN3_REG_VARIABLE_LIST_COMMON \ - DPP_DCN2_REG_VARIABLE_LIST; \ - uint32_t CM_MEM_PWR_STATUS;\ - uint32_t CM_MEM_PWR_STATUS2;\ - uint32_t CM_MEM_PWR_CTRL2;\ - uint32_t CM_DEALPHA;\ - uint32_t CM_BIAS_CR_R;\ - uint32_t CM_BIAS_Y_G_CB_B;\ - uint32_t PRE_DEGAM;\ - uint32_t PRE_DEALPHA; \ - uint32_t PRE_REALPHA; \ - uint32_t PRE_CSC_MODE; \ - uint32_t PRE_CSC_C11_C12; \ - uint32_t PRE_CSC_C33_C34; \ - uint32_t PRE_CSC_B_C11_C12; \ - uint32_t PRE_CSC_B_C33_C34; \ - uint32_t CM_POST_CSC_CONTROL; \ - uint32_t CM_POST_CSC_C11_C12; \ - uint32_t CM_POST_CSC_C33_C34; \ - uint32_t CM_POST_CSC_B_C11_C12; \ - uint32_t CM_POST_CSC_B_C33_C34; \ - uint32_t CM_GAMUT_REMAP_B_C11_C12; \ - uint32_t CM_GAMUT_REMAP_B_C13_C14; \ - uint32_t CM_GAMUT_REMAP_B_C21_C22; \ - uint32_t CM_GAMUT_REMAP_B_C23_C24; \ - uint32_t CM_GAMUT_REMAP_B_C31_C32; \ - uint32_t CM_GAMUT_REMAP_B_C33_C34; \ - uint32_t CM_GAMCOR_CONTROL; \ - uint32_t CM_GAMCOR_LUT_CONTROL; \ - uint32_t CM_GAMCOR_LUT_INDEX; \ - uint32_t CM_GAMCOR_LUT_DATA; \ - uint32_t CM_GAMCOR_RAMB_START_CNTL_B; \ - uint32_t 
CM_GAMCOR_RAMB_START_CNTL_G; \ - uint32_t CM_GAMCOR_RAMB_START_CNTL_R; \ - uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_B; \ - uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_G; \ - uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_R; \ - uint32_t CM_GAMCOR_RAMB_END_CNTL1_B; \ - uint32_t CM_GAMCOR_RAMB_END_CNTL2_B; \ - uint32_t CM_GAMCOR_RAMB_END_CNTL1_G; \ - uint32_t CM_GAMCOR_RAMB_END_CNTL2_G; \ - uint32_t CM_GAMCOR_RAMB_END_CNTL1_R; \ - uint32_t CM_GAMCOR_RAMB_END_CNTL2_R; \ - uint32_t CM_GAMCOR_RAMB_REGION_0_1; \ - uint32_t CM_GAMCOR_RAMB_REGION_32_33; \ - uint32_t CM_GAMCOR_RAMB_OFFSET_B; \ - uint32_t CM_GAMCOR_RAMB_OFFSET_G; \ - uint32_t CM_GAMCOR_RAMB_OFFSET_R; \ - uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_B; \ - uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_G; \ - uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_R; \ - uint32_t CM_GAMCOR_RAMA_START_CNTL_B; \ - uint32_t CM_GAMCOR_RAMA_START_CNTL_G; \ - uint32_t CM_GAMCOR_RAMA_START_CNTL_R; \ - uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_B; \ - uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_G; \ - uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_R; \ - uint32_t CM_GAMCOR_RAMA_END_CNTL1_B; \ - uint32_t CM_GAMCOR_RAMA_END_CNTL2_B; \ - uint32_t CM_GAMCOR_RAMA_END_CNTL1_G; \ - uint32_t CM_GAMCOR_RAMA_END_CNTL2_G; \ - uint32_t CM_GAMCOR_RAMA_END_CNTL1_R; \ - uint32_t CM_GAMCOR_RAMA_END_CNTL2_R; \ - uint32_t CM_GAMCOR_RAMA_REGION_0_1; \ - uint32_t CM_GAMCOR_RAMA_REGION_32_33; \ - uint32_t CM_GAMCOR_RAMA_OFFSET_B; \ - uint32_t CM_GAMCOR_RAMA_OFFSET_G; \ - uint32_t CM_GAMCOR_RAMA_OFFSET_R; \ - uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_B; \ - uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_G; \ - uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_R; \ - uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B; \ - uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G; \ - uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R; \ - uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B; \ - uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G; \ - uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R; \ - uint32_t CM_BLNDGAM_LUT_CONTROL - - -struct dcn3_dpp_registers { - DPP_DCN3_REG_VARIABLE_LIST_COMMON; -}; - - -struct dcn3_dpp { - struct dpp base; - - const struct dcn3_dpp_registers *tf_regs; - const struct dcn3_dpp_shift *tf_shift; - const struct dcn3_dpp_mask *tf_mask; - - const uint16_t *filter_v; - const uint16_t *filter_h; - const uint16_t *filter_v_c; - const uint16_t *filter_h_c; - int lb_pixel_depth_supported; - int lb_memory_size; - int lb_bits_per_entry; - bool is_write_to_ram_a_safe; - struct scaler_data scl_data; - struct pwl_params pwl_data; -}; - -bool dpp3_construct(struct dcn3_dpp *dpp3, - struct dc_context *ctx, - uint32_t inst, - const struct dcn3_dpp_registers *tf_regs, - const struct dcn3_dpp_shift *tf_shift, - const struct dcn3_dpp_mask *tf_mask); - -bool dpp3_program_gamcor_lut( - struct dpp *dpp_base, const struct pwl_params *params); - -void dpp3_program_CM_dealpha( - struct dpp *dpp_base, - uint32_t enable, uint32_t additive_blending); - -void dpp30_read_state(struct dpp *dpp_base, - struct dcn_dpp_state *s); - -bool dpp3_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps); - -void dpp3_cnv_setup ( - struct dpp *dpp_base, - enum surface_pixel_format format, - enum expansion_mode mode, - struct dc_csc_transform input_csc_color_matrix, - enum dc_color_space input_color_space, - struct cnv_alpha_2bit_lut *alpha_2bit_lut); - -void dpp3_program_CM_bias( - struct dpp *dpp_base, - struct CM_bias_params *bias_params); - -void dpp3_set_hdr_multiplier( - struct dpp *dpp_base, - uint32_t 
multiplier); - -void dpp3_cm_set_gamut_remap( - struct dpp *dpp_base, - const struct dpp_grph_csc_adjustment *adjust); - -void dpp3_set_pre_degam(struct dpp *dpp_base, - enum dc_transfer_func_predefined tr); - -void dpp3_set_cursor_attributes( - struct dpp *dpp_base, - struct dc_cursor_attributes *cursor_attributes); - -void dpp3_program_post_csc( - struct dpp *dpp_base, - enum dc_color_space color_space, - enum dcn10_input_csc_select input_select, - const struct out_csc_color_matrix *tbl_entry); - -void dpp3_program_cm_bias( - struct dpp *dpp_base, - struct CM_bias_params *bias_params); - -void dpp3_program_cm_dealpha( - struct dpp *dpp_base, - uint32_t enable, uint32_t additive_blending); - -void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, - struct dpp_grph_csc_adjustment *adjust); -#endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c deleted file mode 100644 index 2f5b3fbd35..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c +++ /dev/null @@ -1,461 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "core_types.h" -#include "reg_helper.h" -#include "dcn30_dpp.h" -#include "basics/conversion.h" -#include "dcn30_cm_common.h" - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - -static void dpp3_enable_cm_block( - struct dpp *dpp_base) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - unsigned int cm_bypass_mode = 0; - - // debug option: put CM in bypass mode - if (dpp_base->ctx->dc->debug.cm_in_bypass) - cm_bypass_mode = 1; - - REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode); -} - -static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base) -{ - enum dc_lut_mode mode = LUT_BYPASS; - uint32_t state_mode; - uint32_t lut_mode; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode); - - if (state_mode == 2) {//Programmable RAM LUT - REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode); - if (lut_mode == 0) - mode = LUT_RAM_A; - else - mode = LUT_RAM_B; - } - - return mode; -} - -static void dpp3_program_gammcor_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num, - bool is_ram_a) -{ - uint32_t i; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg; - uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg; - uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg; - - /*fill in the LUT with all base values to be used by pwl module - * HW auto increments the LUT index: back-to-back write - */ - if (is_rgb_equal(rgb, num)) { - for (i = 0 ; i < num; i++) - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg); - - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red); - - } else { - REG_UPDATE(CM_GAMCOR_LUT_CONTROL, - CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4); - for (i = 0 ; i < num; i++) - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg); - - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red); - - REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0); - - REG_UPDATE(CM_GAMCOR_LUT_CONTROL, - CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2); - for (i = 0 ; i < num; i++) - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].green_reg); - - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_green); - - REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0); - - REG_UPDATE(CM_GAMCOR_LUT_CONTROL, - CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1); - for (i = 0 ; i < num; i++) - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].blue_reg); - - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_blue); - } -} - -static void dpp3_power_on_gamcor_lut( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { - if (power_on) { - REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0); - REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5); - } else { - dpp_base->ctx->dc->optimized_required = true; - dpp_base->deferred_reg_writes.bits.disable_gamcor = true; - } - } else - REG_SET(CM_MEM_PWR_CTRL, 0, - GAMCOR_MEM_PWR_DIS, power_on == true ? 
0:1); -} - -void dpp3_program_cm_dealpha( - struct dpp *dpp_base, - uint32_t enable, uint32_t additive_blending) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_SET_2(CM_DEALPHA, 0, - CM_DEALPHA_EN, enable, - CM_DEALPHA_ABLND, additive_blending); -} - -void dpp3_program_cm_bias( - struct dpp *dpp_base, - struct CM_bias_params *bias_params) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_SET(CM_BIAS_CR_R, 0, CM_BIAS_CR_R, bias_params->cm_bias_cr_r); - REG_SET_2(CM_BIAS_Y_G_CB_B, 0, - CM_BIAS_Y_G, bias_params->cm_bias_y_g, - CM_BIAS_CB_B, bias_params->cm_bias_cb_b); -} - -static void dpp3_gamcor_reg_field( - struct dcn3_dpp *dpp, - struct dcn3_xfer_func_reg *reg) -{ - - reg->shifts.field_region_start_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; - reg->masks.field_region_start_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; - reg->shifts.field_offset = dpp->tf_shift->CM_GAMCOR_RAMA_OFFSET_B; - reg->masks.field_offset = dpp->tf_mask->CM_GAMCOR_RAMA_OFFSET_B; - - reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; - reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; - reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; - reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; - reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS; - - reg->shifts.field_region_end = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_B; - reg->masks.field_region_end = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_B; - reg->shifts.field_region_end_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; - reg->masks.field_region_end_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; - reg->shifts.field_region_end_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; - reg->masks.field_region_end_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; - reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; - reg->masks.field_region_linear_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; - reg->shifts.exp_region_start = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_B; - reg->masks.exp_region_start = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_B; - reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; - reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; -} - -static void dpp3_configure_gamcor_lut( - struct dpp *dpp_base, - bool is_ram_a) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE(CM_GAMCOR_LUT_CONTROL, - CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7); - REG_UPDATE(CM_GAMCOR_LUT_CONTROL, - CM_GAMCOR_LUT_HOST_SEL, is_ram_a == true ? 
0:1); - REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0); -} - - -bool dpp3_program_gamcor_lut( - struct dpp *dpp_base, const struct pwl_params *params) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - struct dcn3_xfer_func_reg gam_regs; - - dpp3_enable_cm_block(dpp_base); - - if (params == NULL) { //bypass if we have no pwl data - REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 0); - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_gamcor_lut(dpp_base, false); - return false; - } - dpp3_power_on_gamcor_lut(dpp_base, true); - REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 2); - - current_mode = dpp30_get_gamcor_current(dpp_base); - if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) - next_mode = LUT_RAM_B; - else - next_mode = LUT_RAM_A; - - dpp3_power_on_gamcor_lut(dpp_base, true); - dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A); - - if (next_mode == LUT_RAM_B) { - gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMB_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMB_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMB_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMB_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMB_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMB_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMB_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMB_END_CNTL2_R); - gam_regs.region_start = REG(CM_GAMCOR_RAMB_REGION_0_1); - gam_regs.region_end = REG(CM_GAMCOR_RAMB_REGION_32_33); - //New registers in DCN3AG/DCN GAMCOR block - gam_regs.offset_b = REG(CM_GAMCOR_RAMB_OFFSET_B); - gam_regs.offset_g = REG(CM_GAMCOR_RAMB_OFFSET_G); - gam_regs.offset_r = REG(CM_GAMCOR_RAMB_OFFSET_R); - gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_B); - gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_G); - gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_R); - } else { - gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMA_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMA_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMA_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMA_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMA_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMA_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMA_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMA_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMA_END_CNTL2_R); - gam_regs.region_start = REG(CM_GAMCOR_RAMA_REGION_0_1); - gam_regs.region_end = REG(CM_GAMCOR_RAMA_REGION_32_33); - //New registers in DCN3AG/DCN GAMCOR block - gam_regs.offset_b = REG(CM_GAMCOR_RAMA_OFFSET_B); - gam_regs.offset_g = REG(CM_GAMCOR_RAMA_OFFSET_G); - gam_regs.offset_r = REG(CM_GAMCOR_RAMA_OFFSET_R); - gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_B); - gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_G); 
- gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_R); - } - - //get register fields - dpp3_gamcor_reg_field(dpp, &gam_regs); - - //program register set for LUTA/LUTB - cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs); - - dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num, - next_mode == LUT_RAM_A); - - //select Gamma LUT to use for next frame - REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0:1); - - return true; -} - -void dpp3_set_hdr_multiplier( - struct dpp *dpp_base, - uint32_t multiplier) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier); -} - - -static void program_gamut_remap( - struct dcn3_dpp *dpp, - const uint16_t *regval, - int select) -{ - uint16_t selection = 0; - struct color_matrices_reg gam_regs; - - if (regval == NULL || select == GAMUT_REMAP_BYPASS) { - REG_SET(CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, 0); - return; - } - switch (select) { - case GAMUT_REMAP_COEFF: - selection = 1; - break; - /*this corresponds to GAMUT_REMAP coefficients set B - *we don't have common coefficient sets in dcn3ag/dcn3 - */ - case GAMUT_REMAP_COMA_COEFF: - selection = 2; - break; - default: - break; - } - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; - - - if (select == GAMUT_REMAP_COEFF) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - } else if (select == GAMUT_REMAP_COMA_COEFF) { - - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - } - //select coefficient set to use - REG_SET( - CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, selection); -} - -void dpp3_cm_set_gamut_remap( - struct dpp *dpp_base, - const struct dpp_grph_csc_adjustment *adjust) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - int i = 0; - int gamut_mode; - - if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) - /* Bypass if type is bypass or hw */ - program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS); - else { - struct fixed31_32 arr_matrix[12]; - uint16_t arr_reg_val[12]; - - for (i = 0; i < 12; i++) - arr_matrix[i] = adjust->temperature_matrix[i]; - - convert_float_matrix( - arr_reg_val, arr_matrix, 12); - - //current coefficient set in use - REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode); - - if (gamut_mode == 0) - gamut_mode = 1; //use coefficient set A - else if (gamut_mode == 1) - gamut_mode = 2; - else - gamut_mode = 1; - - //follow dcn2 approach for now - using only coefficient set A - program_gamut_remap(dpp, arr_reg_val, gamut_mode); - } -} - -static void read_gamut_remap(struct dcn3_dpp *dpp, - uint16_t *regval, - int *select) -{ - struct color_matrices_reg gam_regs; - uint32_t selection; - - //current coefficient set in use - REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &selection); - - *select = selection; - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; - 
gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; - - if (*select == GAMUT_REMAP_COEFF) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); - - cm_helper_read_color_matrices(dpp->base.ctx, - regval, - &gam_regs); - - } else if (*select == GAMUT_REMAP_COMA_COEFF) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); - - cm_helper_read_color_matrices(dpp->base.ctx, - regval, - &gam_regs); - } -} - -void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, - struct dpp_grph_csc_adjustment *adjust) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - uint16_t arr_reg_val[12]; - int select; - - read_gamut_remap(dpp, arr_reg_val, &select); - - if (select == GAMUT_REMAP_BYPASS) { - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; - return; - } - - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; - convert_hw_matrix(adjust->temperature_matrix, - arr_reg_val, ARRAY_SIZE(arr_reg_val)); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c index 1b9d9495f7..fae98cf520 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c @@ -251,9 +251,7 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = { .set_fc_enable = dwb3_set_fc_enable, .set_stereo = dwb3_set_stereo, .set_new_content = dwb3_set_new_content, - .dwb_program_output_csc = NULL, .dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename - .dwb_set_scaler = NULL, }; void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h index 332634b76a..0f3f7c5fba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h @@ -217,6 +217,7 @@ SF_DWB2(DWB_OGAM_LUT_DATA, DWBCP, 0, DWB_OGAM_LUT_DATA, mask_sh),\ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_COLOR_SEL, mask_sh),\ + SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_DBG, mask_sh),\ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_HOST_SEL, mask_sh),\ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_CONFIG_MODE, mask_sh),\ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\ @@ -524,6 +525,7 @@ type DWB_OGAM_LUT_DATA;\ type DWB_OGAM_LUT_WRITE_COLOR_MASK;\ type DWB_OGAM_LUT_READ_COLOR_SEL;\ + type DWB_OGAM_LUT_READ_DBG;\ type DWB_OGAM_LUT_HOST_SEL;\ type DWB_OGAM_LUT_CONFIG_MODE;\ type DWB_OGAM_LUT_STATUS;\ @@ -710,7 +712,7 @@ type DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET;\ type DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS;\ type DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; + type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS struct dcn30_dwbc_registers { /* DCN3AG */ @@ -733,6 +735,10 @@ struct dcn30_dwbc_registers { uint32_t DWB_MMHUBBUB_BACKPRESSURE_CNT; uint32_t DWB_HOST_READ_CONTROL; uint32_t DWB_SOFT_RESET; + uint32_t DWB_DEBUG_CTRL; + uint32_t DWB_DEBUG; + uint32_t DWB_TEST_DEBUG_INDEX; + uint32_t DWB_TEST_DEBUG_DATA; /* DWBSCL */ uint32_t DWBSCL_COEF_RAM_TAP_SELECT; @@ -747,6 +753,9 @@ struct dcn30_dwbc_registers { uint32_t DWBSCL_DEST_SIZE; uint32_t DWBSCL_OVERFLOW_STATUS; uint32_t DWBSCL_OVERFLOW_COUNTER; + uint32_t DWBSCL_DEBUG; + uint32_t DWBSCL_TEST_DEBUG_INDEX; + 
uint32_t DWBSCL_TEST_DEBUG_DATA; /* DWBCP */ uint32_t DWB_HDR_MULT_COEF; @@ -838,6 +847,9 @@ struct dcn30_dwbc_registers { uint32_t DWB_OGAM_RAMB_REGION_28_29; uint32_t DWB_OGAM_RAMB_REGION_30_31; uint32_t DWB_OGAM_RAMB_REGION_32_33; + uint32_t DWBCP_DEBUG; + uint32_t DWBCP_TEST_DEBUG_INDEX; + uint32_t DWBCP_TEST_DEBUG_DATA; }; /* Internal enums / structs */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c index 152c9c5733..6a5af3da4b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c @@ -95,7 +95,7 @@ int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub, bool hubbub3_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h index 7b597908b9..ca6233e8f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h @@ -124,7 +124,7 @@ bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub, bool hubbub3_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index 75547ce86c..60a64d2903 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -455,6 +455,9 @@ void hubp3_read_state(struct hubp *hubp) if (REG(DCHUBP_CNTL)) s->hubp_cntl = REG_READ(DCHUBP_CNTL); + if (REG(DCSURF_FLIP_CONTROL)) + s->flip_control = REG_READ(DCSURF_FLIP_CONTROL); + } void hubp3_setup( diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index 5ebb573031..fca94e50ae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -1183,7 +1183,7 @@ void mpc3_get_gamut_remap(struct mpc *mpc, struct mpc_grph_gamut_adjustment *adjust) { struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - uint16_t arr_reg_val[12]; + uint16_t arr_reg_val[12] = {0}; int select; read_gamut_remap(mpc30, mpcc_id, arr_reg_val, &select); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h index ed9a5549c3..466ba20b9c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h @@ -26,6 +26,7 @@ #ifndef __DAL_DCN30_VPG_H__ #define __DAL_DCN30_VPG_H__ +#include "vpg.h" #define DCN30_VPG_FROM_VPG(vpg)\ container_of(vpg, struct dcn30_vpg, base) @@ -132,28 +133,6 @@ struct dcn30_vpg_mask { VPG_DCN3_REG_FIELD_LIST(uint32_t); }; -struct vpg; - -struct vpg_funcs { - void (*update_generic_info_packet)( - struct vpg *vpg, - uint32_t packet_index, - const struct dc_info_packet *info_packet, - bool immediate_update); - - void (*vpg_poweron)( - struct vpg *vpg); - - void (*vpg_powerdown)( - struct vpg *vpg); -}; - -struct vpg { - const struct vpg_funcs *funcs; - struct dc_context *ctx; - int inst; -}; - struct dcn30_vpg { struct vpg base; const struct dcn30_vpg_registers *regs; diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h index 
73db962dbc..067e49cb23 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h @@ -56,10 +56,4 @@ struct dccg *dccg301_create( const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask); -struct dccg *dccg301_create( - struct dc_context *ctx, - const struct dccg_registers *regs, - const struct dccg_shift *dccg_shift, - const struct dccg_mask *dccg_mask); - #endif //__DCN301_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c index a046664e20..c1959672df 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c @@ -63,6 +63,7 @@ static const struct hubbub_funcs hubbub301_funcs = { .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, .force_pstate_change_control = hubbub3_force_pstate_change_control, + .init_watermarks = hubbub3_init_watermarks, .hubbub_read_state = hubbub2_read_state, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h index e3caaacf74..e3be0bab40 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h @@ -34,12 +34,14 @@ DCCG_SRII(DTO_PARAM, DPPCLK, 1),\ DCCG_SRII(DTO_PARAM, DPPCLK, 2),\ DCCG_SRII(DTO_PARAM, DPPCLK, 3),\ + DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0),\ SR(PHYASYMCLK_CLOCK_CNTL),\ SR(PHYBSYMCLK_CLOCK_CNTL),\ SR(PHYCSYMCLK_CLOCK_CNTL),\ SR(PHYDSYMCLK_CLOCK_CNTL),\ SR(PHYESYMCLK_CLOCK_CNTL),\ SR(DPSTREAMCLK_CNTL),\ + SR(HDMISTREAMCLK_CNTL),\ SR(SYMCLK32_SE_CNTL),\ SR(SYMCLK32_LE_CNTL),\ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\ @@ -78,6 +80,8 @@ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 3, mask_sh),\ DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\ DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\ + DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\ + DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\ @@ -92,6 +96,8 @@ DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE1_EN, mask_sh),\ DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE2_EN, mask_sh),\ DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE3_EN, mask_sh),\ + DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_SRC_SEL, mask_sh),\ + DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_DTO_FORCE_DIS, mask_sh),\ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, mask_sh),\ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, mask_sh),\ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE2_SRC_SEL, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index 26be5fee74..b2cea59ba5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -205,7 +205,7 @@ void dcn31_link_encoder_set_dio_phy_mux( } } -static void enc31_hw_init(struct link_encoder *enc) +void enc31_hw_init(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h index 221671563a..ee78ba8079 100644 
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h @@ -89,6 +89,7 @@ SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ + SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \ SR(RDPCSTX0_RDPCSTX_SCRATCH), \ SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id) @@ -222,6 +223,7 @@ SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ + SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \ SR(RDPCSTX0_RDPCSTX_SCRATCH), \ SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id) @@ -283,4 +285,6 @@ bool dcn31_link_encoder_is_in_alt_mode( void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings); +void enc31_hw_init(struct link_encoder *enc); + #endif /* __DC_LINK_ENCODER__DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c index 65e45a0b4f..03b4ac2f19 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c @@ -377,7 +377,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table( */ REG_WAIT(DP_DPHY_SYM32_STATUS, SAT_UPDATE_PENDING, 0, - 10, DP_SAT_UPDATE_MAX_RETRY); + 100, DP_SAT_UPDATE_MAX_RETRY); } void dcn31_hpo_dp_link_enc_set_throttled_vcp_size( diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 45143459ee..678db949cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -474,6 +474,10 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets( &info_frame->hdrsmd, true); + /* packetIndex 4 is used for send immediate sdp message, and please + * use other packetIndex (such as 5,6) for other info packet + */ + if (info_frame->adaptive_sync.valid) enc->vpg->funcs->update_generic_info_packet( enc->vpg, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 5b5b5e0775..b906db6e73 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -172,7 +172,7 @@ static uint32_t convert_and_clamp( static bool hubbub31_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -362,7 +362,7 @@ static bool hubbub31_program_urgent_watermarks( static bool hubbub31_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -635,7 +635,7 @@ static bool hubbub31_program_stutter_watermarks( static bool hubbub31_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -717,7 +717,7 @@ static bool hubbub31_program_pstate_watermarks( static bool hubbub31_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool 
safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c index 281be20b1a..20c6fe4856 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c @@ -173,5 +173,12 @@ void dcn31_panel_cntl_construct( break; } - dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst; + if (dcn31_panel_cntl->base.ctx->dc->config.support_edp0_on_dp1) + //If supported, power sequencer mapping shall follow the DIG instance + dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst; + else + /* If not supported, pwrseq will be assigned in order, + * so first pwrseq will be assigned to first panel instance (legacy behavior) + */ + dcn31_panel_cntl->base.pwrseq_inst = dcn31_panel_cntl->base.inst; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c index f1deb1c3c3..cfb923d856 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c @@ -63,7 +63,12 @@ void vpg31_poweron(struct vpg *vpg) { struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg); - if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false) + uint32_t vpg_gsp_mem_pwr_state; + + REG_GET(VPG_MEM_PWR, VPG_GSP_MEM_PWR_STATE, &vpg_gsp_mem_pwr_state); + + if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false && + vpg_gsp_mem_pwr_state == 0) return; REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 1, VPG_GSP_LIGHT_SLEEP_FORCE, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h index 0e76eabce4..609e58dbd0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h @@ -26,6 +26,7 @@ #ifndef __DAL_DCN31_VPG_H__ #define __DAL_DCN31_VPG_H__ +#include "vpg.h" #define DCN31_VPG_FROM_VPG(vpg)\ container_of(vpg, struct dcn31_vpg, base) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile index 5314770fff..a58c37165f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile @@ -11,7 +11,7 @@ # Makefile for dcn32. 
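Note on the dcn31_panel_cntl.c hunk above: the eDP power-sequencer instance is now chosen conditionally. When support_edp0_on_dp1 is set, the mapping follows the DIG-derived pwrseq_inst; otherwise it falls back to the legacy in-order assignment (panel instance). A condensed, stand-alone restatement of that branch is sketched below; pick_pwrseq_inst() is an illustrative helper, not a real DC function.

    #include <stdbool.h>

    static unsigned int pick_pwrseq_inst(bool support_edp0_on_dp1,
                                         unsigned int dig_pwrseq_inst,
                                         unsigned int panel_inst)
    {
            if (support_edp0_on_dp1)
                    return dig_pwrseq_inst;  /* follow the DIG instance mapping */

            /* legacy behavior: pwrseq assigned in order, first pwrseq to first panel */
            return panel_inst;
    }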
DCN32 = dcn32_hubbub.o dcn32_dccg.o \ - dcn32_mmhubbub.o dcn32_dpp.o dcn32_hubp.o dcn32_mpc.o \ + dcn32_mmhubbub.o dcn32_hubp.o dcn32_mpc.o \ dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \ dcn32_hpo_dp_link_encoder.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c index 8a0460e863..d9ff95cd2d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c @@ -248,13 +248,13 @@ void dcn32_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; - enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; enc10->base.features = *enc_features; + if (enc10->base.connector.id == CONNECTOR_ID_USBC) + enc10->base.features.flags.bits.DP_IS_USB_C = 1; + enc10->base.transmitter = init_data->transmitter; /* set the flag to indicate whether driver poll the I2C data pin diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h index 2d5f25290e..35d23d9db4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h @@ -26,15 +26,7 @@ #ifndef __DC_LINK_ENCODER__DCN32_H__ #define __DC_LINK_ENCODER__DCN32_H__ -#include "dcn31/dcn31_dio_link_encoder.h" - -#define LE_DCN32_REG_LIST(id)\ - LE_DCN31_REG_LIST(id),\ - SRI(DIG_FIFO_CTRL0, DIG, id) - -#define LINK_ENCODER_MASK_SH_LIST_DCN32(mask_sh) \ - LINK_ENCODER_MASK_SH_LIST_DCN31(mask_sh),\ - LE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh) +#include "dcn30/dcn30_dio_link_encoder.h" void dcn32_link_encoder_construct( struct dcn20_link_encoder *enc20, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h index 1be5410cce..ca53d39561 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h @@ -177,11 +177,12 @@ SE_SF(DIG0_DIG_FE_CNTL, DIG_SYMCLK_FE_ON, mask_sh),\ SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh),\ SE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh),\ SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, mask_sh),\ SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, mask_sh),\ SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET, mask_sh),\ - SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh),\ - SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh) + SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh) + void dcn32_dio_stream_encoder_construct( struct dcn10_stream_encoder *enc1, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c deleted file mode 100644 index 681e75c6db..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright 2021 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "core_types.h" -#include "reg_helper.h" -#include "dcn32_dpp.h" -#include "basics/conversion.h" -#include "dcn30/dcn30_cm_common.h" - -/* Compute the maximum number of lines that we can fit in the line buffer */ -static void dscl32_calc_lb_num_partitions( - const struct scaler_data *scl_data, - enum lb_memory_config lb_config, - int *num_part_y, - int *num_part_c) -{ - int memory_line_size_y, memory_line_size_c, memory_line_size_a, - lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a; - - int line_size = scl_data->viewport.width < scl_data->recout.width ? - scl_data->viewport.width : scl_data->recout.width; - int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? 
- scl_data->viewport_c.width : scl_data->recout.width; - - if (line_size == 0) - line_size = 1; - - if (line_size_c == 0) - line_size_c = 1; - - memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */ - memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */ - memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ - - if (lb_config == LB_MEMORY_CONFIG_1) { - lb_memory_size = 970; - lb_memory_size_c = 970; - lb_memory_size_a = 970; - } else if (lb_config == LB_MEMORY_CONFIG_2) { - lb_memory_size = 1290; - lb_memory_size_c = 1290; - lb_memory_size_a = 1290; - } else if (lb_config == LB_MEMORY_CONFIG_3) { - if (scl_data->viewport.width == scl_data->h_active && - scl_data->viewport.height == scl_data->v_active) { - /* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */ - /* use increased LB size for calculation only if Scaler not enabled */ - lb_memory_size = 970 + 1290 + 1170 + 1170 + 1170; - lb_memory_size_c = 970 + 1290; - lb_memory_size_a = 970 + 1290 + 1170; - } else { - /* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */ - lb_memory_size = 970 + 1290 + 484 + 484 + 484; - lb_memory_size_c = 970 + 1290; - lb_memory_size_a = 970 + 1290 + 484; - } - } else { - if (scl_data->viewport.width == scl_data->h_active && - scl_data->viewport.height == scl_data->v_active) { - /* use increased LB size for calculation only if Scaler not enabled */ - lb_memory_size = 970 + 1290 + 1170; - lb_memory_size_c = 970 + 1290 + 1170; - lb_memory_size_a = 970 + 1290 + 1170; - } else { - lb_memory_size = 970 + 1290 + 484; - lb_memory_size_c = 970 + 1290 + 484; - lb_memory_size_a = 970 + 1290 + 484; - } - } - *num_part_y = lb_memory_size / memory_line_size_y; - *num_part_c = lb_memory_size_c / memory_line_size_c; - num_partitions_a = lb_memory_size_a / memory_line_size_a; - - if (scl_data->lb_params.alpha_en - && (num_partitions_a < *num_part_y)) - *num_part_y = num_partitions_a; - - if (*num_part_y > 32) - *num_part_y = 32; - if (*num_part_c > 32) - *num_part_c = 32; -} - -static struct dpp_funcs dcn32_dpp_funcs = { - .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, - .dpp_read_state = dpp30_read_state, - .dpp_reset = dpp_reset, - .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, - .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap, - .dpp_set_csc_adjustment = NULL, - .dpp_set_csc_default = NULL, - .dpp_program_regamma_pwl = NULL, - .dpp_set_pre_degam = dpp3_set_pre_degam, - .dpp_program_input_lut = NULL, - .dpp_full_bypass = dpp1_full_bypass, - .dpp_setup = dpp3_cnv_setup, - .dpp_program_degamma_pwl = NULL, - .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, - .dpp_program_cm_bias = dpp3_program_cm_bias, - - .dpp_program_blnd_lut = NULL, // BLNDGAM is removed completely in DCN3.2 DPP - .dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) - .dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) - - .dpp_program_bias_and_scale = NULL, - .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, - .set_cursor_attributes = dpp3_set_cursor_attributes, - .set_cursor_position = dpp1_set_cursor_position, - .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, - .dpp_dppclk_control = dpp1_dppclk_control, - .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, - .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, -}; - - -static struct dpp_caps 
dcn32_dpp_cap = { - .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, - .max_lb_partitions = 31, - .dscl_calc_lb_num_partitions = dscl32_calc_lb_num_partitions, -}; - -bool dpp32_construct( - struct dcn3_dpp *dpp, - struct dc_context *ctx, - uint32_t inst, - const struct dcn3_dpp_registers *tf_regs, - const struct dcn3_dpp_shift *tf_shift, - const struct dcn3_dpp_mask *tf_mask) -{ - dpp->base.ctx = ctx; - - dpp->base.inst = inst; - dpp->base.funcs = &dcn32_dpp_funcs; - dpp->base.caps = &dcn32_dpp_cap; - - dpp->tf_regs = tf_regs; - dpp->tf_shift = tf_shift; - dpp->tf_mask = tf_mask; - - return true; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h deleted file mode 100644 index 572958d287..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DCN32_DPP_H__ -#define __DCN32_DPP_H__ - -#include "dcn20/dcn20_dpp.h" -#include "dcn30/dcn30_dpp.h" - -bool dpp32_construct(struct dcn3_dpp *dpp3, - struct dc_context *ctx, - uint32_t inst, - const struct dcn3_dpp_registers *tf_regs, - const struct dcn3_dpp_shift *tf_shift, - const struct dcn3_dpp_mask *tf_mask); - -#endif /* __DCN32_DPP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c index 88dfc90755..515c4c2b4c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c @@ -167,7 +167,7 @@ static uint32_t convert_and_clamp( bool hubbub32_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -357,7 +357,7 @@ bool hubbub32_program_urgent_watermarks( bool hubbub32_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -503,7 +503,7 @@ bool hubbub32_program_stutter_watermarks( bool hubbub32_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -656,7 +656,7 @@ bool hubbub32_program_pstate_watermarks( bool hubbub32_program_usr_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -750,7 +750,7 @@ void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow) static bool hubbub32_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h index f073839a4b..e439ba0fa3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h @@ -118,25 +118,25 @@ bool hubbub32_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub32_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub32_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub32_program_usr_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index f98def6c8c..fbcd6f7bc9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -35,25 +35,6 @@ static bool is_dual_plane(enum surface_pixel_format format) return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; } - -uint32_t dcn32_helper_mall_bytes_to_ways( - struct dc *dc, - 
uint32_t total_size_in_mall_bytes) -{ - uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways; - - /* add 2 lines for worst case alignment */ - cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2; - - total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; - lines_per_way = total_cache_lines / dc->caps.cache_num_ways; - num_ways = cache_lines_used / lines_per_way; - if (cache_lines_used % lines_per_way > 0) - num_ways++; - - return num_ways; -} - uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -112,8 +93,10 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp( if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) { if (dc->debug.force_subvp_num_ways) { return dc->debug.force_subvp_num_ways; + } else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) { + return dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes); } else { - return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes); + return 0; } } else { return 0; @@ -399,7 +382,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting; for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c index 13be5f06d9..05783daa62 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c @@ -127,11 +127,6 @@ void dcn321_link_encoder_construct( * while doing the DP sink detect */ -/* if (dal_adapter_service_is_feature_supported(as, - FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) - enc10->base.features.flags.bits. 
- DP_SINK_DETECT_POLL_DATA_PIN = true;*/ - enc10->base.output_signals = SIGNAL_TYPE_DVI_SINGLE_LINK | SIGNAL_TYPE_DVI_DUAL_LINK | @@ -191,7 +186,6 @@ void dcn321_link_encoder_construct( __func__, result); } - if (enc10->base.ctx->dc->debug.hdmi20_disable) { + if (enc10->base.ctx->dc->debug.hdmi20_disable) enc10->base.features.flags.bits.HDMI_6GB_EN = 0; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile index 0e317e0c36..d5b4533d2f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile @@ -13,7 +13,7 @@ DCN35 = dcn35_dio_stream_encoder.o \ dcn35_dio_link_encoder.o dcn35_dccg.o \ dcn35_hubp.o dcn35_hubbub.o \ - dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o + dcn35_mmhubbub.o dcn35_opp.o dcn35_pg_cntl.o dcn35_dwb.o AMD_DAL_DCN35 = $(addprefix $(AMDDALPATH)/dc/dcn35/,$(DCN35)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c index f1ba7bb792..58dd3c5bbf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c @@ -49,15 +49,23 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg, switch (dpp_inst) { case 0: REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, enable); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable); break; case 1: REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, enable); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable); break; case 2: REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, enable); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable); break; case 3: REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, enable); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable); break; default: break; @@ -100,6 +108,32 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; } +static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg, + uint32_t dpp_inst, uint32_t enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + return; + + switch (dpp_inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable); + break; + default: + break; + } +} + static void dccg35_get_pixel_rate_div( struct dccg *dccg, uint32_t otg_inst, @@ -333,21 +367,67 @@ static void dccg35_set_dpstreamclk( /* enabled to select one of the DTBCLKs for pipe */ switch (dp_hpo_inst) { case 0: - REG_UPDATE_2(DPSTREAMCLK_CNTL, - DPSTREAMCLK0_EN, + REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN, (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1); break; case 1: REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN, (src == REFCLK) ? 
0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1); break; case 2: REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN, (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1); break; case 3: REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN, (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + + +static void dccg35_set_dpstreamclk_root_clock_gating( + struct dccg *dccg, + int dp_hpo_inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + switch (dp_hpo_inst) { + case 0: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, enable ? 1 : 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, enable ? 1 : 0); + } + break; + case 1: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, enable ? 1 : 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, enable ? 1 : 0); + } + break; + case 2: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, enable ? 1 : 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, enable ? 1 : 0); + } + break; + case 3: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, enable ? 1 : 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, enable ? 1 : 0); + } break; default: BREAK_TO_DEBUGGER(); @@ -355,6 +435,8 @@ static void dccg35_set_dpstreamclk( } } + + static void dccg35_set_physymclk_root_clock_gating( struct dccg *dccg, int phy_inst, @@ -369,22 +451,32 @@ static void dccg35_set_physymclk_root_clock_gating( case 0: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYA_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); break; case 1: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYB_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); break; case 2: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYC_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); break; case 3: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYD_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); break; case 4: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYE_REFCLK_ROOT_GATE_DISABLE, enable ? 
1 : 0); break; default: BREAK_TO_DEBUGGER(); @@ -407,10 +499,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_EN, 1, PHYASYMCLK_SRC_SEL, clk_src); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYA_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_EN, 0, PHYASYMCLK_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYA_REFCLK_ROOT_GATE_DISABLE, 1); } break; case 1: @@ -418,10 +516,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_EN, 1, PHYBSYMCLK_SRC_SEL, clk_src); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYB_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_EN, 0, PHYBSYMCLK_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYB_REFCLK_ROOT_GATE_DISABLE, 1); } break; case 2: @@ -429,10 +533,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_EN, 1, PHYCSYMCLK_SRC_SEL, clk_src); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYC_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_EN, 0, PHYCSYMCLK_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYC_REFCLK_ROOT_GATE_DISABLE, 1); } break; case 3: @@ -440,10 +550,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_EN, 1, PHYDSYMCLK_SRC_SEL, clk_src); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYD_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_EN, 0, PHYDSYMCLK_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYD_REFCLK_ROOT_GATE_DISABLE, 1); } break; case 4: @@ -451,10 +567,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_EN, 1, PHYESYMCLK_SRC_SEL, clk_src); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYE_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_EN, 0, PHYESYMCLK_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYE_REFCLK_ROOT_GATE_DISABLE, 1); } break; default: @@ -491,12 +613,12 @@ static void dccg35_dpp_root_clock_control( if (clock_on) { /* turn off the DTO and leave phase/modulo at max */ - dcn35_set_dppclk_enable(dccg, dpp_inst, 0); + dcn35_set_dppclk_enable(dccg, dpp_inst, 1); REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, DPPCLK0_DTO_PHASE, 0xFF, DPPCLK0_DTO_MODULO, 0xFF); } else { - dcn35_set_dppclk_enable(dccg, dpp_inst, 1); + dcn35_set_dppclk_enable(dccg, dpp_inst, 0); /* turn on the DTO to generate a 0hz clock */ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, DPPCLK0_DTO_PHASE, 0, @@ -575,18 +697,32 @@ void dccg35_init(struct dccg *dccg) dccg35_disable_symclk32_se(dccg, otg_inst); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - for (otg_inst = 0; otg_inst < 2; otg_inst++) + for (otg_inst = 0; otg_inst < 2; otg_inst++) { 
dccg31_disable_symclk32_le(dccg, otg_inst); + dccg31_set_symclk32_le_root_clock_gating(dccg, otg_inst, false); + } + +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// for (otg_inst = 0; otg_inst < 4; otg_inst++) +// dccg35_disable_symclk_se(dccg, otg_inst, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) - for (otg_inst = 0; otg_inst < 4; otg_inst++) - dccg314_set_dpstreamclk(dccg, REFCLK, otg_inst, + for (otg_inst = 0; otg_inst < 4; otg_inst++) { + dccg35_set_dpstreamclk(dccg, REFCLK, otg_inst, otg_inst); + dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false); + } if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) for (otg_inst = 0; otg_inst < 5; otg_inst++) dccg35_set_physymclk_root_clock_gating(dccg, otg_inst, false); + + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + for (otg_inst = 0; otg_inst < 4; otg_inst++) + dccg35_set_dppclk_root_clock_gating(dccg, otg_inst, 0); + /* dccg35_enable_global_fgcg_rep( dccg, dccg->ctx->dc->debug.enable_fine_grain_clock_gating.bits @@ -611,24 +747,32 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst) DSCCLK0_DTO_PHASE, 0, DSCCLK0_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1); break; case 1: REG_UPDATE_2(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, 0, DSCCLK1_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1); break; case 2: REG_UPDATE_2(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, 0, DSCCLK2_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1); break; case 3: REG_UPDATE_2(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_PHASE, 0, DSCCLK3_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1); break; default: BREAK_TO_DEBUGGER(); @@ -650,24 +794,32 @@ static void dccg35_disable_dscclk(struct dccg *dccg, REG_UPDATE_2(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, 0, DSCCLK0_DTO_MODULO, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 0); break; case 1: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 0); REG_UPDATE_2(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, 0, DSCCLK1_DTO_MODULO, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 0); break; case 2: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 0); REG_UPDATE_2(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, 0, DSCCLK2_DTO_MODULO, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 0); break; case 3: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 0); REG_UPDATE_2(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_PHASE, 0, DSCCLK3_DTO_MODULO, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 0); break; default: return; @@ -682,22 +834,32 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, case 0: REG_UPDATE(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, 1); + if 
(dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 1); break; case 1: REG_UPDATE(SYMCLKB_CLOCK_ENABLE, SYMCLKB_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 1); break; case 2: REG_UPDATE(SYMCLKC_CLOCK_ENABLE, SYMCLKC_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 1); break; case 3: REG_UPDATE(SYMCLKD_CLOCK_ENABLE, SYMCLKD_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 1); break; case 4: REG_UPDATE(SYMCLKE_CLOCK_ENABLE, SYMCLKE_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, 1); break; } @@ -706,26 +868,36 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, 1, SYMCLKA_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, 1); break; case 1: REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, 1, SYMCLKB_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, 1); break; case 2: REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, 1, SYMCLKC_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, 1); break; case 3: REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, 1, SYMCLKD_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 1); break; case 4: REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_EN, 1, SYMCLKE_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 1); break; } } @@ -786,26 +958,36 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, 0, SYMCLKA_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, 0); break; case 1: REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, 0, SYMCLKB_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, 0); break; case 2: REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, 0, SYMCLKC_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, 0); break; case 3: REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, 0, SYMCLKD_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 0); break; case 4: REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_EN, 0, SYMCLKE_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, 
SYMCLKE_FE_ROOT_GATE_DISABLE, 0); break; } @@ -818,22 +1000,32 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst case 0: REG_UPDATE(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 0); break; case 1: REG_UPDATE(SYMCLKB_CLOCK_ENABLE, SYMCLKB_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 0); break; case 2: REG_UPDATE(SYMCLKC_CLOCK_ENABLE, SYMCLKC_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 0); break; case 3: REG_UPDATE(SYMCLKD_CLOCK_ENABLE, SYMCLKD_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 0); break; case 4: REG_UPDATE(SYMCLKE_CLOCK_ENABLE, SYMCLKE_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, 0); break; } } @@ -845,6 +1037,7 @@ static const struct dccg_funcs dccg35_funcs = { .get_dccg_ref_freq = dccg31_get_dccg_ref_freq, .dccg_init = dccg35_init, .set_dpstreamclk = dccg35_set_dpstreamclk, + .set_dpstreamclk_root_clock_gating = dccg35_set_dpstreamclk_root_clock_gating, .enable_symclk32_se = dccg31_enable_symclk32_se, .disable_symclk32_se = dccg35_disable_symclk32_se, .enable_symclk32_le = dccg31_enable_symclk32_le, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c index da94e5309f..20f810a664 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c @@ -80,7 +80,6 @@ enum signal_type dcn35_get_dig_mode( default: return SIGNAL_TYPE_NONE; } - return SIGNAL_TYPE_NONE; } void dcn35_link_encoder_setup( @@ -119,7 +118,7 @@ void dcn35_link_encoder_setup( void dcn35_link_encoder_init(struct link_encoder *enc) { - enc32_hw_init(enc); + enc31_hw_init(enc); dcn35_link_encoder_set_fgcg(enc, enc->ctx->dc->debug.enable_fine_grain_clock_gating.bits.dio); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h index e1e560732a..d546a36763 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h @@ -37,7 +37,9 @@ LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_MODE, mask_sh),\ LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_CLK_EN, mask_sh),\ LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SOFT_RESET, mask_sh),\ + LE_SF(DIG0_DIG_BE_CLK_CNTL, HDCP_SOFT_RESET, mask_sh),\ LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_CLOCK_ON, mask_sh),\ + LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_HDCP_CLOCK_ON, mask_sh),\ LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_TMDS_CLOCK_ON, mask_sh),\ LE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\ LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \ @@ -114,7 +116,15 @@ LE_SF(DIO_CLK_CNTL, SYMCLK_FE_G_GATE_DIS, mask_sh),\ LE_SF(DIO_CLK_CNTL, SYMCLK_R_GATE_DIS, mask_sh),\ LE_SF(DIO_CLK_CNTL, SYMCLK_G_GATE_DIS, mask_sh),\ - LE_SF(DIO_CLK_CNTL, DIO_FGCG_REP_DIS, mask_sh) + LE_SF(DIO_CLK_CNTL, DIO_FGCG_REP_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, DISPCLK_G_HDCP_GATE_DIS, 
mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKA_G_HDCP_GATE_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKB_G_HDCP_GATE_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKC_G_HDCP_GATE_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKD_G_HDCP_GATE_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKE_G_HDCP_GATE_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKF_G_HDCP_GATE_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKG_G_HDCP_GATE_DIS, mask_sh) void dcn35_link_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h index 499052329e..1212fcee38 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h @@ -28,7 +28,6 @@ #include "dcn30/dcn30_vpg.h" #include "dcn30/dcn30_afmt.h" #include "stream_encoder.h" -#include "dcn10/dcn10_link_encoder.h" #include "dcn20/dcn20_stream_encoder.h" /* Register bit field name change */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c deleted file mode 100644 index 3341ef7100..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c +++ /dev/null @@ -1,53 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "core_types.h" -#include "dcn35_dpp.h" -#include "reg_helper.h" - -#define REG(reg) dpp->tf_regs->reg - -#define CTX dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - ((const struct dcn35_dpp_shift *)(dpp->tf_shift))->field_name, \ - ((const struct dcn35_dpp_mask *)(dpp->tf_mask))->field_name - -bool dpp35_construct(struct dcn3_dpp *dpp, struct dc_context *ctx, - uint32_t inst, const struct dcn3_dpp_registers *tf_regs, - const struct dcn35_dpp_shift *tf_shift, - const struct dcn35_dpp_mask *tf_mask) -{ - return dpp32_construct(dpp, ctx, inst, tf_regs, - (const struct dcn3_dpp_shift *)(tf_shift), - (const struct dcn3_dpp_mask *)(tf_mask)); -} - -void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable) -{ - REG_UPDATE(DPP_CONTROL, DPP_FGCG_REP_DIS, !enable); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h deleted file mode 100644 index 09b84307cd..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DCN35_DPP_H__ -#define __DCN35_DPP_H__ - -#include "dcn32/dcn32_dpp.h" - -#define DPP_REG_LIST_SH_MASK_DCN35(mask_sh) \ - DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \ - TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh) - -#define DPP_REG_FIELD_LIST_DCN35(type) \ - struct { \ - DPP_REG_FIELD_LIST_DCN3(type); \ - type DPP_FGCG_REP_DIS; \ - } - -struct dcn35_dpp_shift { - DPP_REG_FIELD_LIST_DCN35(uint8_t); -}; - -struct dcn35_dpp_mask { - DPP_REG_FIELD_LIST_DCN35(uint32_t); -}; - -bool dpp35_construct(struct dcn3_dpp *dpp3, struct dc_context *ctx, - uint32_t inst, const struct dcn3_dpp_registers *tf_regs, - const struct dcn35_dpp_shift *tf_shift, - const struct dcn35_dpp_mask *tf_mask); - -void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable); - -#endif // __DCN35_DPP_H diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c index 339bf0c722..6293173ba2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c @@ -111,7 +111,7 @@ static uint32_t convert_and_clamp( static bool hubbub35_program_stutter_z8_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -297,7 +297,7 @@ static void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub, static bool hubbub35_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 6d7a15dcf8..34adae7ab6 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -36,6 +36,7 @@ struct dc_dp_mst_stream_allocation_table; struct aux_payload; enum aux_return_code_type; +enum set_config_status; /* * Allocate memory accessible by the GPU @@ -200,7 +201,7 @@ int dm_helper_dmub_aux_transfer_sync( const struct dc_link *link, struct aux_payload *payload, enum aux_return_code_type *operation_result); -enum set_config_status; + int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, const struct dc_link *link, struct set_config_cmd_payload *payload, diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index d0eed3b477..9405c47ee2 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -274,6 +274,16 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc #define PERF_TRACE() dm_perf_trace_timestamp(__func__, __LINE__, CTX) #define PERF_TRACE_CTX(__CTX) dm_perf_trace_timestamp(__func__, __LINE__, __CTX) +/* + * SMU message tracing + */ +void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx); +void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx); + +#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_msg(msg_id, param_in, ctx) +#define TRACE_SMU_DELAY(response_delay, ctx) dm_trace_smu_delay(response_delay, ctx) + + /* * DMUB Interfaces */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index c4a5efd2dd..a94b6d546c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -25,40 +25,8 @@ # It provides the general basic services required by 
other DAL # subcomponents. -ifdef CONFIG_X86 -dml_ccflags-$(CONFIG_CC_IS_GCC) := -mhard-float -dml_ccflags := $(dml_ccflags-y) -msse -endif - -ifdef CONFIG_PPC64 -dml_ccflags := -mhard-float -maltivec -endif - -ifdef CONFIG_ARM64 -dml_rcflags := -mgeneral-regs-only -endif - -ifdef CONFIG_LOONGARCH -dml_ccflags := -mfpu=64 -dml_rcflags := -msoft-float -endif - -ifdef CONFIG_CC_IS_GCC -ifneq ($(call gcc-min-version, 70100),y) -IS_OLD_GCC = 1 -endif -endif - -ifdef CONFIG_X86 -ifdef IS_OLD_GCC -# Stack alignment mismatch, proceed with caution. -# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 -# (8B stack alignment). -dml_ccflags += -mpreferred-stack-boundary=4 -else -dml_ccflags += -msse2 -endif -endif +dml_ccflags := $(CC_FLAGS_FPU) +dml_rcflags := $(CC_FLAGS_NO_FPU) ifneq ($(CONFIG_FRAME_WARN),0) ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 38ab9ad60e..74da9ebda0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1085,6 +1085,9 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000; bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency; bool is_pwrseq0 = link->link_index == 0; + bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 || + link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr); + bool is_replay = link && link->replay_settings.replay_feature_enabled; /* Don't support multi-plane configurations */ if (stream_status->plane_count > 1) @@ -1092,8 +1095,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0) return DCN_ZSTATE_SUPPORT_ALLOW; - else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr) - return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; + else if (is_pwrseq0 && (is_psr || is_replay)) + return DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY; else return allow_z8 ? 
DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW; } else { @@ -2369,7 +2372,7 @@ validate_out: static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_limit_table *clk_table, unsigned int high_voltage_lvl) { - struct _vcs_dpi_voltage_scaling_st low_pstate_lvl; + struct _vcs_dpi_voltage_scaling_st low_pstate_lvl = {0}; int i; low_pstate_lvl.state = 1; @@ -2474,7 +2477,7 @@ void dcn201_populate_dml_writeback_from_context_fpu(struct dc *dc, int pipe_cnt, i, j; double max_calc_writeback_dispclk; double writeback_dispclk; - struct writeback_st dout_wb; + struct writeback_st dout_wb = {0}; dc_assert_fp_enabled(); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c index ccb4ad78f6..81f7b90849 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c @@ -260,7 +260,7 @@ void dcn30_fpu_populate_dml_writeback_from_context( int pipe_cnt, i, j; double max_calc_writeback_dispclk; double writeback_dispclk; - struct writeback_st dout_wb; + struct writeback_st dout_wb = {0}; dc_assert_fp_enabled(); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index e7f4a2d491..e0b52db2c2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -3535,7 +3535,6 @@ static double TruncToValidBPP( return DesiredBPP; } } - return BPP_INVALID; } void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index 7307b7b8d8..94317b2e4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -487,6 +487,7 @@ void dcn31_calculate_wm_and_dlg_fp( { int i, pipe_idx, total_det = 0, active_hubp_count = 0; double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; + uint32_t cstate_enter_plus_exit_z8_ns; dc_assert_fp_enabled(); @@ -506,6 +507,13 @@ void dcn31_calculate_wm_and_dlg_fp( pipes[0].clks_cfg.dcfclk_mhz = dcfclk; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; + cstate_enter_plus_exit_z8_ns = + get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + if (get_stutter_period(&context->bw_ctx.dml, pipes, pipe_cnt) < dc->debug.minimum_z8_residency_time && + cstate_enter_plus_exit_z8_ns < dc->debug.minimum_z8_residency_time * 1000) + cstate_enter_plus_exit_z8_ns = dc->debug.minimum_z8_residency_time * 1000; + /* Set A: * All clocks min required * @@ -516,7 +524,7 @@ void dcn31_calculate_wm_and_dlg_fp( context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = 
cstate_enter_plus_exit_z8_ns; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h index 8f9c8faed2..d2ae43a82b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h @@ -30,6 +30,7 @@ #define DCN3_15_DEFAULT_DET_SIZE 192 #define DCN3_15_MIN_COMPBUF_SIZE_KB 128 #define DCN3_16_DEFAULT_DET_SIZE 192 +#define DCN3_16_MIN_COMPBUF_SIZE_KB 128 void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes, int pipe_cnt); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index adea459e7d..33cf824c5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -3679,7 +3679,6 @@ static double TruncToValidBPP( return DesiredBPP; } } - return BPP_INVALID; } static noinline void CalculatePrefetchSchedulePerPlane( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index fb21572750..21f637ae4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -310,7 +310,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool upscaled = false; const unsigned int max_allowed_vblank_nom = 1023; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index 88e56889a6..3242957d00 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -3788,7 +3788,6 @@ static double TruncToValidBPP( return DesiredBPP; } } - return BPP_INVALID; } static noinline void CalculatePrefetchSchedulePerPlane( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index a0a65e0991..f6fe0a64be 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -180,6 +180,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = { .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, }; +static bool dcn32_apply_merge_split_flags_helper(struct dc *dc, struct dc_state *context, + bool *repopulate_pipes, int *split, bool *merge); + void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr) { /* defaults */ @@ -622,7 +625,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, * to combine this with SubVP can cause issues with the scheduling). 
* - Not TMZ surface */ - if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && + if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) && !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE && @@ -720,7 +723,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context */ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context) { - struct pipe_ctx *subvp_pipes[2]; + struct pipe_ctx *subvp_pipes[2] = {0}; struct dc_stream_state *phantom = NULL; uint32_t microschedule_lines = 0; uint32_t index = 0; @@ -1425,13 +1428,14 @@ static bool is_test_pattern_enabled( return false; } -static void dcn32_full_validate_bw_helper(struct dc *dc, +static bool dcn32_full_validate_bw_helper(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *vlevel, int *split, bool *merge, - int *pipe_cnt) + int *pipe_cnt, + bool *repopulate_pipes) { struct vba_vars_st *vba = &context->bw_ctx.dml.vba; unsigned int dc_pipe_idx = 0; @@ -1461,6 +1465,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, vba->VoltageLevel = *vlevel; } + /* Apply split and merge flags before checking for subvp */ + if (!dcn32_apply_merge_split_flags_helper(dc, context, repopulate_pipes, split, merge)) + return false; + memset(split, 0, MAX_PIPES * sizeof(int)); + memset(merge, 0, MAX_PIPES * sizeof(bool)); + /* Conditions for setting up phantom pipes for SubVP: * 1. Not force disable SubVP * 2. Full update (i.e. !fast_validate) @@ -1475,19 +1485,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported || dc->debug.force_subvp_mclk_switch)) { - dcn32_merge_pipes_for_subvp(dc, context); - memset(merge, 0, MAX_PIPES * sizeof(bool)); - vlevel_temp = *vlevel; - /* to re-initialize viewport after the pipe merge */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - - if (!pipe_ctx->plane_state || !pipe_ctx->stream) - continue; - - resource_build_scaling_params(pipe_ctx); - } while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) && dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) { @@ -1576,8 +1574,6 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, * add phantom pipes. If pipe split (ODM / MPC) is required, both the main * and phantom pipes will be split in the regular pipe splitting sequence. */ - memset(split, 0, MAX_PIPES * sizeof(int)); - memset(merge, 0, MAX_PIPES * sizeof(bool)); *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge); vba->VoltageLevel = *vlevel; // Note: We can't apply the phantom pipes to hardware at this time. 
We have to wait @@ -1590,6 +1586,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, try_odm_power_optimization_and_revalidate( dc, context, pipes, split, merge, vlevel, *pipe_cnt); + return true; } static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) @@ -1929,106 +1926,23 @@ static bool dcn32_split_stream_for_mpc_or_odm( return true; } -bool dcn32_internal_validate_bw(struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *pipe_cnt_out, - int *vlevel_out, - bool fast_validate) +static bool dcn32_apply_merge_split_flags_helper( + struct dc *dc, + struct dc_state *context, + bool *repopulate_pipes, + int *split, + bool *merge) { - bool out = false; - bool repopulate_pipes = false; - int split[MAX_PIPES] = { 0 }; - bool merge[MAX_PIPES] = { false }; + int i, pipe_idx; bool newly_split[MAX_PIPES] = { false }; - int pipe_cnt, i, pipe_idx; - int vlevel = context->bw_ctx.dml.soc.num_states; struct vba_vars_st *vba = &context->bw_ctx.dml.vba; - dc_assert_fp_enabled(); - - ASSERT(pipes); - if (!pipes) - return false; - - // For each full update, remove all existing phantom pipes first - dc_state_remove_phantom_streams_and_planes(dc, context); - dc_state_release_phantom_streams_and_planes(dc, context); - - dc->res_pool->funcs->update_soc_for_wm_a(dc, context); - - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); - - if (!pipe_cnt) { - out = true; - goto validate_out; - } - - dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); - context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context); - - if (!fast_validate) - dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt); - - if (fast_validate || - (dc->debug.dml_disallow_alternate_prefetch_modes && - (vlevel == context->bw_ctx.dml.soc.num_states || - vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) { - /* - * If dml_disallow_alternate_prefetch_modes is false, then we have already - * tried alternate prefetch modes during full validation. - * - * If mode is unsupported or there is no p-state support, then - * fall back to favouring voltage. 
- * - * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try - * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2) - */ - context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = - dm_prefetch_support_none; - - context->bw_ctx.dml.validate_max_state = fast_validate; - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - - context->bw_ctx.dml.validate_max_state = false; - - if (vlevel < context->bw_ctx.dml.soc.num_states) { - memset(split, 0, sizeof(split)); - memset(merge, 0, sizeof(merge)); - vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); - // dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML - vba->VoltageLevel = vlevel; - } - } - - dml_log_mode_support_params(&context->bw_ctx.dml); - - if (vlevel == context->bw_ctx.dml.soc.num_states) - goto validate_fail; - - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; - - if (!pipe->stream) - continue; - - if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled - && !dc->config.enable_windowed_mpo_odm - && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_state->clip_rect, - &pipe->stream->src, - sizeof(struct rect)) != 0) { - ASSERT(mpo_pipe->plane_state != pipe->plane_state); - goto validate_fail; - } - pipe_idx++; - } - if (dc->config.enable_windowed_mpo_odm) { - repopulate_pipes = update_pipes_with_split_flags( - dc, context, vba, split, merge); + if (update_pipes_with_split_flags( + dc, context, vba, split, merge)) + *repopulate_pipes = true; } else { + /* the code below will be removed once windowed mpo odm is fully * enabled. 
*/ @@ -2085,7 +1999,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); memset(&pipe->link_res, 0, sizeof(pipe->link_res)); - repopulate_pipes = true; + *repopulate_pipes = true; } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { struct pipe_ctx *top_pipe = pipe->top_pipe; struct pipe_ctx *bottom_pipe = pipe->bottom_pipe; @@ -2101,7 +2015,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); memset(&pipe->link_res, 0, sizeof(pipe->link_res)); - repopulate_pipes = true; + *repopulate_pipes = true; } else ASSERT(0); /* Should never try to merge master pipe */ @@ -2140,15 +2054,15 @@ bool dcn32_internal_validate_bw(struct dc *dc, hsplit_pipe = dcn32_find_split_pipe(dc, context, old_index); ASSERT(hsplit_pipe); if (!hsplit_pipe) - goto validate_fail; + return false; if (!dcn32_split_stream_for_mpc_or_odm( dc, &context->res_ctx, pipe, hsplit_pipe, odm)) - goto validate_fail; + return false; newly_split[hsplit_pipe->pipe_idx] = true; - repopulate_pipes = true; + *repopulate_pipes = true; } if (split[i] == 4) { struct pipe_ctx *pipe_4to1; @@ -2163,11 +2077,11 @@ bool dcn32_internal_validate_bw(struct dc *dc, pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index); ASSERT(pipe_4to1); if (!pipe_4to1) - goto validate_fail; + return false; if (!dcn32_split_stream_for_mpc_or_odm( dc, &context->res_ctx, pipe, pipe_4to1, odm)) - goto validate_fail; + return false; newly_split[pipe_4to1->pipe_idx] = true; if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe @@ -2182,11 +2096,11 @@ bool dcn32_internal_validate_bw(struct dc *dc, pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index); ASSERT(pipe_4to1); if (!pipe_4to1) - goto validate_fail; + return false; if (!dcn32_split_stream_for_mpc_or_odm( dc, &context->res_ctx, hsplit_pipe, pipe_4to1, odm)) - goto validate_fail; + return false; newly_split[pipe_4to1->pipe_idx] = true; } if (odm) @@ -2198,11 +2112,122 @@ bool dcn32_internal_validate_bw(struct dc *dc, if (pipe->plane_state) { if (!resource_build_scaling_params(pipe)) - goto validate_fail; + return false; } } + + for (i = 0; i < context->stream_count; i++) { + struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx, + context->streams[i]); + + if (otg_master) + resource_build_test_pattern_params(&context->res_ctx, otg_master); + } + } + return true; +} + +bool dcn32_internal_validate_bw(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *vlevel_out, + bool fast_validate) +{ + bool out = false; + bool repopulate_pipes = false; + int split[MAX_PIPES] = { 0 }; + bool merge[MAX_PIPES] = { false }; + int pipe_cnt, i, pipe_idx; + int vlevel = context->bw_ctx.dml.soc.num_states; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + dc_assert_fp_enabled(); + + ASSERT(pipes); + if (!pipes) + return false; + + /* For each full update, remove all existing phantom pipes first */ + dc_state_remove_phantom_streams_and_planes(dc, context); + dc_state_release_phantom_streams_and_planes(dc, context); + + dc->res_pool->funcs->update_soc_for_wm_a(dc, context); + + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + + if (!pipe_cnt) { + out = true; + goto validate_out; + } + + dml_log_pipe_params(&context->bw_ctx.dml, pipes, 
pipe_cnt); + context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context); + + if (!fast_validate) { + if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, + &pipe_cnt, &repopulate_pipes)) + goto validate_fail; + } + + if (fast_validate || + (dc->debug.dml_disallow_alternate_prefetch_modes && + (vlevel == context->bw_ctx.dml.soc.num_states || + vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) { + /* + * If dml_disallow_alternate_prefetch_modes is false, then we have already + * tried alternate prefetch modes during full validation. + * + * If mode is unsupported or there is no p-state support, then + * fall back to favouring voltage. + * + * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try + * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2) + */ + context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = + dm_prefetch_support_none; + + context->bw_ctx.dml.validate_max_state = fast_validate; + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + + context->bw_ctx.dml.validate_max_state = false; + + if (vlevel < context->bw_ctx.dml.soc.num_states) { + memset(split, 0, sizeof(split)); + memset(merge, 0, sizeof(merge)); + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); + /* dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML */ + vba->VoltageLevel = vlevel; + } } + dml_log_mode_support_params(&context->bw_ctx.dml); + + if (vlevel == context->bw_ctx.dml.soc.num_states) + goto validate_fail; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; + + if (!pipe->stream) + continue; + + if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled + && !dc->config.enable_windowed_mpo_odm + && pipe->plane_state && mpo_pipe + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, + sizeof(struct rect)) != 0) { + ASSERT(mpo_pipe->plane_state != pipe->plane_state); + goto validate_fail; + } + pipe_idx++; + } + + if (!dcn32_apply_merge_split_flags_helper(dc, context, &repopulate_pipes, split, merge)) + goto validate_fail; + /* Actual dsc count per stream dsc validation*/ if (!dcn20_validate_dsc(dc, context)) { vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index 80fccd4999..ba1310c8fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -1650,6 +1650,8 @@ double dml32_TruncToValidBPP( MaxLinkBPP = 2 * MaxLinkBPP; } + *RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1); + if (DesiredBPP == 0) { if (DSCEnable) { if (MaxLinkBPP < MinDSCBPP) @@ -1676,10 +1678,6 @@ double dml32_TruncToValidBPP( else return DesiredBPP; } - - *RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1); - - return BPP_INVALID; } // TruncToValidBPP double dml32_RequiredDTBCLK( @@ -1975,8 +1973,8 @@ void dml32_CalculateVMRowAndSwath( unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX]; unsigned int PDEAndMetaPTEBytesFrameY; unsigned int PDEAndMetaPTEBytesFrameC; - unsigned int MetaRowByteY[DC__NUM_DPP__MAX]; - unsigned int 
MetaRowByteC[DC__NUM_DPP__MAX]; + unsigned int MetaRowByteY[DC__NUM_DPP__MAX] = {0}; + unsigned int MetaRowByteC[DC__NUM_DPP__MAX] = {0}; unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX]; unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX]; unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX]; @@ -4291,7 +4289,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( unsigned int i, j, k; unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0; unsigned int DRAMClockChangeSupportNumber = 0; - unsigned int LastSurfaceWithoutMargin; + unsigned int LastSurfaceWithoutMargin = 0; unsigned int DRAMClockChangeMethod = 0; bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false; double MinActiveFCLKChangeMargin = 0.; @@ -5656,9 +5654,9 @@ void dml32_CalculateStutterEfficiency( double LastZ8StutterPeriod = 0.0; double LastStutterPeriod = 0.0; unsigned int TotalNumberOfActiveOTG = 0; - double doublePixelClock; - unsigned int doubleHTotal; - unsigned int doubleVTotal; + double doublePixelClock = 0; + unsigned int doubleHTotal = 0; + unsigned int doubleVTotal = 0; bool SameTiming = true; double DETBufferingTimeY; double SwathWidthYCriticalSurface = 0.0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 6716696df7..beed7adbbd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -439,7 +439,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool upscaled = false; const unsigned int max_allowed_vblank_nom = 1023; @@ -577,6 +577,7 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context) { enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW; unsigned int i, plane_count = 0; + DC_LOGGER_INIT(dc->ctx->logger); for (i = 0; i < dc->res_pool->pipe_count; i++) { if (context->res_ctx.pipe_ctx[i].plane_state) @@ -602,11 +603,14 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context) if (is_pwrseq0 && allow_z10) support = DCN_ZSTATE_SUPPORT_ALLOW; else if (is_pwrseq0 && (is_psr || is_replay)) - support = allow_z8 ? 
DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; + support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY; else if (allow_z8) support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY; } + DC_LOG_SMU("zstate_support: %d, StutterPeriod: %d\n", support, + (int)context->bw_ctx.dml.vba.StutterPeriod); + context->bw_ctx.bw.dcn.clk.zstate_support = support; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index 40ca38dd1b..a201dbb743 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -237,7 +237,6 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = { .urgent_latency_adjustment_fabric_clock_component_us = 0, .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, .num_chans = 4, - .dram_clock_change_latency_us = 11.72, .dispclk_dppclk_vco_speed_mhz = 2400.0, }; @@ -474,7 +473,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool upscaled = false; const unsigned int max_allowed_vblank_nom = 1023; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile index acff3449b8..c576bb0c78 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile @@ -24,40 +24,8 @@ # # Makefile for dml2. -ifdef CONFIG_X86 -dml2_ccflags-$(CONFIG_CC_IS_GCC) := -mhard-float -dml2_ccflags := $(dml2_ccflags-y) -msse -endif - -ifdef CONFIG_PPC64 -dml2_ccflags := -mhard-float -maltivec -endif - -ifdef CONFIG_ARM64 -dml2_rcflags := -mgeneral-regs-only -endif - -ifdef CONFIG_LOONGARCH -dml2_ccflags := -mfpu=64 -dml2_rcflags := -msoft-float -endif - -ifdef CONFIG_CC_IS_GCC -ifeq ($(call cc-ifversion, -lt, 0701, y), y) -IS_OLD_GCC = 1 -endif -endif - -ifdef CONFIG_X86 -ifdef IS_OLD_GCC -# Stack alignment mismatch, proceed with caution. -# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 -# (8B stack alignment). 
-dml2_ccflags += -mpreferred-stack-boundary=4 -else -dml2_ccflags += -msse2 -endif -endif +dml2_ccflags := $(CC_FLAGS_FPU) +dml2_rcflags := $(CC_FLAGS_NO_FPU) ifneq ($(CONFIG_FRAME_WARN),0) ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y) @@ -67,6 +35,7 @@ frame_warn_flag := -Wframe-larger-than=2048 endif endif +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2 CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 79cd4c4790..3e919f5c00 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -31,6 +31,8 @@ #include "dml_assert.h" #define DML2_MAX_FMT_420_BUFFER_WIDTH 4096 +#define TB_BORROWED_MAX 400 + // --------------------------- // Declaration Begins // --------------------------- @@ -2782,6 +2784,8 @@ static dml_float_t TruncToValidBPP( } } + *RequiredSlots = (dml_uint_t)(dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1)); + if (DesiredBPP == 0) { if (DSCEnable) { if (MaxLinkBPP < MinDSCBPP) { @@ -2810,10 +2814,6 @@ static dml_float_t TruncToValidBPP( return DesiredBPP; } } - - *RequiredSlots = (dml_uint_t)(dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1)); - - return __DML_DPP_INVALID__; } // TruncToValidBPP static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( @@ -3790,9 +3790,9 @@ static void CalculateStutterEfficiency(struct display_mode_lib_scratch_st *scrat dml_bool_t FoundCriticalSurface = false; dml_uint_t TotalNumberOfActiveOTG = 0; - dml_float_t SinglePixelClock; - dml_uint_t SingleHTotal; - dml_uint_t SingleVTotal; + dml_float_t SinglePixelClock = 0; + dml_uint_t SingleHTotal = 0; + dml_uint_t SingleVTotal = 0; dml_bool_t SameTiming = true; dml_float_t LastStutterPeriod = 0.0; @@ -10216,6 +10216,7 @@ dml_get_var_func(fraction_of_urgent_bandwidth_imm_flip, dml_float_t, mode_lib->m dml_get_var_func(urgent_latency, dml_float_t, mode_lib->mp.UrgentLatency); dml_get_var_func(clk_dcf_deepsleep, dml_float_t, mode_lib->mp.DCFCLKDeepSleep); dml_get_var_func(wm_writeback_dram_clock_change, dml_float_t, mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark); +dml_get_var_func(wm_writeback_urgent, dml_float_t, mode_lib->mp.Watermark.WritebackUrgentWatermark); dml_get_var_func(stutter_efficiency, dml_float_t, mode_lib->mp.StutterEfficiency); dml_get_var_func(stutter_efficiency_no_vblank, dml_float_t, mode_lib->mp.StutterEfficiencyNotIncludingVBlank); dml_get_var_func(stutter_efficiency_z8, dml_float_t, mode_lib->mp.Z8StutterEfficiency); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h index 8452485684..3116b88e99 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h @@ -94,6 +94,7 @@ dml_get_var_decl(wm_usr_retraining, dml_float_t); dml_get_var_decl(urgent_latency, dml_float_t); dml_get_var_decl(wm_writeback_dram_clock_change, dml_float_t); +dml_get_var_decl(wm_writeback_urgent, dml_float_t); dml_get_var_decl(stutter_efficiency_no_vblank, dml_float_t); dml_get_var_decl(stutter_efficiency, dml_float_t); dml_get_var_decl(stutter_efficiency_z8, dml_float_t); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h 
b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h index de63364be0..14d3895252 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h @@ -41,6 +41,7 @@ #define DCN_DML__VM_PRESENT__1 1 #define DCN_DML__HOST_VM_PRESENT 1 #define DCN_DML__HOST_VM_PRESENT__1 1 +#define DCN_DML__DWB 1 #include "dml_depedencies.h" @@ -59,6 +60,7 @@ #define __DML_NUM_PLANES__ DCN_DML__NUM_PLANE #define __DML_NUM_CURSORS__ DCN_DML__NUM_CURSOR #define __DML_DPP_INVALID__ 0 +#define __DML_NUM_DMB__ DCN_DML__DWB #define __DML_PIPE_NO_PLANE__ 99 #define __DML_MAX_STATE_ARRAY_SIZE__ DCN_DML__NUM_PWR_STATE diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index e1f1b5dd13..ad2a6b4769 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -795,8 +795,8 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id, plane_index); } -static unsigned int get_mpc_factor(struct dml2_context *ctx, - const struct dc_state *state, +static unsigned int get_target_mpc_factor(struct dml2_context *ctx, + struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_stream_status *status, @@ -807,10 +807,10 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx, unsigned int cfg_idx; unsigned int mpc_factor; - get_plane_id(ctx, state, status->plane_states[plane_idx], - stream->stream_id, plane_idx, &plane_id); - cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id); if (ctx->architecture == dml2_architecture_20) { + get_plane_id(ctx, state, status->plane_states[plane_idx], + stream->stream_id, plane_idx, &plane_id); + cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id); mpc_factor = (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx]; } else { mpc_factor = 1; @@ -824,16 +824,18 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx, return mpc_factor; } -static unsigned int get_odm_factor( +static unsigned int get_target_odm_factor( const struct dml2_context *ctx, + struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_stream_state *stream) { - unsigned int cfg_idx = find_disp_cfg_idx_by_stream_id( - mapping, stream->stream_id); + unsigned int cfg_idx; - if (ctx->architecture == dml2_architecture_20) + if (ctx->architecture == dml2_architecture_20) { + cfg_idx = find_disp_cfg_idx_by_stream_id( + mapping, stream->stream_id); switch (disp_cfg->hw.ODMMode[cfg_idx]) { case dml_odm_mode_bypass: return 1; @@ -844,83 +846,122 @@ static unsigned int get_odm_factor( default: break; } + } ASSERT(false); return 1; } +static unsigned int get_source_odm_factor(const struct dml2_context *ctx, + struct dc_state *state, + const struct dc_stream_state *stream) +{ + struct pipe_ctx *otg_master = ctx->config.callbacks.get_otg_master_for_stream(&state->res_ctx, stream); + + return ctx->config.callbacks.get_odm_slice_count(otg_master); +} + +static unsigned int get_source_mpc_factor(const struct dml2_context *ctx, + struct dc_state *state, + const struct dc_plane_state *plane) +{ + struct pipe_ctx *dpp_pipes[MAX_PIPES] = {0}; + int dpp_pipe_count = ctx->config.callbacks.get_dpp_pipes_for_plane(plane, + 
&state->res_ctx, dpp_pipes); + + ASSERT(dpp_pipe_count > 0); + return ctx->config.callbacks.get_mpc_slice_count(dpp_pipes[0]); +} + + static void populate_mpc_factors_for_stream( struct dml2_context *ctx, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, - const struct dc_state *state, + struct dc_state *state, unsigned int stream_idx, - unsigned int odm_factor, - unsigned int mpc_factors[MAX_PIPES]) + struct dml2_pipe_combine_factor odm_factor, + struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES]) { const struct dc_stream_status *status = &state->stream_status[stream_idx]; int i; - for (i = 0; i < status->plane_count; i++) - if (odm_factor == 1) - mpc_factors[i] = get_mpc_factor( - ctx, state, disp_cfg, mapping, status, - state->streams[stream_idx], i); - else - mpc_factors[i] = 1; + for (i = 0; i < status->plane_count; i++) { + mpc_factors[i].source = get_source_mpc_factor(ctx, state, status->plane_states[i]); + mpc_factors[i].target = (odm_factor.target == 1) ? + get_target_mpc_factor(ctx, state, disp_cfg, mapping, status, state->streams[stream_idx], i) : 1; + } } static void populate_odm_factors(const struct dml2_context *ctx, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, - const struct dc_state *state, - unsigned int odm_factors[MAX_PIPES]) + struct dc_state *state, + struct dml2_pipe_combine_factor odm_factors[MAX_PIPES]) { int i; - for (i = 0; i < state->stream_count; i++) - odm_factors[i] = get_odm_factor( - ctx, disp_cfg, mapping, state->streams[i]); + for (i = 0; i < state->stream_count; i++) { + odm_factors[i].source = get_source_odm_factor(ctx, state, state->streams[i]); + odm_factors[i].target = get_target_odm_factor( + ctx, state, disp_cfg, mapping, state->streams[i]); + } } -static bool map_dc_pipes_for_stream(struct dml2_context *ctx, +static bool unmap_dc_pipes_for_stream(struct dml2_context *ctx, struct dc_state *state, const struct dc_state *existing_state, const struct dc_stream_state *stream, const struct dc_stream_status *status, - unsigned int odm_factor, - unsigned int mpc_factors[MAX_PIPES]) + struct dml2_pipe_combine_factor odm_factor, + struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES]) { int plane_idx; bool result = true; - if (odm_factor == 1) - /* - * ODM and MPC combines are by DML design mutually exclusive. - * ODM factor of 1 means MPC factors may be greater than 1. - * In this case, we want to set ODM factor to 1 first to free up - * pipe resources from previous ODM configuration before setting - * up MPC combine to acquire more pipe resources. 
- */ + for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++) + if (mpc_factors[plane_idx].target < mpc_factors[plane_idx].source) + result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count( + state, + existing_state, + ctx->config.callbacks.dc->res_pool, + status->plane_states[plane_idx], + mpc_factors[plane_idx].target); + if (odm_factor.target < odm_factor.source) result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count( state, existing_state, ctx->config.callbacks.dc->res_pool, stream, - odm_factor); + odm_factor.target); + return result; +} + +static bool map_dc_pipes_for_stream(struct dml2_context *ctx, + struct dc_state *state, + const struct dc_state *existing_state, + const struct dc_stream_state *stream, + const struct dc_stream_status *status, + struct dml2_pipe_combine_factor odm_factor, + struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES]) +{ + int plane_idx; + bool result = true; + for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++) - result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count( - state, - existing_state, - ctx->config.callbacks.dc->res_pool, - status->plane_states[plane_idx], - mpc_factors[plane_idx]); - if (odm_factor > 1) + if (mpc_factors[plane_idx].target > mpc_factors[plane_idx].source) + result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count( + state, + existing_state, + ctx->config.callbacks.dc->res_pool, + status->plane_states[plane_idx], + mpc_factors[plane_idx].target); + if (odm_factor.target > odm_factor.source) result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count( state, existing_state, ctx->config.callbacks.dc->res_pool, stream, - odm_factor); + odm_factor.target); return result; } @@ -930,20 +971,20 @@ static bool map_dc_pipes_with_callbacks(struct dml2_context *ctx, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_state *existing_state) { - unsigned int odm_factors[MAX_PIPES]; - unsigned int mpc_factors_for_stream[MAX_PIPES]; int i; bool result = true; - populate_odm_factors(ctx, disp_cfg, mapping, state, odm_factors); - for (i = 0; i < state->stream_count; i++) { + populate_odm_factors(ctx, disp_cfg, mapping, state, ctx->pipe_combine_scratch.odm_factors); + for (i = 0; i < state->stream_count; i++) populate_mpc_factors_for_stream(ctx, disp_cfg, mapping, state, - i, odm_factors[i], mpc_factors_for_stream); - result &= map_dc_pipes_for_stream(ctx, state, existing_state, - state->streams[i], - &state->stream_status[i], - odm_factors[i], mpc_factors_for_stream); - } + i, ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]); + for (i = 0; i < state->stream_count; i++) + result &= unmap_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i], + &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]); + for (i = 0; i < state->stream_count; i++) + result &= map_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i], + &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]); + return result; } @@ -1039,6 +1080,12 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s ASSERT(false); } } + + if (ctx->config.callbacks.build_test_pattern_params && + pipe->stream && + pipe->prev_odm_pipe == NULL && + pipe->top_pipe == NULL) + ctx->config.callbacks.build_test_pattern_params(&state->res_ctx, pipe); } return true; diff --git 
a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h index 2f91244a7b..1538b708d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h @@ -30,6 +30,8 @@ #include "dml2_dc_types.h" struct dml2_context; +struct dml2_dml_to_dc_pipe_mapping; +struct dml_display_cfg_st; /* * dml2_map_dc_pipes - Creates a pipe linkage in dc_state based on current display config. diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h index 1cf8a884c0..9dab4e43c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h @@ -109,10 +109,21 @@ enum dml2_architecture { dml2_architecture_20, }; +struct dml2_pipe_combine_factor { + unsigned int source; + unsigned int target; +}; + +struct dml2_pipe_combine_scratch { + struct dml2_pipe_combine_factor odm_factors[MAX_PIPES]; + struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES][MAX_PIPES]; +}; + struct dml2_context { enum dml2_architecture architecture; struct dml2_configuration_options config; struct dml2_helper_det_policy_scratch det_helper_scratch; + struct dml2_pipe_combine_scratch pipe_combine_scratch; union { struct { struct display_mode_lib_st dml_core_ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c index 282d70e2b1..3d29169dd6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c @@ -750,6 +750,8 @@ static void enable_phantom_plane(struct dml2_context *ctx, ctx->config.svp_pstate.callbacks.dc, state, curr_pipe->plane_state); + if (!phantom_plane) + return; } memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 3af759dca6..edff6b4476 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -29,6 +29,7 @@ #include "dml2_translation_helper.h" #define NUM_DCFCLK_STAS 5 +#define NUM_DCFCLK_STAS_NEW 8 void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out) { @@ -250,12 +251,21 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, { struct dml2_policy_build_synthetic_soc_states_scratch *s = &dml2->v20.scratch.create_scratch.build_synthetic_socbb_scratch; struct dml2_policy_build_synthetic_soc_states_params *p = &dml2->v20.scratch.build_synthetic_socbb_params; - unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS]; + unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS] = {0}; + unsigned int dcfclk_stas_mhz_new[NUM_DCFCLK_STAS_NEW] = {0}; + unsigned int dml_project = dml2->v20.dml_core_ctx.project; + unsigned int i = 0; unsigned int transactions_per_mem_clock = 16; // project specific, depends on used Memory type - p->dcfclk_stas_mhz = dcfclk_stas_mhz; - p->num_dcfclk_stas = NUM_DCFCLK_STAS; + if (dml_project == dml_project_dcn351) { + p->dcfclk_stas_mhz = dcfclk_stas_mhz_new; + p->num_dcfclk_stas = NUM_DCFCLK_STAS_NEW; + } else { + p->dcfclk_stas_mhz = dcfclk_stas_mhz; + p->num_dcfclk_stas = NUM_DCFCLK_STAS; + } + p->in_bbox = in_bbox; p->out_states = out; p->in_states = 
&dml2->v20.scratch.create_scratch.in_states; @@ -433,8 +443,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, } dml2_policy_build_synthetic_soc_states(s, p); - if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 || - dml2->v20.dml_core_ctx.project == dml_project_dcn351) { + if (dml2->v20.dml_core_ctx.project == dml_project_dcn35) { // Override last out_state with data from last in_state // This will ensure that out_state contains max fclk memcpy(&p->out_states->state_array[p->out_states->num_states - 1], @@ -795,7 +804,7 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p } } -static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context) +static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context, struct scaler_data *out) { int i; struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe; @@ -816,7 +825,7 @@ static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state } ASSERT(i < MAX_PIPES); - return temp_pipe->plane_res.scl_data; + memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out)); } static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in) @@ -875,27 +884,31 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, struct dc_state *context) { - const struct scaler_data scaler_data = get_scaler_data_for_plane(in, context); + struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL); + if (!scaler_data) + return; + + get_scaler_data_for_plane(in, context, scaler_data); out->CursorBPP[location] = dml_cur_32bit; out->CursorWidth[location] = 256; out->GPUVMMinPageSizeKBytes[location] = 256; - out->ViewportWidth[location] = scaler_data.viewport.width; - out->ViewportHeight[location] = scaler_data.viewport.height; - out->ViewportWidthChroma[location] = scaler_data.viewport_c.width; - out->ViewportHeightChroma[location] = scaler_data.viewport_c.height; - out->ViewportXStart[location] = scaler_data.viewport.x; - out->ViewportYStart[location] = scaler_data.viewport.y; - out->ViewportXStartC[location] = scaler_data.viewport_c.x; - out->ViewportYStartC[location] = scaler_data.viewport_c.y; + out->ViewportWidth[location] = scaler_data->viewport.width; + out->ViewportHeight[location] = scaler_data->viewport.height; + out->ViewportWidthChroma[location] = scaler_data->viewport_c.width; + out->ViewportHeightChroma[location] = scaler_data->viewport_c.height; + out->ViewportXStart[location] = scaler_data->viewport.x; + out->ViewportYStart[location] = scaler_data->viewport.y; + out->ViewportXStartC[location] = scaler_data->viewport_c.x; + out->ViewportYStartC[location] = scaler_data->viewport_c.y; out->ViewportStationary[location] = false; - out->ScalerEnabled[location] = scaler_data.ratios.horz.value != dc_fixpt_one.value || - scaler_data.ratios.horz_c.value != dc_fixpt_one.value || - scaler_data.ratios.vert.value != dc_fixpt_one.value || - scaler_data.ratios.vert_c.value != dc_fixpt_one.value; + out->ScalerEnabled[location] = scaler_data->ratios.horz.value != dc_fixpt_one.value || + scaler_data->ratios.horz_c.value != dc_fixpt_one.value || + scaler_data->ratios.vert.value != dc_fixpt_one.value || + scaler_data->ratios.vert_c.value != dc_fixpt_one.value; /* Current driver code base 
uses LBBitPerPixel as 57. There is a discrepancy * from the HW/DML teams about this value. Initialize LBBitPerPixel with the @@ -911,25 +924,25 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out out->VRatioChroma[location] = 1; } else { /* Follow the original dml_wrapper.c code direction to fix scaling issues */ - out->HRatio[location] = (dml_float_t)scaler_data.ratios.horz.value / (1ULL << 32); - out->HRatioChroma[location] = (dml_float_t)scaler_data.ratios.horz_c.value / (1ULL << 32); - out->VRatio[location] = (dml_float_t)scaler_data.ratios.vert.value / (1ULL << 32); - out->VRatioChroma[location] = (dml_float_t)scaler_data.ratios.vert_c.value / (1ULL << 32); + out->HRatio[location] = (dml_float_t)scaler_data->ratios.horz.value / (1ULL << 32); + out->HRatioChroma[location] = (dml_float_t)scaler_data->ratios.horz_c.value / (1ULL << 32); + out->VRatio[location] = (dml_float_t)scaler_data->ratios.vert.value / (1ULL << 32); + out->VRatioChroma[location] = (dml_float_t)scaler_data->ratios.vert_c.value / (1ULL << 32); } - if (!scaler_data.taps.h_taps) { + if (!scaler_data->taps.h_taps) { out->HTaps[location] = 1; out->HTapsChroma[location] = 1; } else { - out->HTaps[location] = scaler_data.taps.h_taps; - out->HTapsChroma[location] = scaler_data.taps.h_taps_c; + out->HTaps[location] = scaler_data->taps.h_taps; + out->HTapsChroma[location] = scaler_data->taps.h_taps_c; } - if (!scaler_data.taps.v_taps) { + if (!scaler_data->taps.v_taps) { out->VTaps[location] = 1; out->VTapsChroma[location] = 1; } else { - out->VTaps[location] = scaler_data.taps.v_taps; - out->VTapsChroma[location] = scaler_data.taps.v_taps_c; + out->VTaps[location] = scaler_data->taps.v_taps; + out->VTapsChroma[location] = scaler_data->taps.v_taps_c; } out->SourceScan[location] = (enum dml_rotation_angle)in->rotation; @@ -940,6 +953,8 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out out->DynamicMetadataTransmittedBytes[location] = 0; out->NumberOfCursors[location] = 1; + + kfree(scaler_data); } static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2, @@ -1053,7 +1068,46 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, plane_index = 0; } } - +static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cfg_st *out, + unsigned int location, const struct dc_stream_state *in) +{ + if (in->num_wb_info > 0) { + for (int i = 0; i < __DML_NUM_DMB__; i++) { + const struct dc_writeback_info *wb_info = &in->writeback_info[i]; + /*current dml support 1 dwb per stream, limitation*/ + if (wb_info->wb_enabled) { + out->WritebackEnable[location] = wb_info->wb_enabled; + out->ActiveWritebacksPerSurface[location] = wb_info->dwb_params.cnv_params.src_width; + out->WritebackDestinationWidth[location] = wb_info->dwb_params.dest_width; + out->WritebackDestinationHeight[location] = wb_info->dwb_params.dest_height; + + out->WritebackSourceWidth[location] = wb_info->dwb_params.cnv_params.crop_en ? + wb_info->dwb_params.cnv_params.crop_width : + wb_info->dwb_params.cnv_params.src_width; + + out->WritebackSourceHeight[location] = wb_info->dwb_params.cnv_params.crop_en ? + wb_info->dwb_params.cnv_params.crop_height : + wb_info->dwb_params.cnv_params.src_height; + /*current design does not have chroma scaling, need to follow up*/ + out->WritebackHTaps[location] = wb_info->dwb_params.scaler_taps.h_taps > 0 ? 
+ wb_info->dwb_params.scaler_taps.h_taps : 1; + out->WritebackVTaps[location] = wb_info->dwb_params.scaler_taps.v_taps > 0 ? + wb_info->dwb_params.scaler_taps.v_taps : 1; + + out->WritebackHRatio[location] = wb_info->dwb_params.cnv_params.crop_en ? + (double)wb_info->dwb_params.cnv_params.crop_width / + (double)wb_info->dwb_params.dest_width : + (double)wb_info->dwb_params.cnv_params.src_width / + (double)wb_info->dwb_params.dest_width; + out->WritebackVRatio[location] = wb_info->dwb_params.cnv_params.crop_en ? + (double)wb_info->dwb_params.cnv_params.crop_height / + (double)wb_info->dwb_params.dest_height : + (double)wb_info->dwb_params.cnv_params.src_height / + (double)wb_info->dwb_params.dest_height; + } + } + } +} void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg) { int i = 0, j = 0, k = 0; @@ -1098,6 +1152,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]); populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context); + /*Call site for populate_dml_writeback_cfg_from_stream_state*/ + populate_dml_writeback_cfg_from_stream_state(&dml_dispcfg->writeback, + disp_cfg_stream_location, context->streams[i]); + switch (context->streams[i]->debug.force_odm_combine_segments) { case 2: dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_stream_location] = dml_odm_use_policy_combine_2to1; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index bb4e812248..cbd1c1f26b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -224,7 +224,7 @@ static int find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state, const struct dc_plane_state *plane, unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id) { - int i, j; + unsigned int i, j; bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists; if (!plane_id) @@ -376,10 +376,16 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dppclk_mhz * 1000; context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dispclk_mhz * 1000; + + if (dc->config.forced_clocks || dc->debug.max_disp_clk) { + context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz; + context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz ; + } } void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx) @@ -398,6 +404,71 @@ void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display watermark->cstate_pstate.cstate_exit_z8_ns = dml_get_wm_z8_stutter(dml_core_ctx) * 1000; } +unsigned int dml2_calc_max_scaled_time( + unsigned int time_per_pixel, + enum 
mmhubbub_wbif_mode mode, + unsigned int urgent_watermark) +{ + unsigned int time_per_byte = 0; + unsigned int total_free_entry = 0xb40; + unsigned int buf_lh_capability; + unsigned int max_scaled_time; + + if (mode == PACKED_444) /* packed mode 32 bpp */ + time_per_byte = time_per_pixel/4; + else if (mode == PACKED_444_FP16) /* packed mode 64 bpp */ + time_per_byte = time_per_pixel/8; + + if (time_per_byte == 0) + time_per_byte = 1; + + buf_lh_capability = (total_free_entry*time_per_byte*32) >> 6; /* time_per_byte is in u6.6*/ + max_scaled_time = buf_lh_capability - urgent_watermark; + return max_scaled_time; +} + +void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx) +{ + int i, j = 0;; + struct mcif_arb_params *wb_arb_params = NULL; + struct dcn_bw_writeback *bw_writeback = NULL; + enum mmhubbub_wbif_mode wbif_mode = PACKED_444_FP16; /*for now*/ + + if (context->stream_count != 0) { + for (i = 0; i < context->stream_count; i++) { + if (context->streams[i]->num_wb_info != 0) + j++; + } + } + if (j == 0) /*no dwb */ + return; + for (i = 0; i < __DML_NUM_DMB__; i++) { + bw_writeback = &context->bw_ctx.bw.dcn.bw_writeback; + wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[i]; + + for (j = 0 ; j < 4; j++) { + /*current dml only has one set of watermark, need to follow up*/ + bw_writeback->mcif_wb_arb[i].cli_watermark[j] = + dml_get_wm_writeback_urgent(dml_core_ctx) * 1000; + bw_writeback->mcif_wb_arb[i].pstate_watermark[j] = + dml_get_wm_writeback_dram_clock_change(dml_core_ctx) * 1000; + } + if (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk != 0) { + /* time_per_pixel should be in u6.6 format */ + bw_writeback->mcif_wb_arb[i].time_per_pixel = + (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; + } + bw_writeback->mcif_wb_arb[i].slice_lines = 32; + bw_writeback->mcif_wb_arb[i].arbitration_slice = 2; + bw_writeback->mcif_wb_arb[i].max_scaled_time = + dml2_calc_max_scaled_time(wb_arb_params->time_per_pixel, + wbif_mode, wb_arb_params->cli_watermark[0]); + /*not required any more*/ + bw_writeback->mcif_wb_arb[i].dram_speed_change_duration = + dml_get_wm_writeback_dram_clock_change(dml_core_ctx) * 1000; + + } +} void dml2_initialize_det_scratch(struct dml2_context *in_ctx) { int i; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h index 5842d6d3c4..04fcfe6371 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h @@ -40,9 +40,14 @@ void dml2_util_copy_dml_output(struct dml_output_cfg_st *dml_output_array, unsig unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, enum dml_output_encoder_class encoder, bool dsc_enabled); void dml2_copy_clocks_to_dc_state(struct dml2_dcn_clocks *out_clks, struct dc_state *context); void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx); +void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx); int dml2_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id); bool is_dtbclk_required(const struct dc *dc, struct dc_state *context); bool dml2_is_stereo_timing(const struct dc_stream_state *stream); +unsigned int dml2_calc_max_scaled_time( + unsigned int time_per_pixel, + enum mmhubbub_wbif_mode mode, + unsigned int urgent_watermark); /* * dml2_dc_construct_pipes - This function will determine if we need additional 
pipes based diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 72cca36706..9412d5384a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -570,6 +570,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s struct dml2_dcn_clocks out_clks; unsigned int result = 0; bool need_recalculation = false; + uint32_t cstate_enter_plus_exit_z8_ns; if (!context || context->stream_count == 0) return true; @@ -639,8 +640,17 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx); memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c)); dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx); + dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx); //copy for deciding zstate use context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod; + + cstate_enter_plus_exit_z8_ns = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns; + + if (context->bw_ctx.dml.vba.StutterPeriod < in_dc->debug.minimum_z8_residency_time && + cstate_enter_plus_exit_z8_ns < in_dc->debug.minimum_z8_residency_time * 1000) + cstate_enter_plus_exit_z8_ns = in_dc->debug.minimum_z8_residency_time * 1000; + + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = cstate_enter_plus_exit_z8_ns; } return result; @@ -681,13 +691,13 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d } } -bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_validate) +bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate) { bool out = false; - if (!(context->bw_ctx.dml2)) + if (!dml2) return false; - dml2_apply_debug_options(in_dc, context->bw_ctx.dml2); + dml2_apply_debug_options(in_dc, dml2); /* Use dml_validate_only for fast_validate path */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index cc662d682f..4a8bd2f419 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -71,6 +71,7 @@ struct dml2_dcn_clocks { struct dml2_dc_callbacks { struct dc *dc; bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx); + void (*build_test_pattern_params)(struct resource_context *res_ctx, struct pipe_ctx *otg_master); bool (*can_support_mclk_switch_using_fw_based_vblank_stretch)(struct dc *dc, struct dc_state *context); bool (*acquire_secondary_pipe_for_mpc_odm)(const struct dc *dc, struct dc_state *state, struct pipe_ctx *pri_pipe, struct pipe_ctx *sec_pipe, bool odm); bool (*update_pipes_for_stream_with_slice_count)( @@ -86,8 +87,23 @@ struct dml2_dc_callbacks { const struct dc_plane_state *plane, int slice_count); int (*get_odm_slice_index)(const struct pipe_ctx *opp_head); + int (*get_odm_slice_count)(const struct pipe_ctx *opp_head); int (*get_mpc_slice_index)(const struct pipe_ctx *dpp_pipe); + int (*get_mpc_slice_count)(const struct pipe_ctx *dpp_pipe); struct pipe_ctx *(*get_opp_head)(const struct pipe_ctx *pipe_ctx); + struct pipe_ctx *(*get_otg_master_for_stream)( + struct resource_context *res_ctx, + const struct 
dc_stream_state *stream); + int (*get_opp_heads_for_otg_master)(const struct pipe_ctx *otg_master, + struct resource_context *res_ctx, + struct pipe_ctx *opp_heads[MAX_PIPES]); + int (*get_dpp_pipes_for_plane)(const struct dc_plane_state *plane, + struct resource_context *res_ctx, + struct pipe_ctx *dpp_pipes[MAX_PIPES]); + struct dc_stream_status *(*get_stream_status)( + struct dc_state *state, + const struct dc_stream_state *stream); + struct dc_stream_state *(*get_stream_from_id)(const struct dc_state *state, unsigned int id); }; struct dml2_dc_svp_callbacks { @@ -96,10 +112,10 @@ struct dml2_dc_svp_callbacks { struct dc_stream_state* (*create_phantom_stream)(const struct dc *dc, struct dc_state *state, struct dc_stream_state *main_stream); - struct dc_plane_state* (*create_phantom_plane)(struct dc *dc, + struct dc_plane_state* (*create_phantom_plane)(const struct dc *dc, struct dc_state *state, struct dc_plane_state *main_plane); - enum dc_status (*add_phantom_stream)(struct dc *dc, + enum dc_status (*add_phantom_stream)(const struct dc *dc, struct dc_state *state, struct dc_stream_state *phantom_stream, struct dc_stream_state *main_stream); @@ -108,7 +124,7 @@ struct dml2_dc_svp_callbacks { struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); - enum dc_status (*remove_phantom_stream)(struct dc *dc, + enum dc_status (*remove_phantom_stream)(const struct dc *dc, struct dc_state *state, struct dc_stream_state *stream); void (*release_phantom_plane)(const struct dc *dc, @@ -121,6 +137,15 @@ struct dml2_dc_svp_callbacks { enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state, const struct pipe_ctx *pipe_ctx); enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream); struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream); + bool (*remove_phantom_streams_and_planes)( + const struct dc *dc, + struct dc_state *state); + void (*release_phantom_streams_and_planes)( + const struct dc *dc, + struct dc_state *state); + unsigned int (*calculate_mall_ways_from_bytes)( + const struct dc *dc, + unsigned int total_size_in_mall_bytes); }; struct dml2_clks_table_entry { @@ -191,6 +216,8 @@ struct dml2_configuration_options { unsigned int max_segments_per_hubp; unsigned int det_segment_size; bool map_dc_pipes_with_callbacks; + + bool use_clock_dc_limits; }; /* @@ -244,6 +271,7 @@ void dml2_reinit(const struct dc *in_dc, */ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, + struct dml2_context *dml2, bool fast_validate); /* diff --git a/drivers/gpu/drm/amd/display/dc/dpp/Makefile b/drivers/gpu/drm/amd/display/dc/dpp/Makefile new file mode 100644 index 0000000000..99bd360735 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/Makefile @@ -0,0 +1,77 @@ + +# Copyright 2022 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'dpp' sub-component of DAL. +# +ifdef CONFIG_DRM_AMD_DC_FP +############################################################################### +# DCN +############################################################################### + +DPP_DCN10 = dcn10_dpp.o dcn10_dpp_dscl.o dcn10_dpp_cm.o + +AMD_DAL_DPP_DCN10 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn10/,$(DPP_DCN10)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN10) + +############################################################################### + +DPP_DCN20 = dcn20_dpp.o dcn20_dpp_cm.o + +AMD_DAL_DPP_DCN20 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn20/,$(DPP_DCN20)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN20) + +############################################################################### + +DPP_DCN201 = dcn201_dpp.o + +AMD_DAL_DPP_DCN201 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn201/,$(DPP_DCN201)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN201) + +############################################################################### + +DPP_DCN30 = dcn30_dpp.o dcn30_dpp_cm.o + +AMD_DAL_DPP_DCN30 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn30/,$(DPP_DCN30)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN30) + +############################################################################### + +DPP_DCN32 = dcn32_dpp.o + +AMD_DAL_DPP_DCN32 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn32/,$(DPP_DCN32)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN32) + +############################################################################### + +DPP_DCN35 = dcn35_dpp.o + +AMD_DAL_DPP_DCN35 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn35/,$(DPP_DCN35)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN35) + +############################################################################### + +endif \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt new file mode 100644 index 0000000000..1318c6fba3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt @@ -0,0 +1,6 @@ +dal3_subdirectory_sources( + dcn10_dpp.c + dcn10_dpp_cm.c + dcn10_dpp_dscl.c + dcn10_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c new file mode 100644 index 0000000000..e1da48b05d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c @@ -0,0 +1,585 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "core_types.h" + +#include "reg_helper.h" +#include "dcn10/dcn10_dpp.h" +#include "basics/conversion.h" + +#define NUM_PHASES 64 +#define HORZ_MAX_TAPS 8 +#define VERT_MAX_TAPS 8 + +#define BLACK_OFFSET_RGB_Y 0x0 +#define BLACK_OFFSET_CBCR 0x8000 + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + +enum pixel_format_description { + PIXEL_FORMAT_FIXED = 0, + PIXEL_FORMAT_FIXED16, + PIXEL_FORMAT_FLOAT + +}; + +enum dcn10_coef_filter_type_sel { + SCL_COEF_LUMA_VERT_FILTER = 0, + SCL_COEF_LUMA_HORZ_FILTER = 1, + SCL_COEF_CHROMA_VERT_FILTER = 2, + SCL_COEF_CHROMA_HORZ_FILTER = 3, + SCL_COEF_ALPHA_VERT_FILTER = 4, + SCL_COEF_ALPHA_HORZ_FILTER = 5 +}; + +enum dscl_autocal_mode { + AUTOCAL_MODE_OFF = 0, + + /* Autocal calculate the scaling ratio and initial phase and the + * DSCL_MODE_SEL must be set to 1 + */ + AUTOCAL_MODE_AUTOSCALE = 1, + /* Autocal perform auto centering without replication and the + * DSCL_MODE_SEL must be set to 0 + */ + AUTOCAL_MODE_AUTOCENTER = 2, + /* Autocal perform auto centering and auto replication and the + * DSCL_MODE_SEL must be set to 0 + */ + AUTOCAL_MODE_AUTOREPLICATE = 3 +}; + +enum dscl_mode_sel { + DSCL_MODE_SCALING_444_BYPASS = 0, + DSCL_MODE_SCALING_444_RGB_ENABLE = 1, + DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, + DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, + DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, + DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, + DSCL_MODE_DSCL_BYPASS = 6 +}; + +void dpp_read_state(struct dpp *dpp_base, + struct dcn_dpp_state *s) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_GET(DPP_CONTROL, + DPP_CLOCK_ENABLE, &s->is_enabled); + REG_GET(CM_IGAM_CONTROL, + CM_IGAM_LUT_MODE, &s->igam_lut_mode); + REG_GET(CM_IGAM_CONTROL, + CM_IGAM_INPUT_FORMAT, &s->igam_input_format); + REG_GET(CM_DGAM_CONTROL, + CM_DGAM_LUT_MODE, &s->dgam_lut_mode); + REG_GET(CM_RGAM_CONTROL, + CM_RGAM_LUT_MODE, &s->rgam_lut_mode); + REG_GET(CM_GAMUT_REMAP_CONTROL, + CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode); + + if (s->gamut_remap_mode) { + s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12); + s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14); + s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22); + s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24); + s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32); + s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34); + } +} + +#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) + +bool dpp1_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps) +{ + /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ + if (scl_data->format == PIXEL_FORMAT_FP16 && + dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && + scl_data->ratios.horz.value != dc_fixpt_one.value && + scl_data->ratios.vert.value != 
dc_fixpt_one.value) + return false; + + if (scl_data->viewport.width > scl_data->h_active && + dpp->ctx->dc->debug.max_downscale_src_width != 0 && + scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) + return false; + + /* TODO: add lb check */ + + /* No support for programming ratio of 4, drop to 3.99999.. */ + if (scl_data->ratios.horz.value == (4ll << 32)) + scl_data->ratios.horz.value--; + if (scl_data->ratios.vert.value == (4ll << 32)) + scl_data->ratios.vert.value--; + if (scl_data->ratios.horz_c.value == (4ll << 32)) + scl_data->ratios.horz_c.value--; + if (scl_data->ratios.vert_c.value == (4ll << 32)) + scl_data->ratios.vert_c.value--; + + /* Set default taps if none are provided */ + if (in_taps->h_taps == 0) + scl_data->taps.h_taps = 4; + else + scl_data->taps.h_taps = in_taps->h_taps; + if (in_taps->v_taps == 0) + scl_data->taps.v_taps = 4; + else + scl_data->taps.v_taps = in_taps->v_taps; + if (in_taps->v_taps_c == 0) + scl_data->taps.v_taps_c = 2; + else + scl_data->taps.v_taps_c = in_taps->v_taps_c; + if (in_taps->h_taps_c == 0) + scl_data->taps.h_taps_c = 2; + /* Only 1 and even h_taps_c are supported by hw */ + else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) + scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; + else + scl_data->taps.h_taps_c = in_taps->h_taps_c; + + if (!dpp->ctx->dc->debug.always_scale) { + if (IDENTITY_RATIO(scl_data->ratios.horz)) + scl_data->taps.h_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert)) + scl_data->taps.v_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.horz_c)) + scl_data->taps.h_taps_c = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert_c)) + scl_data->taps.v_taps_c = 1; + } + + return true; +} + +void dpp_reset(struct dpp *dpp_base) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + dpp->filter_h_c = NULL; + dpp->filter_v_c = NULL; + dpp->filter_h = NULL; + dpp->filter_v = NULL; + + memset(&dpp->scl_data, 0, sizeof(dpp->scl_data)); + memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data)); +} + + + +static void dpp1_cm_set_regamma_pwl( + struct dpp *dpp_base, const struct pwl_params *params, enum opp_regamma mode) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + uint32_t re_mode = 0; + + switch (mode) { + case OPP_REGAMMA_BYPASS: + re_mode = 0; + break; + case OPP_REGAMMA_SRGB: + re_mode = 1; + break; + case OPP_REGAMMA_XVYCC: + re_mode = 2; + break; + case OPP_REGAMMA_USER: + re_mode = dpp->is_write_to_ram_a_safe ? 4 : 3; + if (memcmp(&dpp->pwl_data, params, sizeof(*params)) == 0) + break; + + dpp1_cm_power_on_regamma_lut(dpp_base, true); + dpp1_cm_configure_regamma_lut(dpp_base, dpp->is_write_to_ram_a_safe); + + if (dpp->is_write_to_ram_a_safe) + dpp1_cm_program_regamma_luta_settings(dpp_base, params); + else + dpp1_cm_program_regamma_lutb_settings(dpp_base, params); + + dpp1_cm_program_regamma_lut(dpp_base, params->rgb_resulted, + params->hw_points_num); + dpp->pwl_data = *params; + + re_mode = dpp->is_write_to_ram_a_safe ? 
3 : 4; + dpp->is_write_to_ram_a_safe = !dpp->is_write_to_ram_a_safe; + break; + default: + break; + } + REG_SET(CM_RGAM_CONTROL, 0, CM_RGAM_LUT_MODE, re_mode); +} + +static void dpp1_setup_format_flags(enum surface_pixel_format input_format,\ + enum pixel_format_description *fmt) +{ + + if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F || + input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) + *fmt = PIXEL_FORMAT_FLOAT; + else if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 || + input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616) + *fmt = PIXEL_FORMAT_FIXED16; + else + *fmt = PIXEL_FORMAT_FIXED; +} + +static void dpp1_set_degamma_format_float( + struct dpp *dpp_base, + bool is_float) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + if (is_float) { + REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 3); + REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 1); + } else { + REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 2); + REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 0); + } +} + +void dpp1_cnv_setup ( + struct dpp *dpp_base, + enum surface_pixel_format format, + enum expansion_mode mode, + struct dc_csc_transform input_csc_color_matrix, + enum dc_color_space input_color_space, + struct cnv_alpha_2bit_lut *alpha_2bit_lut) +{ + uint32_t pixel_format; + uint32_t alpha_en; + enum pixel_format_description fmt ; + enum dc_color_space color_space; + enum dcn10_input_csc_select select; + bool is_float; + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + bool force_disable_cursor = false; + struct out_csc_color_matrix tbl_entry; + int i = 0; + + dpp1_setup_format_flags(format, &fmt); + alpha_en = 1; + pixel_format = 0; + color_space = COLOR_SPACE_SRGB; + select = INPUT_CSC_SELECT_BYPASS; + is_float = false; + + switch (fmt) { + case PIXEL_FORMAT_FIXED: + case PIXEL_FORMAT_FIXED16: + /*when output is float then FORMAT_CONTROL__OUTPUT_FP=1*/ + REG_SET_3(FORMAT_CONTROL, 0, + CNVC_BYPASS, 0, + FORMAT_EXPANSION_MODE, mode, + OUTPUT_FP, 0); + break; + case PIXEL_FORMAT_FLOAT: + REG_SET_3(FORMAT_CONTROL, 0, + CNVC_BYPASS, 0, + FORMAT_EXPANSION_MODE, mode, + OUTPUT_FP, 1); + is_float = true; + break; + default: + + break; + } + + dpp1_set_degamma_format_float(dpp_base, is_float); + + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + pixel_format = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + pixel_format = 3; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + pixel_format = 8; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + pixel_format = 10; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + force_disable_cursor = false; + pixel_format = 65; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + force_disable_cursor = true; + pixel_format = 64; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + force_disable_cursor = true; + pixel_format = 67; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + force_disable_cursor = true; + pixel_format = 66; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + pixel_format = 26; /* ARGB16161616_UNORM */ + break; + case 
SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + pixel_format = 24; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + pixel_format = 25; + break; + default: + break; + } + + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? input_color_space : color_space; + + REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, + CNVC_SURFACE_PIXEL_FORMAT, pixel_format); + REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); + + // if input adjustments exist, program icsc with those values + + if (input_csc_color_matrix.enable_adjustment + == true) { + for (i = 0; i < 12; i++) + tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; + + tbl_entry.color_space = color_space; + + if (color_space >= COLOR_SPACE_YCBCR601) + select = INPUT_CSC_SELECT_ICSC; + else + select = INPUT_CSC_SELECT_BYPASS; + + dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry); + } else + dpp1_program_input_csc(dpp_base, color_space, select, NULL); + + if (force_disable_cursor) { + REG_UPDATE(CURSOR_CONTROL, + CURSOR_ENABLE, 0); + REG_UPDATE(CURSOR0_CONTROL, + CUR0_ENABLE, 0); + } +} + +void dpp1_set_cursor_attributes( + struct dpp *dpp_base, + struct dc_cursor_attributes *cursor_attributes) +{ + enum dc_cursor_color_format color_format = cursor_attributes->color_format; + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_UPDATE_2(CURSOR0_CONTROL, + CUR0_MODE, color_format, + CUR0_EXPANSION_MODE, 0); + + if (color_format == CURSOR_MODE_MONO) { + /* todo: clarify what to program these to */ + REG_UPDATE(CURSOR0_COLOR0, + CUR0_COLOR0, 0x00000000); + REG_UPDATE(CURSOR0_COLOR1, + CUR0_COLOR1, 0xFFFFFFFF); + } +} + + +void dpp1_set_cursor_position( + struct dpp *dpp_base, + const struct dc_cursor_position *pos, + const struct dc_cursor_mi_param *param, + uint32_t width, + uint32_t height) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + int x_pos = pos->x - param->viewport.x; + int y_pos = pos->y - param->viewport.y; + int x_hotspot = pos->x_hotspot; + int y_hotspot = pos->y_hotspot; + int src_x_offset = x_pos - pos->x_hotspot; + int src_y_offset = y_pos - pos->y_hotspot; + int cursor_height = (int)height; + int cursor_width = (int)width; + uint32_t cur_en = pos->enable ? 
1 : 0; + + // Transform cursor width / height and hotspots for offset calculations + if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) { + swap(cursor_height, cursor_width); + swap(x_hotspot, y_hotspot); + + if (param->rotation == ROTATION_ANGLE_90) { + // hotspot = (-y, x) + src_x_offset = x_pos - (cursor_width - x_hotspot); + src_y_offset = y_pos - y_hotspot; + } else if (param->rotation == ROTATION_ANGLE_270) { + // hotspot = (y, -x) + src_x_offset = x_pos - x_hotspot; + src_y_offset = y_pos - (cursor_height - y_hotspot); + } + } else if (param->rotation == ROTATION_ANGLE_180) { + // hotspot = (-x, -y) + if (!param->mirror) + src_x_offset = x_pos - (cursor_width - x_hotspot); + + src_y_offset = y_pos - (cursor_height - y_hotspot); + } + + if (src_x_offset >= (int)param->viewport.width) + cur_en = 0; /* not visible beyond right edge*/ + + if (src_x_offset + cursor_width <= 0) + cur_en = 0; /* not visible beyond left edge*/ + + if (src_y_offset >= (int)param->viewport.height) + cur_en = 0; /* not visible beyond bottom edge*/ + + if (src_y_offset + cursor_height <= 0) + cur_en = 0; /* not visible beyond top edge*/ + + REG_UPDATE(CURSOR0_CONTROL, + CUR0_ENABLE, cur_en); + + dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; +} + +void dpp1_cnv_set_optional_cursor_attributes( + struct dpp *dpp_base, + struct dpp_cursor_attributes *attr) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + if (attr) { + REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale); + } +} + +void dpp1_dppclk_control( + struct dpp *dpp_base, + bool dppclk_div, + bool enable) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + if (enable) { + if (dpp->tf_mask->DPPCLK_RATE_CONTROL) + REG_UPDATE_2(DPP_CONTROL, + DPPCLK_RATE_CONTROL, dppclk_div, + DPP_CLOCK_ENABLE, 1); + else + REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 1); + } else + REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0); +} + +static const struct dpp_funcs dcn10_dpp_funcs = { + .dpp_read_state = dpp_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, + .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, + .dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment, + .dpp_set_csc_default = dpp1_cm_set_output_csc_default, + .dpp_power_on_regamma_lut = dpp1_cm_power_on_regamma_lut, + .dpp_program_regamma_lut = dpp1_cm_program_regamma_lut, + .dpp_configure_regamma_lut = dpp1_cm_configure_regamma_lut, + .dpp_program_regamma_lutb_settings = dpp1_cm_program_regamma_lutb_settings, + .dpp_program_regamma_luta_settings = dpp1_cm_program_regamma_luta_settings, + .dpp_program_regamma_pwl = dpp1_cm_set_regamma_pwl, + .dpp_program_bias_and_scale = dpp1_program_bias_and_scale, + .dpp_set_degamma = dpp1_set_degamma, + .dpp_program_input_lut = dpp1_program_input_lut, + .dpp_program_degamma_pwl = dpp1_set_degamma_pwl, + .dpp_setup = dpp1_cnv_setup, + .dpp_full_bypass = dpp1_full_bypass, + .set_cursor_attributes = dpp1_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp1_dppclk_control, + .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier, + .dpp_program_blnd_lut = NULL, + .dpp_program_shaper_lut = NULL, + .dpp_program_3dlut = NULL, + .dpp_get_gamut_remap = dpp1_cm_get_gamut_remap, +}; + +static struct dpp_caps dcn10_dpp_cap = { + 
.dscl_data_proc_format = DSCL_DATA_PRCESSING_FIXED_FORMAT, + .dscl_calc_lb_num_partitions = dpp1_dscl_calc_lb_num_partitions, +}; + +/*****************************************/ +/* Constructor, Destructor */ +/*****************************************/ + +void dpp1_construct( + struct dcn10_dpp *dpp, + struct dc_context *ctx, + uint32_t inst, + const struct dcn_dpp_registers *tf_regs, + const struct dcn_dpp_shift *tf_shift, + const struct dcn_dpp_mask *tf_mask) +{ + dpp->base.ctx = ctx; + + dpp->base.inst = inst; + dpp->base.funcs = &dcn10_dpp_funcs; + dpp->base.caps = &dcn10_dpp_cap; + + dpp->tf_regs = tf_regs; + dpp->tf_shift = tf_shift; + dpp->tf_mask = tf_mask; + + dpp->lb_pixel_depth_supported = + LB_PIXEL_DEPTH_18BPP | + LB_PIXEL_DEPTH_24BPP | + LB_PIXEL_DEPTH_30BPP | + LB_PIXEL_DEPTH_36BPP; + + dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; + dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/ +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h new file mode 100644 index 0000000000..c48139bed1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h @@ -0,0 +1,1528 @@ +/* Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_DPP_DCN10_H__ +#define __DAL_DPP_DCN10_H__ + +#include "dpp.h" + +#define TO_DCN10_DPP(dpp)\ + container_of(dpp, struct dcn10_dpp, base) + +/* TODO: Use correct number of taps. 
Using polaris values for now */ +#define LB_TOTAL_NUMBER_OF_ENTRIES 5124 +#define LB_BITS_PER_ENTRY 144 + +#define TF_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +//Used to resolve corner case +#define TF2_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## _ ## field_name ## post_fix + +#define TF_REG_LIST_DCN(id) \ + SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\ + SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\ + SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\ + SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\ + SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\ + SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\ + SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\ + SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \ + SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \ + SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \ + SRI(DSCL_MEM_PWR_CTRL, DSCL, id), \ + SRI(OTG_H_BLANK, DSCL, id), \ + SRI(OTG_V_BLANK, DSCL, id), \ + SRI(SCL_MODE, DSCL, id), \ + SRI(LB_DATA_FORMAT, DSCL, id), \ + SRI(LB_MEMORY_CTRL, DSCL, id), \ + SRI(DSCL_AUTOCAL, DSCL, id), \ + SRI(DSCL_CONTROL, DSCL, id), \ + SRI(SCL_BLACK_OFFSET, DSCL, id), \ + SRI(SCL_TAP_CONTROL, DSCL, id), \ + SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \ + SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \ + SRI(DSCL_2TAP_CONTROL, DSCL, id), \ + SRI(MPC_SIZE, DSCL, id), \ + SRI(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \ + SRI(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \ + SRI(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \ + SRI(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \ + SRI(SCL_HORZ_FILTER_INIT, DSCL, id), \ + SRI(SCL_HORZ_FILTER_INIT_C, DSCL, id), \ + SRI(SCL_VERT_FILTER_INIT, DSCL, id), \ + SRI(SCL_VERT_FILTER_INIT_BOT, DSCL, id), \ + SRI(SCL_VERT_FILTER_INIT_C, DSCL, id), \ + SRI(SCL_VERT_FILTER_INIT_BOT_C, DSCL, id), \ + SRI(RECOUT_START, DSCL, id), \ + SRI(RECOUT_SIZE, DSCL, id), \ + SRI(CM_ICSC_CONTROL, CM, id), \ + SRI(CM_ICSC_C11_C12, CM, id), \ + SRI(CM_ICSC_C33_C34, CM, id), \ + SRI(CM_DGAM_RAMB_START_CNTL_B, CM, id), \ + SRI(CM_DGAM_RAMB_START_CNTL_G, CM, id), \ + SRI(CM_DGAM_RAMB_START_CNTL_R, CM, id), \ + SRI(CM_DGAM_RAMB_SLOPE_CNTL_B, CM, id), \ + SRI(CM_DGAM_RAMB_SLOPE_CNTL_G, CM, id), \ + SRI(CM_DGAM_RAMB_SLOPE_CNTL_R, CM, id), \ + SRI(CM_DGAM_RAMB_END_CNTL1_B, CM, id), \ + SRI(CM_DGAM_RAMB_END_CNTL2_B, CM, id), \ + SRI(CM_DGAM_RAMB_END_CNTL1_G, CM, id), \ + SRI(CM_DGAM_RAMB_END_CNTL2_G, CM, id), \ + SRI(CM_DGAM_RAMB_END_CNTL1_R, CM, id), \ + SRI(CM_DGAM_RAMB_END_CNTL2_R, CM, id), \ + SRI(CM_DGAM_RAMB_REGION_0_1, CM, id), \ + SRI(CM_DGAM_RAMB_REGION_14_15, CM, id), \ + SRI(CM_DGAM_RAMA_START_CNTL_B, CM, id), \ + SRI(CM_DGAM_RAMA_START_CNTL_G, CM, id), \ + SRI(CM_DGAM_RAMA_START_CNTL_R, CM, id), \ + SRI(CM_DGAM_RAMA_SLOPE_CNTL_B, CM, id), \ + SRI(CM_DGAM_RAMA_SLOPE_CNTL_G, CM, id), \ + SRI(CM_DGAM_RAMA_SLOPE_CNTL_R, CM, id), \ + SRI(CM_DGAM_RAMA_END_CNTL1_B, CM, id), \ + SRI(CM_DGAM_RAMA_END_CNTL2_B, CM, id), \ + SRI(CM_DGAM_RAMA_END_CNTL1_G, CM, id), \ + SRI(CM_DGAM_RAMA_END_CNTL2_G, CM, id), \ + SRI(CM_DGAM_RAMA_END_CNTL1_R, CM, id), \ + SRI(CM_DGAM_RAMA_END_CNTL2_R, CM, id), \ + SRI(CM_DGAM_RAMA_REGION_0_1, CM, id), \ + SRI(CM_DGAM_RAMA_REGION_14_15, CM, id), \ + SRI(CM_MEM_PWR_CTRL, CM, id), \ + SRI(CM_DGAM_LUT_WRITE_EN_MASK, CM, id), \ + SRI(CM_DGAM_LUT_INDEX, CM, id), \ + SRI(CM_DGAM_LUT_DATA, CM, id), \ + SRI(CM_CONTROL, CM, id), \ + SRI(CM_DGAM_CONTROL, CM, id), \ + SRI(CM_TEST_DEBUG_INDEX, CM, id), \ + SRI(CM_TEST_DEBUG_DATA, CM, id), \ + SRI(FORMAT_CONTROL, CNVC_CFG, id), \ + SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ + SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ + 
SRI(CURSOR0_COLOR0, CNVC_CUR, id), \ + SRI(CURSOR0_COLOR1, CNVC_CUR, id), \ + SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \ + SRI(DPP_CONTROL, DPP_TOP, id), \ + SRI(CM_HDR_MULT_COEF, CM, id) + + + +#define TF_REG_LIST_DCN10(id) \ + TF_REG_LIST_DCN(id), \ + SRI(CM_COMA_C11_C12, CM, id),\ + SRI(CM_COMA_C33_C34, CM, id),\ + SRI(CM_COMB_C11_C12, CM, id),\ + SRI(CM_COMB_C33_C34, CM, id),\ + SRI(CM_OCSC_CONTROL, CM, id), \ + SRI(CM_OCSC_C11_C12, CM, id), \ + SRI(CM_OCSC_C33_C34, CM, id), \ + SRI(CM_BNS_VALUES_R, CM, id), \ + SRI(CM_BNS_VALUES_G, CM, id), \ + SRI(CM_BNS_VALUES_B, CM, id), \ + SRI(CM_MEM_PWR_CTRL, CM, id), \ + SRI(CM_RGAM_LUT_DATA, CM, id), \ + SRI(CM_RGAM_LUT_WRITE_EN_MASK, CM, id),\ + SRI(CM_RGAM_LUT_INDEX, CM, id), \ + SRI(CM_RGAM_RAMB_START_CNTL_B, CM, id), \ + SRI(CM_RGAM_RAMB_START_CNTL_G, CM, id), \ + SRI(CM_RGAM_RAMB_START_CNTL_R, CM, id), \ + SRI(CM_RGAM_RAMB_SLOPE_CNTL_B, CM, id), \ + SRI(CM_RGAM_RAMB_SLOPE_CNTL_G, CM, id), \ + SRI(CM_RGAM_RAMB_SLOPE_CNTL_R, CM, id), \ + SRI(CM_RGAM_RAMB_END_CNTL1_B, CM, id), \ + SRI(CM_RGAM_RAMB_END_CNTL2_B, CM, id), \ + SRI(CM_RGAM_RAMB_END_CNTL1_G, CM, id), \ + SRI(CM_RGAM_RAMB_END_CNTL2_G, CM, id), \ + SRI(CM_RGAM_RAMB_END_CNTL1_R, CM, id), \ + SRI(CM_RGAM_RAMB_END_CNTL2_R, CM, id), \ + SRI(CM_RGAM_RAMB_REGION_0_1, CM, id), \ + SRI(CM_RGAM_RAMB_REGION_32_33, CM, id), \ + SRI(CM_RGAM_RAMA_START_CNTL_B, CM, id), \ + SRI(CM_RGAM_RAMA_START_CNTL_G, CM, id), \ + SRI(CM_RGAM_RAMA_START_CNTL_R, CM, id), \ + SRI(CM_RGAM_RAMA_SLOPE_CNTL_B, CM, id), \ + SRI(CM_RGAM_RAMA_SLOPE_CNTL_G, CM, id), \ + SRI(CM_RGAM_RAMA_SLOPE_CNTL_R, CM, id), \ + SRI(CM_RGAM_RAMA_END_CNTL1_B, CM, id), \ + SRI(CM_RGAM_RAMA_END_CNTL2_B, CM, id), \ + SRI(CM_RGAM_RAMA_END_CNTL1_G, CM, id), \ + SRI(CM_RGAM_RAMA_END_CNTL2_G, CM, id), \ + SRI(CM_RGAM_RAMA_END_CNTL1_R, CM, id), \ + SRI(CM_RGAM_RAMA_END_CNTL2_R, CM, id), \ + SRI(CM_RGAM_RAMA_REGION_0_1, CM, id), \ + SRI(CM_RGAM_RAMA_REGION_32_33, CM, id), \ + SRI(CM_RGAM_CONTROL, CM, id), \ + SRI(CM_IGAM_CONTROL, CM, id), \ + SRI(CM_IGAM_LUT_RW_CONTROL, CM, id), \ + SRI(CM_IGAM_LUT_RW_INDEX, CM, id), \ + SRI(CM_IGAM_LUT_SEQ_COLOR, CM, id), \ + SRI(CURSOR_CONTROL, CURSOR, id), \ + SRI(CM_CMOUT_CONTROL, CM, id) + + +#define TF_REG_LIST_SH_MASK_DCN(mask_sh)\ + TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\ + TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\ + TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\ + 
TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\ + TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\ + TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\ + TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\ + TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\ + TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\ + TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\ + TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\ + TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\ + TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_RGB_Y, mask_sh),\ + TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_CBCR, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\ + TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\ + TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\ + TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\ + TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\ + TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\ + TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_FRAC_BOT, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_INT_BOT, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_FRAC_BOT_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_INT_BOT_C, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \ + 
TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh), \ + TF_SF(DSCL0_DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, mask_sh), \ + TF_SF(CM0_CM_ICSC_CONTROL, CM_ICSC_MODE, mask_sh), \ + TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C11, mask_sh), \ + TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C12, mask_sh), \ + TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C33, mask_sh), \ + TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C34, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_B, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_G, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_R, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_B, CM_DGAM_RAMB_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_G, CM_DGAM_RAMB_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_R, CM_DGAM_RAMB_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_B, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_G, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_R, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_B, CM_DGAM_RAMA_EXP_REGION_END_B, mask_sh), \ + 
TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_G, CM_DGAM_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_R, CM_DGAM_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, mask_sh), \ + TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, mask_sh), \ + TF_SF(CM0_CM_DGAM_LUT_INDEX, CM_DGAM_LUT_INDEX, mask_sh), \ + TF_SF(CM0_CM_DGAM_LUT_DATA, CM_DGAM_LUT_DATA, mask_sh), \ + TF_SF(CM0_CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \ + TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \ + TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \ + TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh) + +#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\ + TF_REG_LIST_SH_MASK_DCN(mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_REDUCE_MODE, mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, DYNAMIC_PIXEL_DEPTH, mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, DITHER_EN, mask_sh),\ + TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C11, mask_sh),\ + TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C12, mask_sh),\ + TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C33, mask_sh),\ + TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C34, mask_sh),\ + TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C11, mask_sh),\ + TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C12, mask_sh),\ + TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C33, mask_sh),\ + TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C34, mask_sh),\ + TF_SF(CM0_CM_OCSC_CONTROL, CM_OCSC_MODE, mask_sh), \ + TF_SF(CM0_CM_OCSC_C11_C12, 
CM_OCSC_C11, mask_sh), \ + TF_SF(CM0_CM_OCSC_C11_C12, CM_OCSC_C12, mask_sh), \ + TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C33, mask_sh), \ + TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C34, mask_sh), \ + TF_SF(CM0_CM_BNS_VALUES_R, CM_BNS_BIAS_R, mask_sh), \ + TF_SF(CM0_CM_BNS_VALUES_G, CM_BNS_BIAS_G, mask_sh), \ + TF_SF(CM0_CM_BNS_VALUES_B, CM_BNS_BIAS_B, mask_sh), \ + TF_SF(CM0_CM_BNS_VALUES_R, CM_BNS_SCALE_R, mask_sh), \ + TF_SF(CM0_CM_BNS_VALUES_G, CM_BNS_SCALE_G, mask_sh), \ + TF_SF(CM0_CM_BNS_VALUES_B, CM_BNS_SCALE_B, mask_sh), \ + TF_SF(CM0_CM_MEM_PWR_CTRL, RGAM_MEM_PWR_FORCE, mask_sh), \ + TF_SF(CM0_CM_RGAM_LUT_DATA, CM_RGAM_LUT_DATA, mask_sh), \ + TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_SEL, mask_sh), \ + TF_SF(CM0_CM_RGAM_LUT_INDEX, CM_RGAM_LUT_INDEX, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_B, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_G, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_R, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_B, CM_RGAM_RAMB_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_G, CM_RGAM_RAMB_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_R, CM_RGAM_RAMB_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, 
CM_RGAM_RAMA_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_B, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_G, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_R, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_B, CM_RGAM_RAMA_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_G, CM_RGAM_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_R, CM_RGAM_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_CONTROL, CM_RGAM_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_R, mask_sh), \ + TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_G, mask_sh), \ + TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_B, mask_sh), \ + TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, mask_sh), \ + TF_SF(CM0_CM_CONTROL, CM_BYPASS_EN, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_SEQ_COLOR, CM_IGAM_LUT_SEQ_COLOR, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh), \ + TF_SF(CM0_CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, mask_sh), \ + TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ + TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ + TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ + TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh) + +/* + * + DCN1 CM debug status register definition + + register :ID9_CM_STATUS do + implement_ref :cm + map to: :cmdebugind, at: j + width 32 + disclosure NEVER + + field :ID9_VUPDATE_CFG, [0], R + field :ID9_IGAM_LUT_MODE, [2..1], R + field :ID9_BNS_BYPASS, [3], R + field :ID9_ICSC_MODE, [5..4], R + field :ID9_DGAM_LUT_MODE, 
[8..6], R + field :ID9_HDR_BYPASS, [9], R + field :ID9_GAMUT_REMAP_MODE, [11..10], R + field :ID9_RGAM_LUT_MODE, [14..12], R + #1 free bit + field :ID9_OCSC_MODE, [18..16], R + field :ID9_DENORM_MODE, [21..19], R + field :ID9_ROUND_TRUNC_MODE, [25..22], R + field :ID9_DITHER_EN, [26], R + field :ID9_DITHER_MODE, [28..27], R + end +*/ + +#define TF_DEBUG_REG_LIST_SH_DCN10 \ + .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 4, \ + .CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 16 + +#define TF_DEBUG_REG_LIST_MASK_DCN10 \ + .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x30, \ + .CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 0x70000 + +#define TF_REG_FIELD_LIST(type) \ + type EXT_OVERSCAN_LEFT; \ + type EXT_OVERSCAN_RIGHT; \ + type EXT_OVERSCAN_BOTTOM; \ + type EXT_OVERSCAN_TOP; \ + type OTG_H_BLANK_START; \ + type OTG_H_BLANK_END; \ + type OTG_V_BLANK_START; \ + type OTG_V_BLANK_END; \ + type PIXEL_DEPTH; \ + type PIXEL_EXPAN_MODE; \ + type PIXEL_REDUCE_MODE; \ + type DYNAMIC_PIXEL_DEPTH; \ + type DITHER_EN; \ + type INTERLEAVE_EN; \ + type LB_DATA_FORMAT__ALPHA_EN; \ + type MEMORY_CONFIG; \ + type LB_MAX_PARTITIONS; \ + type AUTOCAL_MODE; \ + type AUTOCAL_NUM_PIPE; \ + type AUTOCAL_PIPE_ID; \ + type SCL_BOUNDARY_MODE; \ + type SCL_BLACK_OFFSET_RGB_Y; \ + type SCL_BLACK_OFFSET_CBCR; \ + type SCL_V_NUM_TAPS; \ + type SCL_H_NUM_TAPS; \ + type SCL_V_NUM_TAPS_C; \ + type SCL_H_NUM_TAPS_C; \ + type SCL_COEF_RAM_TAP_PAIR_IDX; \ + type SCL_COEF_RAM_PHASE; \ + type SCL_COEF_RAM_FILTER_TYPE; \ + type SCL_COEF_RAM_EVEN_TAP_COEF; \ + type SCL_COEF_RAM_EVEN_TAP_COEF_EN; \ + type SCL_COEF_RAM_ODD_TAP_COEF; \ + type SCL_COEF_RAM_ODD_TAP_COEF_EN; \ + type SCL_H_2TAP_HARDCODE_COEF_EN; \ + type SCL_H_2TAP_SHARP_EN; \ + type SCL_H_2TAP_SHARP_FACTOR; \ + type SCL_V_2TAP_HARDCODE_COEF_EN; \ + type SCL_V_2TAP_SHARP_EN; \ + type SCL_V_2TAP_SHARP_FACTOR; \ + type SCL_COEF_RAM_SELECT; \ + type DSCL_MODE; \ + type RECOUT_START_X; \ + type RECOUT_START_Y; \ + type RECOUT_WIDTH; \ + type RECOUT_HEIGHT; \ + type MPC_WIDTH; \ + type MPC_HEIGHT; \ + type SCL_H_SCALE_RATIO; \ + type SCL_V_SCALE_RATIO; \ + type SCL_H_SCALE_RATIO_C; \ + type SCL_V_SCALE_RATIO_C; \ + type SCL_H_INIT_FRAC; \ + type SCL_H_INIT_INT; \ + type SCL_H_INIT_FRAC_C; \ + type SCL_H_INIT_INT_C; \ + type SCL_V_INIT_FRAC; \ + type SCL_V_INIT_INT; \ + type SCL_V_INIT_FRAC_BOT; \ + type SCL_V_INIT_INT_BOT; \ + type SCL_V_INIT_FRAC_C; \ + type SCL_V_INIT_INT_C; \ + type SCL_V_INIT_FRAC_BOT_C; \ + type SCL_V_INIT_INT_BOT_C; \ + type SCL_CHROMA_COEF_MODE; \ + type SCL_COEF_RAM_SELECT_CURRENT; \ + type LUT_MEM_PWR_FORCE; \ + type LUT_MEM_PWR_STATE; \ + type CM_GAMUT_REMAP_MODE; \ + type CM_GAMUT_REMAP_C11; \ + type CM_GAMUT_REMAP_C12; \ + type CM_GAMUT_REMAP_C13; \ + type CM_GAMUT_REMAP_C14; \ + type CM_GAMUT_REMAP_C21; \ + type CM_GAMUT_REMAP_C22; \ + type CM_GAMUT_REMAP_C23; \ + type CM_GAMUT_REMAP_C24; \ + type CM_GAMUT_REMAP_C31; \ + type CM_GAMUT_REMAP_C32; \ + type CM_GAMUT_REMAP_C33; \ + type CM_GAMUT_REMAP_C34; \ + type CM_COMA_C11; \ + type CM_COMA_C12; \ + type CM_COMA_C33; \ + type CM_COMA_C34; \ + type CM_COMB_C11; \ + type CM_COMB_C12; \ + type CM_COMB_C33; \ + type CM_COMB_C34; \ + type CM_OCSC_MODE; \ + type CM_OCSC_C11; \ + type CM_OCSC_C12; \ + type CM_OCSC_C33; \ + type CM_OCSC_C34; \ + type RGAM_MEM_PWR_FORCE; \ + type CM_RGAM_LUT_DATA; \ + type CM_RGAM_LUT_WRITE_EN_MASK; \ + type CM_RGAM_LUT_WRITE_SEL; \ + type CM_RGAM_LUT_INDEX; \ + type CM_RGAM_RAMB_EXP_REGION_START_B; \ + type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; \ + type CM_RGAM_RAMB_EXP_REGION_START_G; \ + type 
CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G; \ + type CM_RGAM_RAMB_EXP_REGION_START_R; \ + type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R; \ + type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \ + type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \ + type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \ + type CM_RGAM_RAMB_EXP_REGION_END_B; \ + type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; \ + type CM_RGAM_RAMB_EXP_REGION_END_BASE_B; \ + type CM_RGAM_RAMB_EXP_REGION_END_G; \ + type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G; \ + type CM_RGAM_RAMB_EXP_REGION_END_BASE_G; \ + type CM_RGAM_RAMB_EXP_REGION_END_R; \ + type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R; \ + type CM_RGAM_RAMB_EXP_REGION_END_BASE_R; \ + type CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET; \ + type CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \ + type CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET; \ + type CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \ + type CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET; \ + type CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \ + type CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET; \ + type CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \ + type CM_RGAM_RAMA_EXP_REGION_START_B; \ + type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B; \ + type CM_RGAM_RAMA_EXP_REGION_START_G; \ + type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G; \ + type CM_RGAM_RAMA_EXP_REGION_START_R; \ + type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R; \ + type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \ + type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \ + type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \ + type CM_RGAM_RAMA_EXP_REGION_END_B; \ + type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B; \ + type CM_RGAM_RAMA_EXP_REGION_END_BASE_B; \ + type CM_RGAM_RAMA_EXP_REGION_END_G; \ + type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G; \ + type CM_RGAM_RAMA_EXP_REGION_END_BASE_G; \ + type CM_RGAM_RAMA_EXP_REGION_END_R; \ + type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R; \ + type CM_RGAM_RAMA_EXP_REGION_END_BASE_R; \ + type CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; \ + type CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \ + type CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; \ + type CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \ + type CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET; \ + type CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \ + type CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET; \ + type CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \ + type CM_RGAM_LUT_MODE; \ + type CM_CMOUT_ROUND_TRUNC_MODE; \ + type CM_BLNDGAM_LUT_MODE; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_R; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R; \ + type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_R; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R; \ + type CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS; \ + type 
CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_G; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \ + type 
CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_G; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET; \ 
+ type CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \ + type CM_BLNDGAM_LUT_WRITE_EN_MASK; \ + type CM_BLNDGAM_LUT_WRITE_SEL; \ + type CM_BLNDGAM_CONFIG_STATUS; \ + type CM_BLNDGAM_LUT_INDEX; \ + type BLNDGAM_MEM_PWR_FORCE; \ + type CM_3DLUT_MODE; \ + type CM_3DLUT_SIZE; \ + type CM_3DLUT_INDEX; \ + type CM_3DLUT_DATA0; \ + type CM_3DLUT_DATA1; \ + type CM_3DLUT_DATA_30BIT; \ + type CM_3DLUT_WRITE_EN_MASK; \ + type CM_3DLUT_RAM_SEL; \ + type CM_3DLUT_30BIT_EN; \ + type CM_3DLUT_CONFIG_STATUS; \ + type CM_3DLUT_READ_SEL; \ + type CM_SHAPER_LUT_MODE; \ + type CM_SHAPER_RAMB_EXP_REGION_START_B; \ + type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B; \ + type CM_SHAPER_RAMB_EXP_REGION_START_G; \ + type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G; \ + type CM_SHAPER_RAMB_EXP_REGION_START_R; \ + type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R; \ + type CM_SHAPER_RAMB_EXP_REGION_END_B; \ + type CM_SHAPER_RAMB_EXP_REGION_END_BASE_B; \ + type CM_SHAPER_RAMB_EXP_REGION_END_G; \ + type CM_SHAPER_RAMB_EXP_REGION_END_BASE_G; \ + type CM_SHAPER_RAMB_EXP_REGION_END_R; \ + type CM_SHAPER_RAMB_EXP_REGION_END_BASE_R; \ + type CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET; \ + type 
CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION_START_B; \ + type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B; \ + type CM_SHAPER_RAMA_EXP_REGION_START_G; \ + type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G; \ + type CM_SHAPER_RAMA_EXP_REGION_START_R; \ + type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R; \ + type CM_SHAPER_RAMA_EXP_REGION_END_B; \ + type CM_SHAPER_RAMA_EXP_REGION_END_BASE_B; \ + type CM_SHAPER_RAMA_EXP_REGION_END_G; \ + type CM_SHAPER_RAMA_EXP_REGION_END_BASE_G; \ + type CM_SHAPER_RAMA_EXP_REGION_END_R; \ + type CM_SHAPER_RAMA_EXP_REGION_END_BASE_R; \ + type CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET; \ + type 
CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS; \ + type CM_SHAPER_LUT_WRITE_EN_MASK; \ + type CM_SHAPER_CONFIG_STATUS; \ + type CM_SHAPER_LUT_WRITE_SEL; \ + type CM_SHAPER_LUT_INDEX; \ + type CM_SHAPER_LUT_DATA; \ + type CM_DGAM_CONFIG_STATUS; \ + type CM_ICSC_MODE; \ + type CM_ICSC_C11; \ + type CM_ICSC_C12; \ + type CM_ICSC_C33; \ + type CM_ICSC_C34; \ + type CM_BNS_BIAS_R; \ + type CM_BNS_BIAS_G; \ + type CM_BNS_BIAS_B; \ + type CM_BNS_SCALE_R; \ + type CM_BNS_SCALE_G; \ + type CM_BNS_SCALE_B; \ + type CM_DGAM_RAMB_EXP_REGION_START_B; \ + type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; \ + type CM_DGAM_RAMB_EXP_REGION_START_G; \ + type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G; \ + type CM_DGAM_RAMB_EXP_REGION_START_R; \ + type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R; \ + type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \ + type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \ + type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \ + type CM_DGAM_RAMB_EXP_REGION_END_B; \ + type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; \ + type CM_DGAM_RAMB_EXP_REGION_END_BASE_B; \ + type CM_DGAM_RAMB_EXP_REGION_END_G; \ + type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G; \ + type CM_DGAM_RAMB_EXP_REGION_END_BASE_G; \ + type CM_DGAM_RAMB_EXP_REGION_END_R; \ + type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R; \ + type CM_DGAM_RAMB_EXP_REGION_END_BASE_R; \ + type CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET; \ + type CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \ + type CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET; \ + type CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \ + type CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET; \ + type CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \ + type CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET; \ + type CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \ + type CM_DGAM_RAMA_EXP_REGION_START_B; \ + type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B; \ + type CM_DGAM_RAMA_EXP_REGION_START_G; \ + type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G; \ + type CM_DGAM_RAMA_EXP_REGION_START_R; \ + type 
CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R; \ + type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \ + type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \ + type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \ + type CM_DGAM_RAMA_EXP_REGION_END_B; \ + type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B; \ + type CM_DGAM_RAMA_EXP_REGION_END_BASE_B; \ + type CM_DGAM_RAMA_EXP_REGION_END_G; \ + type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G; \ + type CM_DGAM_RAMA_EXP_REGION_END_BASE_G; \ + type CM_DGAM_RAMA_EXP_REGION_END_R; \ + type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R; \ + type CM_DGAM_RAMA_EXP_REGION_END_BASE_R; \ + type CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; \ + type CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \ + type CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; \ + type CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \ + type CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET; \ + type CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \ + type CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET; \ + type CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \ + type SHARED_MEM_PWR_DIS; \ + type CM_IGAM_LUT_FORMAT_R; \ + type CM_IGAM_LUT_FORMAT_G; \ + type CM_IGAM_LUT_FORMAT_B; \ + type CM_IGAM_LUT_HOST_EN; \ + type CM_IGAM_LUT_RW_MODE; \ + type CM_IGAM_LUT_WRITE_EN_MASK; \ + type CM_IGAM_LUT_SEL; \ + type CM_IGAM_LUT_SEQ_COLOR; \ + type CM_IGAM_DGAM_CONFIG_STATUS; \ + type CM_DGAM_LUT_WRITE_EN_MASK; \ + type CM_DGAM_LUT_WRITE_SEL; \ + type CM_DGAM_LUT_INDEX; \ + type CM_DGAM_LUT_DATA; \ + type CM_DGAM_LUT_MODE; \ + type CM_IGAM_LUT_MODE; \ + type CM_IGAM_INPUT_FORMAT; \ + type CM_IGAM_LUT_RW_INDEX; \ + type CM_BYPASS_EN; \ + type FORMAT_EXPANSION_MODE; \ + type CNVC_BYPASS; \ + type OUTPUT_FP; \ + type CNVC_SURFACE_PIXEL_FORMAT; \ + type CURSOR_MODE; \ + type CURSOR_PITCH; \ + type CURSOR_LINES_PER_CHUNK; \ + type CURSOR_ENABLE; \ + type CUR0_MODE; \ + type CUR0_EXPANSION_MODE; \ + type CUR0_ENABLE; \ + type CM_BYPASS; \ + type CM_TEST_DEBUG_INDEX; \ + type CM_TEST_DEBUG_DATA_ID9_ICSC_MODE; \ + type CM_TEST_DEBUG_DATA_ID9_OCSC_MODE;\ + type FORMAT_CONTROL__ALPHA_EN; \ + type CUR0_COLOR0; \ + type CUR0_COLOR1; \ + type DPPCLK_RATE_CONTROL; \ + type DPP_CLOCK_ENABLE; \ + type CM_HDR_MULT_COEF; \ + type CUR0_FP_BIAS; \ + type CUR0_FP_SCALE;\ + type DISPCLK_R_GATE_DISABLE; + +struct dcn_dpp_shift { + TF_REG_FIELD_LIST(uint8_t) +}; + +struct dcn_dpp_mask { + TF_REG_FIELD_LIST(uint32_t) +}; + +#define DPP_COMMON_REG_VARIABLE_LIST \ + uint32_t DSCL_EXT_OVERSCAN_LEFT_RIGHT; \ + uint32_t DSCL_EXT_OVERSCAN_TOP_BOTTOM; \ + uint32_t OTG_H_BLANK; \ + uint32_t OTG_V_BLANK; \ + uint32_t DSCL_MEM_PWR_CTRL; \ + uint32_t DSCL_MEM_PWR_STATUS; \ + uint32_t SCL_MODE; \ + uint32_t LB_DATA_FORMAT; \ + uint32_t LB_MEMORY_CTRL; \ + uint32_t DSCL_AUTOCAL; \ + uint32_t DSCL_CONTROL; \ + uint32_t SCL_BLACK_OFFSET; \ + uint32_t SCL_TAP_CONTROL; \ + uint32_t SCL_COEF_RAM_TAP_SELECT; \ + uint32_t SCL_COEF_RAM_TAP_DATA; \ + uint32_t DSCL_2TAP_CONTROL; \ + uint32_t MPC_SIZE; \ + uint32_t SCL_HORZ_FILTER_SCALE_RATIO; \ + uint32_t SCL_VERT_FILTER_SCALE_RATIO; \ + uint32_t SCL_HORZ_FILTER_SCALE_RATIO_C; \ + uint32_t SCL_VERT_FILTER_SCALE_RATIO_C; \ + uint32_t SCL_HORZ_FILTER_INIT; \ + uint32_t SCL_HORZ_FILTER_INIT_C; \ + uint32_t SCL_VERT_FILTER_INIT; \ + uint32_t SCL_VERT_FILTER_INIT_BOT; \ + uint32_t SCL_VERT_FILTER_INIT_C; \ + uint32_t SCL_VERT_FILTER_INIT_BOT_C; \ + uint32_t RECOUT_START; \ + uint32_t RECOUT_SIZE; \ + uint32_t CM_GAMUT_REMAP_CONTROL; \ + uint32_t CM_GAMUT_REMAP_C11_C12; \ + uint32_t CM_GAMUT_REMAP_C13_C14; \ + uint32_t CM_GAMUT_REMAP_C21_C22; \ + uint32_t CM_GAMUT_REMAP_C23_C24; \ + uint32_t CM_GAMUT_REMAP_C31_C32; 
\ + uint32_t CM_GAMUT_REMAP_C33_C34; \ + uint32_t CM_COMA_C11_C12; \ + uint32_t CM_COMA_C33_C34; \ + uint32_t CM_COMB_C11_C12; \ + uint32_t CM_COMB_C33_C34; \ + uint32_t CM_OCSC_CONTROL; \ + uint32_t CM_OCSC_C11_C12; \ + uint32_t CM_OCSC_C33_C34; \ + uint32_t CM_MEM_PWR_CTRL; \ + uint32_t CM_RGAM_LUT_DATA; \ + uint32_t CM_RGAM_LUT_WRITE_EN_MASK; \ + uint32_t CM_RGAM_LUT_INDEX; \ + uint32_t CM_RGAM_RAMB_START_CNTL_B; \ + uint32_t CM_RGAM_RAMB_START_CNTL_G; \ + uint32_t CM_RGAM_RAMB_START_CNTL_R; \ + uint32_t CM_RGAM_RAMB_SLOPE_CNTL_B; \ + uint32_t CM_RGAM_RAMB_SLOPE_CNTL_G; \ + uint32_t CM_RGAM_RAMB_SLOPE_CNTL_R; \ + uint32_t CM_RGAM_RAMB_END_CNTL1_B; \ + uint32_t CM_RGAM_RAMB_END_CNTL2_B; \ + uint32_t CM_RGAM_RAMB_END_CNTL1_G; \ + uint32_t CM_RGAM_RAMB_END_CNTL2_G; \ + uint32_t CM_RGAM_RAMB_END_CNTL1_R; \ + uint32_t CM_RGAM_RAMB_END_CNTL2_R; \ + uint32_t CM_RGAM_RAMB_REGION_0_1; \ + uint32_t CM_RGAM_RAMB_REGION_32_33; \ + uint32_t CM_RGAM_RAMA_START_CNTL_B; \ + uint32_t CM_RGAM_RAMA_START_CNTL_G; \ + uint32_t CM_RGAM_RAMA_START_CNTL_R; \ + uint32_t CM_RGAM_RAMA_SLOPE_CNTL_B; \ + uint32_t CM_RGAM_RAMA_SLOPE_CNTL_G; \ + uint32_t CM_RGAM_RAMA_SLOPE_CNTL_R; \ + uint32_t CM_RGAM_RAMA_END_CNTL1_B; \ + uint32_t CM_RGAM_RAMA_END_CNTL2_B; \ + uint32_t CM_RGAM_RAMA_END_CNTL1_G; \ + uint32_t CM_RGAM_RAMA_END_CNTL2_G; \ + uint32_t CM_RGAM_RAMA_END_CNTL1_R; \ + uint32_t CM_RGAM_RAMA_END_CNTL2_R; \ + uint32_t CM_RGAM_RAMA_REGION_0_1; \ + uint32_t CM_RGAM_RAMA_REGION_32_33; \ + uint32_t CM_RGAM_CONTROL; \ + uint32_t CM_CMOUT_CONTROL; \ + uint32_t CM_BLNDGAM_LUT_WRITE_EN_MASK; \ + uint32_t CM_BLNDGAM_CONTROL; \ + uint32_t CM_BLNDGAM_RAMB_START_CNTL_B; \ + uint32_t CM_BLNDGAM_RAMB_START_CNTL_G; \ + uint32_t CM_BLNDGAM_RAMB_START_CNTL_R; \ + uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_B; \ + uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_G; \ + uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_R; \ + uint32_t CM_BLNDGAM_RAMB_END_CNTL1_B; \ + uint32_t CM_BLNDGAM_RAMB_END_CNTL2_B; \ + uint32_t CM_BLNDGAM_RAMB_END_CNTL1_G; \ + uint32_t CM_BLNDGAM_RAMB_END_CNTL2_G; \ + uint32_t CM_BLNDGAM_RAMB_END_CNTL1_R; \ + uint32_t CM_BLNDGAM_RAMB_END_CNTL2_R; \ + uint32_t CM_BLNDGAM_RAMB_REGION_0_1; \ + uint32_t CM_BLNDGAM_RAMB_REGION_2_3; \ + uint32_t CM_BLNDGAM_RAMB_REGION_4_5; \ + uint32_t CM_BLNDGAM_RAMB_REGION_6_7; \ + uint32_t CM_BLNDGAM_RAMB_REGION_8_9; \ + uint32_t CM_BLNDGAM_RAMB_REGION_10_11; \ + uint32_t CM_BLNDGAM_RAMB_REGION_12_13; \ + uint32_t CM_BLNDGAM_RAMB_REGION_14_15; \ + uint32_t CM_BLNDGAM_RAMB_REGION_16_17; \ + uint32_t CM_BLNDGAM_RAMB_REGION_18_19; \ + uint32_t CM_BLNDGAM_RAMB_REGION_20_21; \ + uint32_t CM_BLNDGAM_RAMB_REGION_22_23; \ + uint32_t CM_BLNDGAM_RAMB_REGION_24_25; \ + uint32_t CM_BLNDGAM_RAMB_REGION_26_27; \ + uint32_t CM_BLNDGAM_RAMB_REGION_28_29; \ + uint32_t CM_BLNDGAM_RAMB_REGION_30_31; \ + uint32_t CM_BLNDGAM_RAMB_REGION_32_33; \ + uint32_t CM_BLNDGAM_RAMA_START_CNTL_B; \ + uint32_t CM_BLNDGAM_RAMA_START_CNTL_G; \ + uint32_t CM_BLNDGAM_RAMA_START_CNTL_R; \ + uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_B; \ + uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_G; \ + uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_R; \ + uint32_t CM_BLNDGAM_RAMA_END_CNTL1_B; \ + uint32_t CM_BLNDGAM_RAMA_END_CNTL2_B; \ + uint32_t CM_BLNDGAM_RAMA_END_CNTL1_G; \ + uint32_t CM_BLNDGAM_RAMA_END_CNTL2_G; \ + uint32_t CM_BLNDGAM_RAMA_END_CNTL1_R; \ + uint32_t CM_BLNDGAM_RAMA_END_CNTL2_R; \ + uint32_t CM_BLNDGAM_RAMA_REGION_0_1; \ + uint32_t CM_BLNDGAM_RAMA_REGION_2_3; \ + uint32_t CM_BLNDGAM_RAMA_REGION_4_5; \ + uint32_t CM_BLNDGAM_RAMA_REGION_6_7; \ + uint32_t 
CM_BLNDGAM_RAMA_REGION_8_9; \ + uint32_t CM_BLNDGAM_RAMA_REGION_10_11; \ + uint32_t CM_BLNDGAM_RAMA_REGION_12_13; \ + uint32_t CM_BLNDGAM_RAMA_REGION_14_15; \ + uint32_t CM_BLNDGAM_RAMA_REGION_16_17; \ + uint32_t CM_BLNDGAM_RAMA_REGION_18_19; \ + uint32_t CM_BLNDGAM_RAMA_REGION_20_21; \ + uint32_t CM_BLNDGAM_RAMA_REGION_22_23; \ + uint32_t CM_BLNDGAM_RAMA_REGION_24_25; \ + uint32_t CM_BLNDGAM_RAMA_REGION_26_27; \ + uint32_t CM_BLNDGAM_RAMA_REGION_28_29; \ + uint32_t CM_BLNDGAM_RAMA_REGION_30_31; \ + uint32_t CM_BLNDGAM_RAMA_REGION_32_33; \ + uint32_t CM_BLNDGAM_LUT_INDEX; \ + uint32_t CM_3DLUT_MODE; \ + uint32_t CM_3DLUT_INDEX; \ + uint32_t CM_3DLUT_DATA; \ + uint32_t CM_3DLUT_DATA_30BIT; \ + uint32_t CM_3DLUT_READ_WRITE_CONTROL; \ + uint32_t CM_SHAPER_LUT_WRITE_EN_MASK; \ + uint32_t CM_SHAPER_CONTROL; \ + uint32_t CM_SHAPER_RAMB_START_CNTL_B; \ + uint32_t CM_SHAPER_RAMB_START_CNTL_G; \ + uint32_t CM_SHAPER_RAMB_START_CNTL_R; \ + uint32_t CM_SHAPER_RAMB_END_CNTL_B; \ + uint32_t CM_SHAPER_RAMB_END_CNTL_G; \ + uint32_t CM_SHAPER_RAMB_END_CNTL_R; \ + uint32_t CM_SHAPER_RAMB_REGION_0_1; \ + uint32_t CM_SHAPER_RAMB_REGION_2_3; \ + uint32_t CM_SHAPER_RAMB_REGION_4_5; \ + uint32_t CM_SHAPER_RAMB_REGION_6_7; \ + uint32_t CM_SHAPER_RAMB_REGION_8_9; \ + uint32_t CM_SHAPER_RAMB_REGION_10_11; \ + uint32_t CM_SHAPER_RAMB_REGION_12_13; \ + uint32_t CM_SHAPER_RAMB_REGION_14_15; \ + uint32_t CM_SHAPER_RAMB_REGION_16_17; \ + uint32_t CM_SHAPER_RAMB_REGION_18_19; \ + uint32_t CM_SHAPER_RAMB_REGION_20_21; \ + uint32_t CM_SHAPER_RAMB_REGION_22_23; \ + uint32_t CM_SHAPER_RAMB_REGION_24_25; \ + uint32_t CM_SHAPER_RAMB_REGION_26_27; \ + uint32_t CM_SHAPER_RAMB_REGION_28_29; \ + uint32_t CM_SHAPER_RAMB_REGION_30_31; \ + uint32_t CM_SHAPER_RAMB_REGION_32_33; \ + uint32_t CM_SHAPER_RAMA_START_CNTL_B; \ + uint32_t CM_SHAPER_RAMA_START_CNTL_G; \ + uint32_t CM_SHAPER_RAMA_START_CNTL_R; \ + uint32_t CM_SHAPER_RAMA_END_CNTL_B; \ + uint32_t CM_SHAPER_RAMA_END_CNTL_G; \ + uint32_t CM_SHAPER_RAMA_END_CNTL_R; \ + uint32_t CM_SHAPER_RAMA_REGION_0_1; \ + uint32_t CM_SHAPER_RAMA_REGION_2_3; \ + uint32_t CM_SHAPER_RAMA_REGION_4_5; \ + uint32_t CM_SHAPER_RAMA_REGION_6_7; \ + uint32_t CM_SHAPER_RAMA_REGION_8_9; \ + uint32_t CM_SHAPER_RAMA_REGION_10_11; \ + uint32_t CM_SHAPER_RAMA_REGION_12_13; \ + uint32_t CM_SHAPER_RAMA_REGION_14_15; \ + uint32_t CM_SHAPER_RAMA_REGION_16_17; \ + uint32_t CM_SHAPER_RAMA_REGION_18_19; \ + uint32_t CM_SHAPER_RAMA_REGION_20_21; \ + uint32_t CM_SHAPER_RAMA_REGION_22_23; \ + uint32_t CM_SHAPER_RAMA_REGION_24_25; \ + uint32_t CM_SHAPER_RAMA_REGION_26_27; \ + uint32_t CM_SHAPER_RAMA_REGION_28_29; \ + uint32_t CM_SHAPER_RAMA_REGION_30_31; \ + uint32_t CM_SHAPER_RAMA_REGION_32_33; \ + uint32_t CM_SHAPER_LUT_INDEX; \ + uint32_t CM_SHAPER_LUT_DATA; \ + uint32_t CM_ICSC_CONTROL; \ + uint32_t CM_ICSC_C11_C12; \ + uint32_t CM_ICSC_C33_C34; \ + uint32_t CM_BNS_VALUES_R; \ + uint32_t CM_BNS_VALUES_G; \ + uint32_t CM_BNS_VALUES_B; \ + uint32_t CM_DGAM_RAMB_START_CNTL_B; \ + uint32_t CM_DGAM_RAMB_START_CNTL_G; \ + uint32_t CM_DGAM_RAMB_START_CNTL_R; \ + uint32_t CM_DGAM_RAMB_SLOPE_CNTL_B; \ + uint32_t CM_DGAM_RAMB_SLOPE_CNTL_G; \ + uint32_t CM_DGAM_RAMB_SLOPE_CNTL_R; \ + uint32_t CM_DGAM_RAMB_END_CNTL1_B; \ + uint32_t CM_DGAM_RAMB_END_CNTL2_B; \ + uint32_t CM_DGAM_RAMB_END_CNTL1_G; \ + uint32_t CM_DGAM_RAMB_END_CNTL2_G; \ + uint32_t CM_DGAM_RAMB_END_CNTL1_R; \ + uint32_t CM_DGAM_RAMB_END_CNTL2_R; \ + uint32_t CM_DGAM_RAMB_REGION_0_1; \ + uint32_t CM_DGAM_RAMB_REGION_14_15; \ + uint32_t CM_DGAM_RAMA_START_CNTL_B; \ 
+ uint32_t CM_DGAM_RAMA_START_CNTL_G; \ + uint32_t CM_DGAM_RAMA_START_CNTL_R; \ + uint32_t CM_DGAM_RAMA_SLOPE_CNTL_B; \ + uint32_t CM_DGAM_RAMA_SLOPE_CNTL_G; \ + uint32_t CM_DGAM_RAMA_SLOPE_CNTL_R; \ + uint32_t CM_DGAM_RAMA_END_CNTL1_B; \ + uint32_t CM_DGAM_RAMA_END_CNTL2_B; \ + uint32_t CM_DGAM_RAMA_END_CNTL1_G; \ + uint32_t CM_DGAM_RAMA_END_CNTL2_G; \ + uint32_t CM_DGAM_RAMA_END_CNTL1_R; \ + uint32_t CM_DGAM_RAMA_END_CNTL2_R; \ + uint32_t CM_DGAM_RAMA_REGION_0_1; \ + uint32_t CM_DGAM_RAMA_REGION_14_15; \ + uint32_t CM_DGAM_LUT_WRITE_EN_MASK; \ + uint32_t CM_DGAM_LUT_INDEX; \ + uint32_t CM_DGAM_LUT_DATA; \ + uint32_t CM_CONTROL; \ + uint32_t CM_DGAM_CONTROL; \ + uint32_t CM_IGAM_CONTROL; \ + uint32_t CM_IGAM_LUT_RW_CONTROL; \ + uint32_t CM_IGAM_LUT_RW_INDEX; \ + uint32_t CM_IGAM_LUT_SEQ_COLOR; \ + uint32_t CM_TEST_DEBUG_INDEX; \ + uint32_t CM_TEST_DEBUG_DATA; \ + uint32_t FORMAT_CONTROL; \ + uint32_t CNVC_SURFACE_PIXEL_FORMAT; \ + uint32_t CURSOR_CONTROL; \ + uint32_t CURSOR0_CONTROL; \ + uint32_t CURSOR0_COLOR0; \ + uint32_t CURSOR0_COLOR1; \ + uint32_t DPP_CONTROL; \ + uint32_t CM_HDR_MULT_COEF; \ + uint32_t CURSOR0_FP_SCALE_BIAS; + +struct dcn_dpp_registers { + DPP_COMMON_REG_VARIABLE_LIST +}; + +struct dcn10_dpp { + struct dpp base; + + const struct dcn_dpp_registers *tf_regs; + const struct dcn_dpp_shift *tf_shift; + const struct dcn_dpp_mask *tf_mask; + + const uint16_t *filter_v; + const uint16_t *filter_h; + const uint16_t *filter_v_c; + const uint16_t *filter_h_c; + int lb_pixel_depth_supported; + int lb_memory_size; + int lb_bits_per_entry; + bool is_write_to_ram_a_safe; + struct scaler_data scl_data; + struct pwl_params pwl_data; +}; + +enum dcn10_input_csc_select { + INPUT_CSC_SELECT_BYPASS = 0, + INPUT_CSC_SELECT_ICSC = 1, + INPUT_CSC_SELECT_COMA = 2 +}; + +void dpp1_set_cursor_attributes( + struct dpp *dpp_base, + struct dc_cursor_attributes *cursor_attributes); + +void dpp1_set_cursor_position( + struct dpp *dpp_base, + const struct dc_cursor_position *pos, + const struct dc_cursor_mi_param *param, + uint32_t width, + uint32_t height); + +void dpp1_cnv_set_optional_cursor_attributes( + struct dpp *dpp_base, + struct dpp_cursor_attributes *attr); + +bool dpp1_dscl_is_lb_conf_valid( + int ceil_vratio, + int num_partitions, + int vtaps); + +void dpp1_dscl_calc_lb_num_partitions( + const struct scaler_data *scl_data, + enum lb_memory_config lb_config, + int *num_part_y, + int *num_part_c); + +void dpp1_degamma_ram_select( + struct dpp *dpp_base, + bool use_ram_a); + +void dpp1_program_degamma_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params); + +void dpp1_program_degamma_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params); + +void dpp1_program_degamma_lut( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num, + bool is_ram_a); + +void dpp1_power_on_degamma_lut( + struct dpp *dpp_base, + bool power_on); + +void dpp1_program_input_csc( + struct dpp *dpp_base, + enum dc_color_space color_space, + enum dcn10_input_csc_select select, + const struct out_csc_color_matrix *tbl_entry); + +void dpp1_program_bias_and_scale( + struct dpp *dpp_base, + struct dc_bias_and_scale *params); + +void dpp1_program_input_lut( + struct dpp *dpp_base, + const struct dc_gamma *gamma); + +void dpp1_full_bypass(struct dpp *dpp_base); + +void dpp1_set_degamma( + struct dpp *dpp_base, + enum ipp_degamma_mode mode); + +void dpp1_set_degamma_pwl(struct dpp *dpp_base, + const struct pwl_params *params); + + +void dpp_read_state(struct dpp 
*dpp_base, + struct dcn_dpp_state *s); + +void dpp_reset(struct dpp *dpp_base); + +void dpp1_cm_program_regamma_lut( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num); + +void dpp1_cm_power_on_regamma_lut( + struct dpp *dpp_base, + bool power_on); + +void dpp1_cm_configure_regamma_lut( + struct dpp *dpp_base, + bool is_ram_a); + +/*program re gamma RAM A*/ +void dpp1_cm_program_regamma_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params); + +/*program re gamma RAM B*/ +void dpp1_cm_program_regamma_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params); +void dpp1_cm_set_output_csc_adjustment( + struct dpp *dpp_base, + const uint16_t *regval); + +void dpp1_cm_set_output_csc_default( + struct dpp *dpp_base, + enum dc_color_space colorspace); + +void dpp1_cm_set_gamut_remap( + struct dpp *dpp, + const struct dpp_grph_csc_adjustment *adjust); + +void dpp1_dscl_set_scaler_manual_scale( + struct dpp *dpp_base, + const struct scaler_data *scl_data); + +void dpp1_cnv_setup ( + struct dpp *dpp_base, + enum surface_pixel_format format, + enum expansion_mode mode, + struct dc_csc_transform input_csc_color_matrix, + enum dc_color_space input_color_space, + struct cnv_alpha_2bit_lut *alpha_2bit_lut); + +void dpp1_dppclk_control( + struct dpp *dpp_base, + bool dppclk_div, + bool enable); + +void dpp1_set_hdr_multiplier( + struct dpp *dpp_base, + uint32_t multiplier); + +bool dpp1_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps); + +void dpp1_construct(struct dcn10_dpp *dpp1, + struct dc_context *ctx, + uint32_t inst, + const struct dcn_dpp_registers *tf_regs, + const struct dcn_dpp_shift *tf_shift, + const struct dcn_dpp_mask *tf_mask); + +void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust); +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c new file mode 100644 index 0000000000..006e238420 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c @@ -0,0 +1,884 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn10/dcn10_dpp.h"
+#include "basics/conversion.h"
+#include "dcn10/dcn10_cm_common.h"
+
+#define NUM_PHASES 64
+#define HORZ_MAX_TAPS 8
+#define VERT_MAX_TAPS 8
+
+#define BLACK_OFFSET_RGB_Y 0x0
+#define BLACK_OFFSET_CBCR 0x8000
+
+#define REG(reg)\
+	dpp->tf_regs->reg
+
+#define CTX \
+	dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+	dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
+
+enum dcn10_coef_filter_type_sel {
+	SCL_COEF_LUMA_VERT_FILTER = 0,
+	SCL_COEF_LUMA_HORZ_FILTER = 1,
+	SCL_COEF_CHROMA_VERT_FILTER = 2,
+	SCL_COEF_CHROMA_HORZ_FILTER = 3,
+	SCL_COEF_ALPHA_VERT_FILTER = 4,
+	SCL_COEF_ALPHA_HORZ_FILTER = 5
+};
+
+enum dscl_autocal_mode {
+	AUTOCAL_MODE_OFF = 0,
+
+	/* Autocal calculate the scaling ratio and initial phase and the
+	 * DSCL_MODE_SEL must be set to 1
+	 */
+	AUTOCAL_MODE_AUTOSCALE = 1,
+	/* Autocal perform auto centering without replication and the
+	 * DSCL_MODE_SEL must be set to 0
+	 */
+	AUTOCAL_MODE_AUTOCENTER = 2,
+	/* Autocal perform auto centering and auto replication and the
+	 * DSCL_MODE_SEL must be set to 0
+	 */
+	AUTOCAL_MODE_AUTOREPLICATE = 3
+};
+
+enum dscl_mode_sel {
+	DSCL_MODE_SCALING_444_BYPASS = 0,
+	DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+	DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+	DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+	DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+	DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+	DSCL_MODE_DSCL_BYPASS = 6
+};
+
+static void program_gamut_remap(
+		struct dcn10_dpp *dpp,
+		const uint16_t *regval,
+		enum gamut_remap_select select)
+{
+	uint16_t selection = 0;
+	struct color_matrices_reg gam_regs;
+
+	if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
+		REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+				CM_GAMUT_REMAP_MODE, 0);
+		return;
+	}
+	switch (select) {
+	case GAMUT_REMAP_COEFF:
+		selection = 1;
+		break;
+	case GAMUT_REMAP_COMA_COEFF:
+		selection = 2;
+		break;
+	case GAMUT_REMAP_COMB_COEFF:
+		selection = 3;
+		break;
+	default:
+		break;
+	}
+
+	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+	gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+
+	if (select == GAMUT_REMAP_COEFF) {
+		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+		cm_helper_program_color_matrices(
+				dpp->base.ctx,
+				regval,
+				&gam_regs);
+
+	} else if (select == GAMUT_REMAP_COMA_COEFF) {
+
+		gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
+		gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
+
+		cm_helper_program_color_matrices(
+				dpp->base.ctx,
+				regval,
+				&gam_regs);
+
+	} else {
+
+		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
+		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
+
+		cm_helper_program_color_matrices(
+				dpp->base.ctx,
+				regval,
+				&gam_regs);
+	}
+
+	REG_SET(
+			CM_GAMUT_REMAP_CONTROL, 0,
+			CM_GAMUT_REMAP_MODE, selection);
+
+}
+
+void dpp1_cm_set_gamut_remap(
+	struct dpp *dpp_base,
+	const struct dpp_grph_csc_adjustment *adjust)
+{
+	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+	int i = 0;
+
+	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+		/* Bypass if type is bypass or hw */
+		program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
+	else {
+		struct fixed31_32 arr_matrix[12];
+		uint16_t arr_reg_val[12];
+
+		for (i = 0; i < 12; i++)
+
arr_matrix[i] = adjust->temperature_matrix[i]; + + convert_float_matrix( + arr_reg_val, arr_matrix, 12); + + program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF); + } +} + +static void read_gamut_remap(struct dcn10_dpp *dpp, + uint16_t *regval, + enum gamut_remap_select *select) +{ + struct color_matrices_reg gam_regs; + uint32_t selection; + + REG_GET(CM_GAMUT_REMAP_CONTROL, + CM_GAMUT_REMAP_MODE, &selection); + + *select = selection; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (*select == GAMUT_REMAP_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_read_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMA_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); + + cm_helper_read_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMB_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); + + cm_helper_read_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + } +} + +void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + uint16_t arr_reg_val[12] = {0}; + enum gamut_remap_select select; + + read_gamut_remap(dpp, arr_reg_val, &select); + + if (select == GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} + +static void dpp1_cm_program_color_matrix( + struct dcn10_dpp *dpp, + const uint16_t *regval) +{ + uint32_t ocsc_mode; + uint32_t cur_mode; + struct color_matrices_reg gam_regs; + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + + /* determine which CSC matrix (ocsc or comb) we are using + * currently. 
select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + REG_SET(CM_TEST_DEBUG_INDEX, 0, + CM_TEST_DEBUG_INDEX, 9); + + REG_GET(CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode); + + if (cur_mode != 4) + ocsc_mode = 4; + else + ocsc_mode = 5; + + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12; + + if (ocsc_mode == 4) { + + gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34); + + } else { + + gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); + + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode); + +} + +void dpp1_cm_set_output_csc_default( + struct dpp *dpp_base, + enum dc_color_space colorspace) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + const uint16_t *regval = NULL; + int arr_size; + + regval = find_color_matrix(colorspace, &arr_size); + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + + dpp1_cm_program_color_matrix(dpp, regval); +} + +static void dpp1_cm_get_reg_field( + struct dcn10_dpp *dpp, + struct xfer_func_reg *reg) +{ + reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + + reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B; + reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B; + reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; + reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; + reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B; + reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B; + reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; + reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; + reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B; + reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B; + reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; + reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; +} + +static void dpp1_cm_get_degamma_reg_field( + struct dcn10_dpp *dpp, + struct xfer_func_reg *reg) +{ + reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + 
reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + + reg->shifts.field_region_end = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_B; + reg->masks.field_region_end = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_B; + reg->shifts.field_region_end_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; + reg->masks.field_region_end_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; + reg->shifts.field_region_end_base = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_BASE_B; + reg->masks.field_region_end_base = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_BASE_B; + reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; + reg->masks.field_region_linear_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; + reg->shifts.exp_region_start = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_B; + reg->masks.exp_region_start = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_B; + reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; + reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; +} +void dpp1_cm_set_output_csc_adjustment( + struct dpp *dpp_base, + const uint16_t *regval) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + dpp1_cm_program_color_matrix(dpp, regval); +} + +void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base, + bool power_on) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_SET(CM_MEM_PWR_CTRL, 0, + RGAM_MEM_PWR_FORCE, power_on == true ? 0:1); + +} + +void dpp1_cm_program_regamma_lut(struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num) +{ + uint32_t i; + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_SEQ_START(); + + for (i = 0 ; i < num; i++) { + REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg); + REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg); + REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg); + + REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_red_reg); + REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_green_reg); + REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg); + + } + +} + +void dpp1_cm_configure_regamma_lut( + struct dpp *dpp_base, + bool is_ram_a) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK, + CM_RGAM_LUT_WRITE_EN_MASK, 7); + REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK, + CM_RGAM_LUT_WRITE_SEL, is_ram_a == true ? 
0:1); + REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0); +} + +/*program re gamma RAM A*/ +void dpp1_cm_program_regamma_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + struct xfer_func_reg gam_regs; + + dpp1_cm_get_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R); + gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1); + gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33); + + cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); + +} + +/*program re gamma RAM B*/ +void dpp1_cm_program_regamma_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + struct xfer_func_reg gam_regs; + + dpp1_cm_get_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R); + gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1); + gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33); + + cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +void dpp1_program_input_csc( + struct dpp *dpp_base, + enum dc_color_space color_space, + enum dcn10_input_csc_select input_select, + const struct out_csc_color_matrix *tbl_entry) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + int i; + int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); + const uint16_t *regval = NULL; + uint32_t cur_select = 0; + enum dcn10_input_csc_select select; + struct color_matrices_reg gam_regs; + + if (input_select == INPUT_CSC_SELECT_BYPASS) { + REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); + return; + } + + if (tbl_entry == NULL) { + for (i = 0; i < arr_size; i++) + if (dpp_input_csc_matrix[i].color_space == color_space) { + regval = dpp_input_csc_matrix[i].regval; + break; + } + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + } else { + regval = tbl_entry->regval; + } + + /* determine which CSC matrix (icsc or coma) we are using + * currently. 
select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + REG_SET(CM_TEST_DEBUG_INDEX, 0, + CM_TEST_DEBUG_INDEX, 9); + + REG_GET(CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select); + + if (cur_select != INPUT_CSC_SELECT_ICSC) + select = INPUT_CSC_SELECT_ICSC; + else + select = INPUT_CSC_SELECT_COMA; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; + + if (select == INPUT_CSC_SELECT_ICSC) { + + gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); + + } else { + + gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); + + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + REG_SET(CM_ICSC_CONTROL, 0, + CM_ICSC_MODE, select); +} + +//keep here for now, decide multi dce support later +void dpp1_program_bias_and_scale( + struct dpp *dpp_base, + struct dc_bias_and_scale *params) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_SET_2(CM_BNS_VALUES_R, 0, + CM_BNS_SCALE_R, params->scale_red, + CM_BNS_BIAS_R, params->bias_red); + + REG_SET_2(CM_BNS_VALUES_G, 0, + CM_BNS_SCALE_G, params->scale_green, + CM_BNS_BIAS_G, params->bias_green); + + REG_SET_2(CM_BNS_VALUES_B, 0, + CM_BNS_SCALE_B, params->scale_blue, + CM_BNS_BIAS_B, params->bias_blue); + +} + +/*program de gamma RAM B*/ +void dpp1_program_degamma_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + struct xfer_func_reg gam_regs; + + dpp1_cm_get_degamma_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R); + gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1); + gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15); + + + cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +/*program de gamma RAM A*/ +void dpp1_program_degamma_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + struct xfer_func_reg gam_regs; + + dpp1_cm_get_degamma_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G); + 
gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R); + gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1); + gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15); + + cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +void dpp1_power_on_degamma_lut( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_SET(CM_MEM_PWR_CTRL, 0, + SHARED_MEM_PWR_DIS, power_on ? 0:1); + +} + +static void dpp1_enable_cm_block( + struct dpp *dpp_base) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8); + REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0); +} + +void dpp1_set_degamma( + struct dpp *dpp_base, + enum ipp_degamma_mode mode) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + dpp1_enable_cm_block(dpp_base); + + switch (mode) { + case IPP_DEGAMMA_MODE_BYPASS: + /* Setting de gamma bypass for now */ + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0); + break; + case IPP_DEGAMMA_MODE_HW_sRGB: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1); + break; + case IPP_DEGAMMA_MODE_HW_xvYCC: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); + break; + case IPP_DEGAMMA_MODE_USER_PWL: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); + break; + default: + BREAK_TO_DEBUGGER(); + break; + } + + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); +} + +void dpp1_degamma_ram_select( + struct dpp *dpp_base, + bool use_ram_a) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + if (use_ram_a) + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); + else + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 4); + +} + +static bool dpp1_degamma_ram_inuse( + struct dpp *dpp_base, + bool *ram_a_inuse) +{ + bool ret = false; + uint32_t status_reg = 0; + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, + &status_reg); + + if (status_reg == 9) { + *ram_a_inuse = true; + ret = true; + } else if (status_reg == 10) { + *ram_a_inuse = false; + ret = true; + } + return ret; +} + +void dpp1_program_degamma_lut( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num, + bool is_ram_a) +{ + uint32_t i; + + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0); + REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, + CM_DGAM_LUT_WRITE_EN_MASK, 7); + REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, + is_ram_a == true ? 
0:1);
+
+	REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0);
+	for (i = 0 ; i < num; i++) {
+		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg);
+		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg);
+		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg);
+
+		REG_SET(CM_DGAM_LUT_DATA, 0,
+				CM_DGAM_LUT_DATA, rgb[i].delta_red_reg);
+		REG_SET(CM_DGAM_LUT_DATA, 0,
+				CM_DGAM_LUT_DATA, rgb[i].delta_green_reg);
+		REG_SET(CM_DGAM_LUT_DATA, 0,
+				CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg);
+	}
+}
+
+void dpp1_set_degamma_pwl(struct dpp *dpp_base,
+				 const struct pwl_params *params)
+{
+	bool is_ram_a = true;
+
+	dpp1_power_on_degamma_lut(dpp_base, true);
+	dpp1_enable_cm_block(dpp_base);
+	dpp1_degamma_ram_inuse(dpp_base, &is_ram_a);
+	if (is_ram_a == true)
+		dpp1_program_degamma_lutb_settings(dpp_base, params);
+	else
+		dpp1_program_degamma_luta_settings(dpp_base, params);
+
+	dpp1_program_degamma_lut(dpp_base, params->rgb_resulted,
+				params->hw_points_num, !is_ram_a);
+	dpp1_degamma_ram_select(dpp_base, !is_ram_a);
+}
+
+void dpp1_full_bypass(struct dpp *dpp_base)
+{
+	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+	/* Input pixel format: ARGB8888 */
+	REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
+			CNVC_SURFACE_PIXEL_FORMAT, 0x8);
+
+	/* Zero expansion */
+	REG_SET_3(FORMAT_CONTROL, 0,
+			CNVC_BYPASS, 0,
+			FORMAT_CONTROL__ALPHA_EN, 0,
+			FORMAT_EXPANSION_MODE, 0);
+
+	/* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
+	if (dpp->tf_mask->CM_BYPASS_EN)
+		REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
+	else
+		REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);
+
+	/* Setting degamma bypass for now */
+	REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
+}
+
+static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base,
+							bool *ram_a_inuse)
+{
+	bool in_use = false;
+	uint32_t status_reg = 0;
+	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+	REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
+			&status_reg);
+
+	// 1 => IGAM_RAMA, 3 => IGAM_RAMA & DGAM_ROMA, 4 => IGAM_RAMA & DGAM_ROMB
+	if (status_reg == 1 || status_reg == 3 || status_reg == 4) {
+		*ram_a_inuse = true;
+		in_use = true;
+	// 2 => IGAM_RAMB, 5 => IGAM_RAMB & DGAM_ROMA, 6 => IGAM_RAMB & DGAM_ROMB
+	} else if (status_reg == 2 || status_reg == 5 || status_reg == 6) {
+		*ram_a_inuse = false;
+		in_use = true;
+	}
+	return in_use;
+}
+
+/*
+ * Input gamma LUT currently supports 256 values only. This means input color
+ * can have a maximum of 8 bits per channel (= 256 possible values) in order to
+ * have a one-to-one mapping with the LUT. Truncation will occur with color
+ * values greater than 8 bits.
+ *
+ * In the future, this function should support additional input gamma methods,
+ * such as piecewise linear mapping, and input gamma bypass.
+ */
+void dpp1_program_input_lut(
+		struct dpp *dpp_base,
+		const struct dc_gamma *gamma)
+{
+	int i;
+	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+	bool rama_occupied = false;
+	uint32_t ram_num;
+	// Power on LUT memory.
+	REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1);
+	dpp1_enable_cm_block(dpp_base);
+	// Determine whether to use RAM A or RAM B
+	dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied);
+	if (!rama_occupied)
+		REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0);
+	else
+		REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1);
+	// RW mode is 256-entry LUT
+	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0);
+	// IGAM Input format should be 8 bits per channel.
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0); + // Do not mask any R,G,B values + REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7); + // LUT-256, unsigned, integer, new u0.12 format + REG_UPDATE_3( + CM_IGAM_CONTROL, + CM_IGAM_LUT_FORMAT_R, 3, + CM_IGAM_LUT_FORMAT_G, 3, + CM_IGAM_LUT_FORMAT_B, 3); + // Start at index 0 of IGAM LUT + REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0); + for (i = 0; i < gamma->num_entries; i++) { + REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, + dc_fixpt_round( + gamma->entries.red[i])); + REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, + dc_fixpt_round( + gamma->entries.green[i])); + REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, + dc_fixpt_round( + gamma->entries.blue[i])); + } + // Power off LUT memory + REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0); + // Enable IGAM LUT on ram we just wrote to. 2 => RAMA, 3 => RAMB + REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2); + REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num); +} + +void dpp1_set_hdr_multiplier( + struct dpp *dpp_base, + uint32_t multiplier) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier); +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c new file mode 100644 index 0000000000..808bca9fb8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c @@ -0,0 +1,696 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "core_types.h" + +#include "reg_helper.h" +#include "dcn10/dcn10_dpp.h" +#include "basics/conversion.h" + + +#define NUM_PHASES 64 +#define HORZ_MAX_TAPS 8 +#define VERT_MAX_TAPS 8 + +#define BLACK_OFFSET_RGB_Y 0x0 +#define BLACK_OFFSET_CBCR 0x8000 + + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + +enum dcn10_coef_filter_type_sel { + SCL_COEF_LUMA_VERT_FILTER = 0, + SCL_COEF_LUMA_HORZ_FILTER = 1, + SCL_COEF_CHROMA_VERT_FILTER = 2, + SCL_COEF_CHROMA_HORZ_FILTER = 3, + SCL_COEF_ALPHA_VERT_FILTER = 4, + SCL_COEF_ALPHA_HORZ_FILTER = 5 +}; + +enum dscl_autocal_mode { + AUTOCAL_MODE_OFF = 0, + + /* Autocal calculate the scaling ratio and initial phase and the + * DSCL_MODE_SEL must be set to 1 + */ + AUTOCAL_MODE_AUTOSCALE = 1, + /* Autocal perform auto centering without replication and the + * DSCL_MODE_SEL must be set to 0 + */ + AUTOCAL_MODE_AUTOCENTER = 2, + /* Autocal perform auto centering and auto replication and the + * DSCL_MODE_SEL must be set to 0 + */ + AUTOCAL_MODE_AUTOREPLICATE = 3 +}; + +enum dscl_mode_sel { + DSCL_MODE_SCALING_444_BYPASS = 0, + DSCL_MODE_SCALING_444_RGB_ENABLE = 1, + DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, + DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, + DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, + DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, + DSCL_MODE_DSCL_BYPASS = 6 +}; + +static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth) +{ + if (depth == LB_PIXEL_DEPTH_30BPP) + return 0; /* 10 bpc */ + else if (depth == LB_PIXEL_DEPTH_24BPP) + return 1; /* 8 bpc */ + else if (depth == LB_PIXEL_DEPTH_18BPP) + return 2; /* 6 bpc */ + else if (depth == LB_PIXEL_DEPTH_36BPP) + return 3; /* 12 bpc */ + else { + ASSERT(0); + return -1; /* Unsupported */ + } +} + +static bool dpp1_dscl_is_video_format(enum pixel_format format) +{ + if (format >= PIXEL_FORMAT_VIDEO_BEGIN + && format <= PIXEL_FORMAT_VIDEO_END) + return true; + else + return false; +} + +static bool dpp1_dscl_is_420_format(enum pixel_format format) +{ + if (format == PIXEL_FORMAT_420BPP8 || + format == PIXEL_FORMAT_420BPP10) + return true; + else + return false; +} + +static enum dscl_mode_sel dpp1_dscl_get_dscl_mode( + struct dpp *dpp_base, + const struct scaler_data *data, + bool dbg_always_scale) +{ + const long long one = dc_fixpt_one.value; + + if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) { + /* DSCL is processing data in fixed format */ + if (data->format == PIXEL_FORMAT_FP16) + return DSCL_MODE_DSCL_BYPASS; + } + + if (data->ratios.horz.value == one + && data->ratios.vert.value == one + && data->ratios.horz_c.value == one + && data->ratios.vert_c.value == one + && !dbg_always_scale) + return DSCL_MODE_SCALING_444_BYPASS; + + if (!dpp1_dscl_is_420_format(data->format)) { + if (dpp1_dscl_is_video_format(data->format)) + return DSCL_MODE_SCALING_444_YCBCR_ENABLE; + else + return DSCL_MODE_SCALING_444_RGB_ENABLE; + } + if (data->ratios.horz.value == one && data->ratios.vert.value == one) + return DSCL_MODE_SCALING_420_LUMA_BYPASS; + if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one) + return DSCL_MODE_SCALING_420_CHROMA_BYPASS; + + return DSCL_MODE_SCALING_420_YCBCR_ENABLE; +} + +static void dpp1_power_on_dscl( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + if (dpp->tf_regs->DSCL_MEM_PWR_CTRL) { + if 
(power_on) { + REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 0); + REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5); + } else { + if (dpp->base.ctx->dc->debug.enable_mem_low_power.bits.dscl) { + dpp->base.ctx->dc->optimized_required = true; + dpp->base.deferred_reg_writes.bits.disable_dscl = true; + } else { + REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3); + } + } + } +} + + +static void dpp1_dscl_set_lb( + struct dcn10_dpp *dpp, + const struct line_buffer_params *lb_params, + enum lb_memory_config mem_size_config) +{ + uint32_t max_partitions = 63; /* Currently hardcoded on all ASICs before DCN 3.2 */ + + /* LB */ + if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) { + /* DSCL caps: pixel data processed in fixed format */ + uint32_t pixel_depth = dpp1_dscl_get_pixel_depth_val(lb_params->depth); + uint32_t dyn_pix_depth = lb_params->dynamic_pixel_depth; + + REG_SET_7(LB_DATA_FORMAT, 0, + PIXEL_DEPTH, pixel_depth, /* Pixel depth stored in LB */ + PIXEL_EXPAN_MODE, lb_params->pixel_expan_mode, /* Pixel expansion mode */ + PIXEL_REDUCE_MODE, 1, /* Pixel reduction mode: Rounding */ + DYNAMIC_PIXEL_DEPTH, dyn_pix_depth, /* Dynamic expansion pixel depth */ + DITHER_EN, 0, /* Dithering enable: Disabled */ + INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ + LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ + } else { + /* DSCL caps: pixel data processed in float format */ + REG_SET_2(LB_DATA_FORMAT, 0, + INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ + LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ + } + + if (dpp->base.caps->max_lb_partitions == 31) + max_partitions = 31; + + REG_SET_2(LB_MEMORY_CTRL, 0, + MEMORY_CONFIG, mem_size_config, + LB_MAX_PARTITIONS, max_partitions); +} + +static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio) +{ + if (taps == 8) + return get_filter_8tap_64p(ratio); + else if (taps == 7) + return get_filter_7tap_64p(ratio); + else if (taps == 6) + return get_filter_6tap_64p(ratio); + else if (taps == 5) + return get_filter_5tap_64p(ratio); + else if (taps == 4) + return get_filter_4tap_64p(ratio); + else if (taps == 3) + return get_filter_3tap_64p(ratio); + else if (taps == 2) + return get_filter_2tap_64p(); + else if (taps == 1) + return NULL; + else { + /* should never happen, bug */ + BREAK_TO_DEBUGGER(); + return NULL; + } +} + +static void dpp1_dscl_set_scaler_filter( + struct dcn10_dpp *dpp, + uint32_t taps, + enum dcn10_coef_filter_type_sel filter_type, + const uint16_t *filter) +{ + const int tap_pairs = (taps + 1) / 2; + int phase; + int pair; + uint16_t odd_coef, even_coef; + + REG_SET_3(SCL_COEF_RAM_TAP_SELECT, 0, + SCL_COEF_RAM_TAP_PAIR_IDX, 0, + SCL_COEF_RAM_PHASE, 0, + SCL_COEF_RAM_FILTER_TYPE, filter_type); + + for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) { + for (pair = 0; pair < tap_pairs; pair++) { + even_coef = filter[phase * taps + 2 * pair]; + if ((pair * 2 + 1) < taps) + odd_coef = filter[phase * taps + 2 * pair + 1]; + else + odd_coef = 0; + + REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0, + /* Even tap coefficient (bits 1:0 fixed to 0) */ + SCL_COEF_RAM_EVEN_TAP_COEF, even_coef, + /* Write/read control for even coefficient */ + SCL_COEF_RAM_EVEN_TAP_COEF_EN, 1, + /* Odd tap coefficient (bits 1:0 fixed to 0) */ + SCL_COEF_RAM_ODD_TAP_COEF, odd_coef, + /* Write/read control for odd coefficient */ + SCL_COEF_RAM_ODD_TAP_COEF_EN, 1); + } + } + +} + +static void dpp1_dscl_set_scl_filter( + 
struct dcn10_dpp *dpp, + const struct scaler_data *scl_data, + bool chroma_coef_mode) +{ + bool h_2tap_hardcode_coef_en = false; + bool v_2tap_hardcode_coef_en = false; + bool h_2tap_sharp_en = false; + bool v_2tap_sharp_en = false; + uint32_t h_2tap_sharp_factor = scl_data->sharpness.horz; + uint32_t v_2tap_sharp_factor = scl_data->sharpness.vert; + bool coef_ram_current; + const uint16_t *filter_h = NULL; + const uint16_t *filter_v = NULL; + const uint16_t *filter_h_c = NULL; + const uint16_t *filter_v_c = NULL; + + h_2tap_hardcode_coef_en = scl_data->taps.h_taps < 3 + && scl_data->taps.h_taps_c < 3 + && (scl_data->taps.h_taps > 1 && scl_data->taps.h_taps_c > 1); + v_2tap_hardcode_coef_en = scl_data->taps.v_taps < 3 + && scl_data->taps.v_taps_c < 3 + && (scl_data->taps.v_taps > 1 && scl_data->taps.v_taps_c > 1); + + h_2tap_sharp_en = h_2tap_hardcode_coef_en && h_2tap_sharp_factor != 0; + v_2tap_sharp_en = v_2tap_hardcode_coef_en && v_2tap_sharp_factor != 0; + + REG_UPDATE_6(DSCL_2TAP_CONTROL, + SCL_H_2TAP_HARDCODE_COEF_EN, h_2tap_hardcode_coef_en, + SCL_H_2TAP_SHARP_EN, h_2tap_sharp_en, + SCL_H_2TAP_SHARP_FACTOR, h_2tap_sharp_factor, + SCL_V_2TAP_HARDCODE_COEF_EN, v_2tap_hardcode_coef_en, + SCL_V_2TAP_SHARP_EN, v_2tap_sharp_en, + SCL_V_2TAP_SHARP_FACTOR, v_2tap_sharp_factor); + + if (!v_2tap_hardcode_coef_en || !h_2tap_hardcode_coef_en) { + bool filter_updated = false; + + filter_h = dpp1_dscl_get_filter_coeffs_64p( + scl_data->taps.h_taps, scl_data->ratios.horz); + filter_v = dpp1_dscl_get_filter_coeffs_64p( + scl_data->taps.v_taps, scl_data->ratios.vert); + + filter_updated = (filter_h && (filter_h != dpp->filter_h)) + || (filter_v && (filter_v != dpp->filter_v)); + + if (chroma_coef_mode) { + filter_h_c = dpp1_dscl_get_filter_coeffs_64p( + scl_data->taps.h_taps_c, scl_data->ratios.horz_c); + filter_v_c = dpp1_dscl_get_filter_coeffs_64p( + scl_data->taps.v_taps_c, scl_data->ratios.vert_c); + filter_updated = filter_updated || (filter_h_c && (filter_h_c != dpp->filter_h_c)) + || (filter_v_c && (filter_v_c != dpp->filter_v_c)); + } + + if (filter_updated) { + uint32_t scl_mode = REG_READ(SCL_MODE); + + if (!h_2tap_hardcode_coef_en && filter_h) { + dpp1_dscl_set_scaler_filter( + dpp, scl_data->taps.h_taps, + SCL_COEF_LUMA_HORZ_FILTER, filter_h); + } + dpp->filter_h = filter_h; + if (!v_2tap_hardcode_coef_en && filter_v) { + dpp1_dscl_set_scaler_filter( + dpp, scl_data->taps.v_taps, + SCL_COEF_LUMA_VERT_FILTER, filter_v); + } + dpp->filter_v = filter_v; + if (chroma_coef_mode) { + if (!h_2tap_hardcode_coef_en && filter_h_c) { + dpp1_dscl_set_scaler_filter( + dpp, scl_data->taps.h_taps_c, + SCL_COEF_CHROMA_HORZ_FILTER, filter_h_c); + } + if (!v_2tap_hardcode_coef_en && filter_v_c) { + dpp1_dscl_set_scaler_filter( + dpp, scl_data->taps.v_taps_c, + SCL_COEF_CHROMA_VERT_FILTER, filter_v_c); + } + } + dpp->filter_h_c = filter_h_c; + dpp->filter_v_c = filter_v_c; + + coef_ram_current = get_reg_field_value_ex( + scl_mode, dpp->tf_mask->SCL_COEF_RAM_SELECT_CURRENT, + dpp->tf_shift->SCL_COEF_RAM_SELECT_CURRENT); + + /* Swap coefficient RAM and set chroma coefficient mode */ + REG_SET_2(SCL_MODE, scl_mode, + SCL_COEF_RAM_SELECT, !coef_ram_current, + SCL_CHROMA_COEF_MODE, chroma_coef_mode); + } + } +} + +static int dpp1_dscl_get_lb_depth_bpc(enum lb_pixel_depth depth) +{ + if (depth == LB_PIXEL_DEPTH_30BPP) + return 10; + else if (depth == LB_PIXEL_DEPTH_24BPP) + return 8; + else if (depth == LB_PIXEL_DEPTH_18BPP) + return 6; + else if (depth == LB_PIXEL_DEPTH_36BPP) + return 12; + else { + 
BREAK_TO_DEBUGGER(); + return -1; /* Unsupported */ + } +} + +void dpp1_dscl_calc_lb_num_partitions( + const struct scaler_data *scl_data, + enum lb_memory_config lb_config, + int *num_part_y, + int *num_part_c) +{ + int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a, + lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a; + + int line_size = scl_data->viewport.width < scl_data->recout.width ? + scl_data->viewport.width : scl_data->recout.width; + int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? + scl_data->viewport_c.width : scl_data->recout.width; + + if (line_size == 0) + line_size = 1; + + if (line_size_c == 0) + line_size_c = 1; + + + lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth); + memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */ + memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */ + memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ + + if (lb_config == LB_MEMORY_CONFIG_1) { + lb_memory_size = 816; + lb_memory_size_c = 816; + lb_memory_size_a = 984; + } else if (lb_config == LB_MEMORY_CONFIG_2) { + lb_memory_size = 1088; + lb_memory_size_c = 1088; + lb_memory_size_a = 1312; + } else if (lb_config == LB_MEMORY_CONFIG_3) { + /* 420 mode: using 3rd mem from Y, Cr and Cb */ + lb_memory_size = 816 + 1088 + 848 + 848 + 848; + lb_memory_size_c = 816 + 1088; + lb_memory_size_a = 984 + 1312 + 456; + } else { + lb_memory_size = 816 + 1088 + 848; + lb_memory_size_c = 816 + 1088 + 848; + lb_memory_size_a = 984 + 1312 + 456; + } + *num_part_y = lb_memory_size / memory_line_size_y; + *num_part_c = lb_memory_size_c / memory_line_size_c; + num_partitions_a = lb_memory_size_a / memory_line_size_a; + + if (scl_data->lb_params.alpha_en + && (num_partitions_a < *num_part_y)) + *num_part_y = num_partitions_a; + + if (*num_part_y > 64) + *num_part_y = 64; + if (*num_part_c > 64) + *num_part_c = 64; + +} + +bool dpp1_dscl_is_lb_conf_valid(int ceil_vratio, int num_partitions, int vtaps) +{ + if (ceil_vratio > 2) + return vtaps <= (num_partitions - ceil_vratio + 2); + else + return vtaps <= num_partitions; +} + +/*find first match configuration which meets the min required lb size*/ +static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *dpp, + const struct scaler_data *scl_data) +{ + int num_part_y, num_part_c; + int vtaps = scl_data->taps.v_taps; + int vtaps_c = scl_data->taps.v_taps_c; + int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert); + int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c); + + if (dpp->base.ctx->dc->debug.use_max_lb) { + if (scl_data->format == PIXEL_FORMAT_420BPP8 + || scl_data->format == PIXEL_FORMAT_420BPP10) + return LB_MEMORY_CONFIG_3; + return LB_MEMORY_CONFIG_0; + } + + dpp->base.caps->dscl_calc_lb_num_partitions( + scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c); + + if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) + && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)) + return LB_MEMORY_CONFIG_1; + + dpp->base.caps->dscl_calc_lb_num_partitions( + scl_data, LB_MEMORY_CONFIG_2, &num_part_y, &num_part_c); + + if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) + && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)) + return LB_MEMORY_CONFIG_2; + + if (scl_data->format == PIXEL_FORMAT_420BPP8 + || scl_data->format == PIXEL_FORMAT_420BPP10) { + dpp->base.caps->dscl_calc_lb_num_partitions( + scl_data, LB_MEMORY_CONFIG_3, &num_part_y, &num_part_c); + + if 
(dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) + && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)) + return LB_MEMORY_CONFIG_3; + } + + dpp->base.caps->dscl_calc_lb_num_partitions( + scl_data, LB_MEMORY_CONFIG_0, &num_part_y, &num_part_c); + + /*Ensure we can support the requested number of vtaps*/ + ASSERT(dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) + && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)); + + return LB_MEMORY_CONFIG_0; +} + + +static void dpp1_dscl_set_manual_ratio_init( + struct dcn10_dpp *dpp, const struct scaler_data *data) +{ + uint32_t init_frac = 0; + uint32_t init_int = 0; + + REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0, + SCL_H_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.horz) << 5); + + REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0, + SCL_V_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.vert) << 5); + + REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0, + SCL_H_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.horz_c) << 5); + + REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0, + SCL_V_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.vert_c) << 5); + + /* + * 0.24 format for fraction, first five bits zeroed + */ + init_frac = dc_fixpt_u0d19(data->inits.h) << 5; + init_int = dc_fixpt_floor(data->inits.h); + REG_SET_2(SCL_HORZ_FILTER_INIT, 0, + SCL_H_INIT_FRAC, init_frac, + SCL_H_INIT_INT, init_int); + + init_frac = dc_fixpt_u0d19(data->inits.h_c) << 5; + init_int = dc_fixpt_floor(data->inits.h_c); + REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0, + SCL_H_INIT_FRAC_C, init_frac, + SCL_H_INIT_INT_C, init_int); + + init_frac = dc_fixpt_u0d19(data->inits.v) << 5; + init_int = dc_fixpt_floor(data->inits.v); + REG_SET_2(SCL_VERT_FILTER_INIT, 0, + SCL_V_INIT_FRAC, init_frac, + SCL_V_INIT_INT, init_int); + + if (REG(SCL_VERT_FILTER_INIT_BOT)) { + struct fixed31_32 bot = dc_fixpt_add(data->inits.v, data->ratios.vert); + + init_frac = dc_fixpt_u0d19(bot) << 5; + init_int = dc_fixpt_floor(bot); + REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0, + SCL_V_INIT_FRAC_BOT, init_frac, + SCL_V_INIT_INT_BOT, init_int); + } + + init_frac = dc_fixpt_u0d19(data->inits.v_c) << 5; + init_int = dc_fixpt_floor(data->inits.v_c); + REG_SET_2(SCL_VERT_FILTER_INIT_C, 0, + SCL_V_INIT_FRAC_C, init_frac, + SCL_V_INIT_INT_C, init_int); + + if (REG(SCL_VERT_FILTER_INIT_BOT_C)) { + struct fixed31_32 bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); + + init_frac = dc_fixpt_u0d19(bot) << 5; + init_int = dc_fixpt_floor(bot); + REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0, + SCL_V_INIT_FRAC_BOT_C, init_frac, + SCL_V_INIT_INT_BOT_C, init_int); + } +} + +/** + * dpp1_dscl_set_recout - Set the first pixel of RECOUT in the OTG active area + * + * @dpp: DPP data struct + * @recout: Rectangle information + * + * This function sets the MPC RECOUT_START and RECOUT_SIZE registers based on + * the values specified in the recount parameter. + * + * Note: This function only have effect if AutoCal is disabled. 
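
dpp1_dscl_set_manual_ratio_init() above packs each scale ratio as unsigned 3.19 fixed point shifted left by five (a 3.24-style field whose low five fraction bits are zero), and each init value as an integer floor plus a 0.19 fraction shifted the same way, per the "0.24 format for fraction, first five bits zeroed" comment. The sketch below illustrates that encoding only; it uses plain doubles instead of the driver's fixed31_32 type, assumes non-negative inputs, and ignores any rounding or clamping performed by dc_fixpt_u3d19()/dc_fixpt_u0d19().

#include <stdint.h>
#include <stdio.h>

/* u3.19 ratio, then << 5: the shape of the SCL_*_SCALE_RATIO field */
static uint32_t scale_ratio_reg(double ratio)
{
	return (uint32_t)(ratio * (1 << 19)) << 5;
}

/* 0.19 fraction of the init value, then << 5 (low five bits zero) */
static uint32_t init_frac_reg(double init)
{
	double frac = init - (double)(uint32_t)init;

	return (uint32_t)(frac * (1 << 19)) << 5;
}

/* integer part, equivalent to dc_fixpt_floor() for non-negative inits */
static uint32_t init_int_reg(double init)
{
	return (uint32_t)init;
}

int main(void)
{
	double vert_ratio = 1.5, vert_init = 0.75; /* example: 1.5x downscale */

	printf("SCL_V_SCALE_RATIO = 0x%x\n", scale_ratio_reg(vert_ratio));
	printf("SCL_V_INIT_INT    = %u\n", init_int_reg(vert_init));
	printf("SCL_V_INIT_FRAC   = 0x%x\n", init_frac_reg(vert_init));
	return 0;
}
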
+ */ +static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, + const struct rect *recout) +{ + REG_SET_2(RECOUT_START, 0, + /* First pixel of RECOUT in the active OTG area */ + RECOUT_START_X, recout->x, + /* First line of RECOUT in the active OTG area */ + RECOUT_START_Y, recout->y); + + REG_SET_2(RECOUT_SIZE, 0, + /* Number of RECOUT horizontal pixels */ + RECOUT_WIDTH, recout->width, + /* Number of RECOUT vertical lines */ + RECOUT_HEIGHT, recout->height); +} + +/** + * dpp1_dscl_set_scaler_manual_scale - Manually program scaler and line buffer + * + * @dpp_base: High level DPP struct + * @scl_data: scalaer_data info + * + * This is the primary function to program scaler and line buffer in manual + * scaling mode. To execute the required operations for manual scale, we need + * to disable AutoCal first. + */ +void dpp1_dscl_set_scaler_manual_scale(struct dpp *dpp_base, + const struct scaler_data *scl_data) +{ + enum lb_memory_config lb_config; + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode( + dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale); + bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN + && scl_data->format <= PIXEL_FORMAT_VIDEO_END; + + if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0) + return; + + PERF_TRACE(); + + dpp->scl_data = *scl_data; + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) { + if (dscl_mode != DSCL_MODE_DSCL_BYPASS) + dpp1_power_on_dscl(dpp_base, true); + } + + /* Autocal off */ + REG_SET_3(DSCL_AUTOCAL, 0, + AUTOCAL_MODE, AUTOCAL_MODE_OFF, + AUTOCAL_NUM_PIPE, 0, + AUTOCAL_PIPE_ID, 0); + + /*clean scaler boundary mode when Autocal off*/ + REG_SET(DSCL_CONTROL, 0, + SCL_BOUNDARY_MODE, 0); + + /* Recout */ + dpp1_dscl_set_recout(dpp, &scl_data->recout); + + /* MPC Size */ + REG_SET_2(MPC_SIZE, 0, + /* Number of horizontal pixels of MPC */ + MPC_WIDTH, scl_data->h_active, + /* Number of vertical lines of MPC */ + MPC_HEIGHT, scl_data->v_active); + + /* SCL mode */ + REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode); + + if (dscl_mode == DSCL_MODE_DSCL_BYPASS) { + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) + dpp1_power_on_dscl(dpp_base, false); + return; + } + + /* LB */ + lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data); + dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config); + + if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) + return; + + /* Black offsets */ + if (REG(SCL_BLACK_OFFSET)) { + if (ycbcr) + REG_SET_2(SCL_BLACK_OFFSET, 0, + SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, + SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR); + else + + REG_SET_2(SCL_BLACK_OFFSET, 0, + SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, + SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y); + } + + /* Manually calculate scale ratio and init values */ + dpp1_dscl_set_manual_ratio_init(dpp, scl_data); + + /* HTaps/VTaps */ + REG_SET_4(SCL_TAP_CONTROL, 0, + SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1, + SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1, + SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1, + SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1); + + dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr); + PERF_TRACE(); +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt new file mode 100644 index 0000000000..9c2d709634 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt @@ -0,0 +1,5 @@ +dal3_subdirectory_sources( + dcn20_dpp.c + dcn20_dpp_cm.c + dcn20_dpp.h +) diff --git 
a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c new file mode 100644 index 0000000000..56ebd7164d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c @@ -0,0 +1,435 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "core_types.h" + +#include "reg_helper.h" +#include "dcn20/dcn20_dpp.h" +#include "basics/conversion.h" + +#define NUM_PHASES 64 +#define HORZ_MAX_TAPS 8 +#define VERT_MAX_TAPS 8 + +#define BLACK_OFFSET_RGB_Y 0x0 +#define BLACK_OFFSET_CBCR 0x8000 + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + +void dpp20_read_state(struct dpp *dpp_base, + struct dcn_dpp_state *s) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_GET(DPP_CONTROL, + DPP_CLOCK_ENABLE, &s->is_enabled); + + // Degamma LUT (RAM) + REG_GET(CM_DGAM_CONTROL, + CM_DGAM_LUT_MODE, &s->dgam_lut_mode); + + // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) + REG_GET(CM_SHAPER_CONTROL, + CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); + REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_CONFIG_STATUS, &s->lut3d_mode, + CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(CM_3DLUT_MODE, + CM_3DLUT_SIZE, &s->lut3d_size); + + // Blend/Out Gamma (RAM) + REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK, + CM_BLNDGAM_CONFIG_STATUS, &s->rgam_lut_mode); +} + +void dpp2_power_on_obuf( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE(CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, power_on == true ? 1:0); + + REG_UPDATE(OBUF_MEM_PWR_CTRL, + OBUF_MEM_PWR_FORCE, power_on == true ? 0:1); + + REG_UPDATE(DSCL_MEM_PWR_CTRL, + LUT_MEM_PWR_FORCE, power_on == true ? 
0:1); +} + +void dpp2_dummy_program_input_lut( + struct dpp *dpp_base, + const struct dc_gamma *gamma) +{} + +static void dpp2_cnv_setup ( + struct dpp *dpp_base, + enum surface_pixel_format format, + enum expansion_mode mode, + struct dc_csc_transform input_csc_color_matrix, + enum dc_color_space input_color_space, + struct cnv_alpha_2bit_lut *alpha_2bit_lut) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + uint32_t pixel_format = 0; + uint32_t alpha_en = 1; + enum dc_color_space color_space = COLOR_SPACE_SRGB; + enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS; + bool force_disable_cursor = false; + struct out_csc_color_matrix tbl_entry; + uint32_t is_2bit = 0; + int i = 0; + + REG_SET_2(FORMAT_CONTROL, 0, + CNVC_BYPASS, 0, + FORMAT_EXPANSION_MODE, mode); + + //hardcode default + //FORMAT_CONTROL. FORMAT_CNV16 default 0: U0.16/S.1.15; 1: U1.15/ S.1.14 + //FORMAT_CONTROL. CNVC_BYPASS_MSB_ALIGN default 0: disabled 1: enabled + //FORMAT_CONTROL. CLAMP_POSITIVE default 0: disabled 1: enabled + //FORMAT_CONTROL. CLAMP_POSITIVE_C default 0: disabled 1: enabled + REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); + REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); + + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + pixel_format = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + pixel_format = 3; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + pixel_format = 8; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + pixel_format = 10; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + force_disable_cursor = false; + pixel_format = 65; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + force_disable_cursor = true; + pixel_format = 64; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + force_disable_cursor = true; + pixel_format = 67; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + force_disable_cursor = true; + pixel_format = 66; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + pixel_format = 26; /* ARGB16161616_UNORM */ + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + pixel_format = 24; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + pixel_format = 25; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: + pixel_format = 12; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: + pixel_format = 112; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: + pixel_format = 113; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: + pixel_format = 114; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: + pixel_format = 115; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: + pixel_format = 118; + alpha_en = 0; + break; + case 
SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: + pixel_format = 119; + alpha_en = 0; + break; + default: + break; + } + + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? input_color_space : color_space; + + if (is_2bit == 1 && alpha_2bit_lut != NULL) { + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); + } + + REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, + CNVC_SURFACE_PIXEL_FORMAT, pixel_format); + REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); + + // if input adjustments exist, program icsc with those values + if (input_csc_color_matrix.enable_adjustment + == true) { + for (i = 0; i < 12; i++) + tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; + + tbl_entry.color_space = input_color_space; + + if (color_space >= COLOR_SPACE_YCBCR601) + select = DCN2_ICSC_SELECT_ICSC_A; + else + select = DCN2_ICSC_SELECT_BYPASS; + + dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry); + } else + dpp2_program_input_csc(dpp_base, color_space, select, NULL); + + if (force_disable_cursor) { + REG_UPDATE(CURSOR_CONTROL, + CURSOR_ENABLE, 0); + REG_UPDATE(CURSOR0_CONTROL, + CUR0_ENABLE, 0); + + } + dpp2_power_on_obuf(dpp_base, true); + +} + +/*compute the maximum number of lines that we can fit in the line buffer*/ +void dscl2_calc_lb_num_partitions( + const struct scaler_data *scl_data, + enum lb_memory_config lb_config, + int *num_part_y, + int *num_part_c) +{ + int memory_line_size_y, memory_line_size_c, memory_line_size_a, + lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a; + + int line_size = scl_data->viewport.width < scl_data->recout.width ? + scl_data->viewport.width : scl_data->recout.width; + int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? 
+ scl_data->viewport_c.width : scl_data->recout.width; + + if (line_size == 0) + line_size = 1; + + if (line_size_c == 0) + line_size_c = 1; + + memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */ + memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */ + memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ + + if (lb_config == LB_MEMORY_CONFIG_1) { + lb_memory_size = 970; + lb_memory_size_c = 970; + lb_memory_size_a = 970; + } else if (lb_config == LB_MEMORY_CONFIG_2) { + lb_memory_size = 1290; + lb_memory_size_c = 1290; + lb_memory_size_a = 1290; + } else if (lb_config == LB_MEMORY_CONFIG_3) { + /* 420 mode: using 3rd mem from Y, Cr and Cb */ + lb_memory_size = 970 + 1290 + 484 + 484 + 484; + lb_memory_size_c = 970 + 1290; + lb_memory_size_a = 970 + 1290 + 484; + } else { + lb_memory_size = 970 + 1290 + 484; + lb_memory_size_c = 970 + 1290 + 484; + lb_memory_size_a = 970 + 1290 + 484; + } + *num_part_y = lb_memory_size / memory_line_size_y; + *num_part_c = lb_memory_size_c / memory_line_size_c; + num_partitions_a = lb_memory_size_a / memory_line_size_a; + + if (scl_data->lb_params.alpha_en + && (num_partitions_a < *num_part_y)) + *num_part_y = num_partitions_a; + + if (*num_part_y > 64) + *num_part_y = 64; + if (*num_part_c > 64) + *num_part_c = 64; +} + +void dpp2_cnv_set_alpha_keyer( + struct dpp *dpp_base, + struct cnv_color_keyer_params *color_keyer) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE(COLOR_KEYER_CONTROL, COLOR_KEYER_EN, color_keyer->color_keyer_en); + + REG_UPDATE(COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, color_keyer->color_keyer_mode); + + REG_UPDATE(COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, color_keyer->color_keyer_alpha_low); + REG_UPDATE(COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, color_keyer->color_keyer_alpha_high); + + REG_UPDATE(COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, color_keyer->color_keyer_red_low); + REG_UPDATE(COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, color_keyer->color_keyer_red_high); + + REG_UPDATE(COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, color_keyer->color_keyer_green_low); + REG_UPDATE(COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, color_keyer->color_keyer_green_high); + + REG_UPDATE(COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, color_keyer->color_keyer_blue_low); + REG_UPDATE(COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, color_keyer->color_keyer_blue_high); +} + +void dpp2_set_cursor_attributes( + struct dpp *dpp_base, + struct dc_cursor_attributes *cursor_attributes) +{ + enum dc_cursor_color_format color_format = cursor_attributes->color_format; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + int cur_rom_en = 0; + + if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || + color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { + cur_rom_en = 1; + } + } + + REG_UPDATE_3(CURSOR0_CONTROL, + CUR0_MODE, color_format, + CUR0_EXPANSION_MODE, 0, + CUR0_ROM_EN, cur_rom_en); + + if (color_format == CURSOR_MODE_MONO) { + /* todo: clarify what to program these to */ + REG_UPDATE(CURSOR0_COLOR0, + CUR0_COLOR0, 0x00000000); + REG_UPDATE(CURSOR0_COLOR1, + CUR0_COLOR1, 0xFFFFFFFF); + } +} + +void oppn20_dummy_program_regamma_pwl( + struct dpp *dpp, + const struct pwl_params *params, + enum opp_regamma mode) +{} + +static struct dpp_funcs dcn20_dpp_funcs = { + .dpp_read_state = dpp20_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, + .dpp_set_gamut_remap 
= dpp2_cm_set_gamut_remap, + .dpp_set_csc_adjustment = NULL, + .dpp_set_csc_default = NULL, + .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl, + .dpp_set_degamma = dpp2_set_degamma, + .dpp_program_input_lut = dpp2_dummy_program_input_lut, + .dpp_full_bypass = dpp1_full_bypass, + .dpp_setup = dpp2_cnv_setup, + .dpp_program_degamma_pwl = dpp2_set_degamma_pwl, + .dpp_program_blnd_lut = dpp20_program_blnd_lut, + .dpp_program_shaper_lut = dpp20_program_shaper, + .dpp_program_3dlut = dpp20_program_3dlut, + .dpp_program_bias_and_scale = NULL, + .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, + .set_cursor_attributes = dpp2_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp1_dppclk_control, + .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap, +}; + +static struct dpp_caps dcn20_dpp_cap = { + .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, + .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, +}; + +bool dpp2_construct( + struct dcn20_dpp *dpp, + struct dc_context *ctx, + uint32_t inst, + const struct dcn2_dpp_registers *tf_regs, + const struct dcn2_dpp_shift *tf_shift, + const struct dcn2_dpp_mask *tf_mask) +{ + dpp->base.ctx = ctx; + + dpp->base.inst = inst; + dpp->base.funcs = &dcn20_dpp_funcs; + dpp->base.caps = &dcn20_dpp_cap; + + dpp->tf_regs = tf_regs; + dpp->tf_shift = tf_shift; + dpp->tf_mask = tf_mask; + + dpp->lb_pixel_depth_supported = + LB_PIXEL_DEPTH_18BPP | + LB_PIXEL_DEPTH_24BPP | + LB_PIXEL_DEPTH_30BPP | + LB_PIXEL_DEPTH_36BPP; + + dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; + dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/ + + return true; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h new file mode 100644 index 0000000000..49cb25c9cb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h @@ -0,0 +1,781 @@ +/* Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN20_DPP_H__ +#define __DCN20_DPP_H__ + +#include "dcn10/dcn10_dpp.h" + +#define TO_DCN20_DPP(dpp)\ + container_of(dpp, struct dcn20_dpp, base) + +#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \ + SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id) + +#define TF_REG_LIST_DCN20_COMMON(id) \ + SRI(CM_BLNDGAM_CONTROL, CM, id), \ + SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \ + SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \ + SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \ + SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \ + SRI(CM_BLNDGAM_RAMB_END_CNTL2_G, CM, id), \ + SRI(CM_BLNDGAM_RAMB_END_CNTL1_R, CM, id), \ + SRI(CM_BLNDGAM_RAMB_END_CNTL2_R, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_0_1, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_2_3, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_4_5, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_6_7, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_8_9, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_10_11, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_12_13, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_14_15, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_16_17, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_18_19, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_20_21, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_22_23, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_24_25, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_26_27, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_28_29, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_30_31, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_32_33, CM, id), \ + SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \ + SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \ + SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \ + SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \ + SRI(CM_BLNDGAM_RAMA_END_CNTL2_G, CM, id), \ + SRI(CM_BLNDGAM_RAMA_END_CNTL1_R, CM, id), \ + SRI(CM_BLNDGAM_RAMA_END_CNTL2_R, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_0_1, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_2_3, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_4_5, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_6_7, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_8_9, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_10_11, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_12_13, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_14_15, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_16_17, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_18_19, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_20_21, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_22_23, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_24_25, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_26_27, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_28_29, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_30_31, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_32_33, CM, id), \ + SRI(CM_BLNDGAM_LUT_INDEX, CM, id), \ + SRI(CM_BLNDGAM_LUT_DATA, CM, id), \ + SRI(CM_3DLUT_MODE, CM, id), \ + SRI(CM_3DLUT_INDEX, CM, id), \ + SRI(CM_3DLUT_DATA, CM, id), \ + SRI(CM_3DLUT_DATA_30BIT, CM, id), \ + SRI(CM_3DLUT_READ_WRITE_CONTROL, CM, id), \ + SRI(CM_SHAPER_LUT_WRITE_EN_MASK, CM, id), \ + SRI(CM_SHAPER_CONTROL, CM, id), \ + SRI(CM_SHAPER_RAMB_START_CNTL_B, CM, id), \ + SRI(CM_SHAPER_RAMB_START_CNTL_G, CM, id), \ + SRI(CM_SHAPER_RAMB_START_CNTL_R, CM, id), \ + 
SRI(CM_SHAPER_RAMB_END_CNTL_B, CM, id), \ + SRI(CM_SHAPER_RAMB_END_CNTL_G, CM, id), \ + SRI(CM_SHAPER_RAMB_END_CNTL_R, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_0_1, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_2_3, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_4_5, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_6_7, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_8_9, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_10_11, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_12_13, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_14_15, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_16_17, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_18_19, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_20_21, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_22_23, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_24_25, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_26_27, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_28_29, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_30_31, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_32_33, CM, id), \ + SRI(CM_SHAPER_RAMA_START_CNTL_B, CM, id), \ + SRI(CM_SHAPER_RAMA_START_CNTL_G, CM, id), \ + SRI(CM_SHAPER_RAMA_START_CNTL_R, CM, id), \ + SRI(CM_SHAPER_RAMA_END_CNTL_B, CM, id), \ + SRI(CM_SHAPER_RAMA_END_CNTL_G, CM, id), \ + SRI(CM_SHAPER_RAMA_END_CNTL_R, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_0_1, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_2_3, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_4_5, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_6_7, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_8_9, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_10_11, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_12_13, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_14_15, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_16_17, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_18_19, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_20_21, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_22_23, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_24_25, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_26_27, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \ + SRI(CM_SHAPER_LUT_INDEX, CM, id) + +#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \ + SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\ + SRI(CM_ICSC_B_C11_C12, CM, id), \ + SRI(CM_ICSC_B_C33_C34, CM, id) + +#define TF_REG_LIST_DCN20(id) \ + TF_REG_LIST_DCN(id), \ + TF_REG_LIST_DCN20_COMMON(id), \ + TF_REG_LIST_DCN20_COMMON_UPDATED(id), \ + SRI(CURSOR_CONTROL, CURSOR0_, id), \ + SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \ + SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \ + SRI(FCNV_FP_BIAS_G, CNVC_CFG, id), \ + SRI(FCNV_FP_BIAS_B, CNVC_CFG, id), \ + SRI(FCNV_FP_SCALE_R, CNVC_CFG, id), \ + SRI(FCNV_FP_SCALE_G, CNVC_CFG, id), \ + SRI(FCNV_FP_SCALE_B, CNVC_CFG, id), \ + SRI(COLOR_KEYER_CONTROL, CNVC_CFG, id), \ + SRI(COLOR_KEYER_ALPHA, CNVC_CFG, id), \ + SRI(COLOR_KEYER_RED, CNVC_CFG, id), \ + SRI(COLOR_KEYER_GREEN, CNVC_CFG, id), \ + SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \ + SRI(CM_SHAPER_LUT_DATA, CM, id), \ + SRI(CURSOR_CONTROL, CURSOR0_, id),\ + SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\ + SRI(DSCL_MEM_PWR_CTRL, DSCL, id) + + +#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, 
CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh) + + +#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\ + TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, 
CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, 
CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh), 
\ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh), \ + 
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \ + TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \ + TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \ + TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_SIZE, mask_sh), \ + TF_SF(CM0_CM_3DLUT_INDEX, CM_3DLUT_INDEX, mask_sh), \ + TF_SF(CM0_CM_3DLUT_DATA, CM_3DLUT_DATA0, mask_sh), \ + TF_SF(CM0_CM_3DLUT_DATA, CM_3DLUT_DATA1, mask_sh), \ + TF_SF(CM0_CM_3DLUT_DATA_30BIT, CM_3DLUT_DATA_30BIT, mask_sh), \ + TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \ + TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \ + TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \ + TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_END_B, mask_sh), \ + 
TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, 
CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_START_G, mask_sh), \ + 
TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), 
\ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, 
CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \ + TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \ + TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh) + + +#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\ + TF_REG_LIST_SH_MASK_DCN(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \ + TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \ + TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CNV16, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE_C, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_EN, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\ + TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ + TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh) + +/* DPP CM debug status register: + * + * Status index including current ICSC, Gamut Remap Mode is 9 + * ICSC Mode: [4..3] + * Gamut Remap Mode: [10..9] + */ +#define CM_TEST_DEBUG_DATA_STATUS_IDX 9 + +#define TF_DEBUG_REG_LIST_SH_DCN20 \ + TF_DEBUG_REG_LIST_SH_DCN10, \ + .CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \ + .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9 + +#define TF_DEBUG_REG_LIST_MASK_DCN20 \ + TF_DEBUG_REG_LIST_MASK_DCN10, \ + .CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \ + .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600 + +#define TF_REG_FIELD_LIST_DCN2_0(type) \ + TF_REG_FIELD_LIST(type) \ + type CM_BLNDGAM_LUT_DATA; \ + type CM_TEST_DEBUG_DATA_ICSC_MODE; \ + type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \ + type FORMAT_CNV16; \ + type 
CNVC_BYPASS_MSB_ALIGN; \
+	type CLAMP_POSITIVE; \
+	type CLAMP_POSITIVE_C; \
+	type ALPHA_2BIT_LUT0; \
+	type ALPHA_2BIT_LUT1; \
+	type ALPHA_2BIT_LUT2; \
+	type ALPHA_2BIT_LUT3; \
+	type FCNV_FP_BIAS_R; \
+	type FCNV_FP_BIAS_G; \
+	type FCNV_FP_BIAS_B; \
+	type FCNV_FP_SCALE_R; \
+	type FCNV_FP_SCALE_G; \
+	type FCNV_FP_SCALE_B; \
+	type COLOR_KEYER_EN; \
+	type COLOR_KEYER_MODE; \
+	type COLOR_KEYER_ALPHA_LOW; \
+	type COLOR_KEYER_ALPHA_HIGH; \
+	type COLOR_KEYER_RED_LOW; \
+	type COLOR_KEYER_RED_HIGH; \
+	type COLOR_KEYER_GREEN_LOW; \
+	type COLOR_KEYER_GREEN_HIGH; \
+	type COLOR_KEYER_BLUE_LOW; \
+	type COLOR_KEYER_BLUE_HIGH; \
+	type CUR0_PIX_INV_MODE; \
+	type CUR0_PIXEL_ALPHA_MOD_EN; \
+	type CUR0_ROM_EN;\
+	type OBUF_MEM_PWR_FORCE
+
+
+struct dcn2_dpp_shift {
+	TF_REG_FIELD_LIST_DCN2_0(uint8_t);
+};
+
+struct dcn2_dpp_mask {
+	TF_REG_FIELD_LIST_DCN2_0(uint32_t);
+};
+
+#define DPP_DCN2_REG_VARIABLE_LIST \
+	DPP_COMMON_REG_VARIABLE_LIST \
+	uint32_t CM_BLNDGAM_LUT_DATA; \
+	uint32_t ALPHA_2BIT_LUT; \
+	uint32_t FCNV_FP_BIAS_R; \
+	uint32_t FCNV_FP_BIAS_G; \
+	uint32_t FCNV_FP_BIAS_B; \
+	uint32_t FCNV_FP_SCALE_R; \
+	uint32_t FCNV_FP_SCALE_G; \
+	uint32_t FCNV_FP_SCALE_B; \
+	uint32_t COLOR_KEYER_CONTROL; \
+	uint32_t COLOR_KEYER_ALPHA; \
+	uint32_t COLOR_KEYER_RED; \
+	uint32_t COLOR_KEYER_GREEN; \
+	uint32_t COLOR_KEYER_BLUE; \
+	uint32_t OBUF_MEM_PWR_CTRL
+
+#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \
+	uint32_t CM_GAMUT_REMAP_B_C11_C12; \
+	uint32_t CM_GAMUT_REMAP_B_C13_C14; \
+	uint32_t CM_GAMUT_REMAP_B_C21_C22; \
+	uint32_t CM_GAMUT_REMAP_B_C23_C24; \
+	uint32_t CM_GAMUT_REMAP_B_C31_C32; \
+	uint32_t CM_GAMUT_REMAP_B_C33_C34; \
+	uint32_t CM_ICSC_B_C11_C12; \
+	uint32_t CM_ICSC_B_C33_C34
+
+struct dcn2_dpp_registers {
+	DPP_DCN2_REG_VARIABLE_LIST;
+	DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND;
+};
+
+struct dcn20_dpp {
+	struct dpp base;
+
+	const struct dcn2_dpp_registers *tf_regs;
+	const struct dcn2_dpp_shift *tf_shift;
+	const struct dcn2_dpp_mask *tf_mask;
+
+	const uint16_t *filter_v;
+	const uint16_t *filter_h;
+	const uint16_t *filter_v_c;
+	const uint16_t *filter_h_c;
+	int lb_pixel_depth_supported;
+	int lb_memory_size;
+	int lb_bits_per_entry;
+	bool is_write_to_ram_a_safe;
+	struct scaler_data scl_data;
+	struct pwl_params pwl_data;
+};
+
+enum dcn20_input_csc_select {
+	DCN2_ICSC_SELECT_BYPASS = 0,
+	DCN2_ICSC_SELECT_ICSC_A = 1,
+	DCN2_ICSC_SELECT_ICSC_B = 2
+};
+
+enum dcn20_gamut_remap_select {
+	DCN2_GAMUT_REMAP_BYPASS = 0,
+	DCN2_GAMUT_REMAP_COEF_A = 1,
+	DCN2_GAMUT_REMAP_COEF_B = 2
+};
+
+void dpp20_read_state(struct dpp *dpp_base,
+		struct dcn_dpp_state *s);
+
+void dpp2_set_degamma_pwl(
+		struct dpp *dpp_base,
+		const struct pwl_params *params);
+
+void dpp2_set_degamma(
+		struct dpp *dpp_base,
+		enum ipp_degamma_mode mode);
+
+void dpp2_cm_set_gamut_remap(
+		struct dpp *dpp_base,
+		const struct dpp_grph_csc_adjustment *adjust);
+
+void dpp2_program_input_csc(
+		struct dpp *dpp_base,
+		enum dc_color_space color_space,
+		enum dcn20_input_csc_select input_select,
+		const struct out_csc_color_matrix *tbl_entry);
+
+bool dpp20_program_blnd_lut(
+		struct dpp *dpp_base, const struct pwl_params *params);
+
+bool dpp20_program_shaper(
+		struct dpp *dpp_base,
+		const struct pwl_params *params);
+
+bool dpp20_program_3dlut(
+		struct dpp *dpp_base,
+		const struct tetrahedral_params *params);
+
+void dpp2_cnv_set_alpha_keyer(
+		struct dpp *dpp_base,
+		struct cnv_color_keyer_params *color_keyer);
+
+void dscl2_calc_lb_num_partitions(
+		const struct scaler_data *scl_data,
+		enum lb_memory_config lb_config,
+		int *num_part_y,
+		int *num_part_c);
+
+void dpp2_set_cursor_attributes(
+		struct dpp *dpp_base,
+		struct dc_cursor_attributes *cursor_attributes);
+
+void dpp2_dummy_program_input_lut(
+		struct dpp *dpp_base,
+		const struct dc_gamma *gamma);
+
+void oppn20_dummy_program_regamma_pwl(
+		struct dpp *dpp,
+		const struct pwl_params *params,
+		enum opp_regamma mode);
+
+void dpp2_set_hdr_multiplier(
+		struct dpp *dpp_base,
+		uint32_t multiplier);
+
+bool dpp2_construct(struct dcn20_dpp *dpp2,
+		struct dc_context *ctx,
+		uint32_t inst,
+		const struct dcn2_dpp_registers *tf_regs,
+		const struct dcn2_dpp_shift *tf_shift,
+		const struct dcn2_dpp_mask *tf_mask);
+
+void dpp2_power_on_obuf(
+		struct dpp *dpp_base,
+		bool power_on);
+
+void dpp2_cm_get_gamut_remap(struct dpp *dpp_base,
+		struct dpp_grph_csc_adjustment *adjust);
+#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c
new file mode 100644
index 0000000000..31613372e2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c
@@ -0,0 +1,1202 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn20/dcn20_dpp.h"
+#include "basics/conversion.h"
+
+#include "dcn10/dcn10_cm_common.h"
+
+#define REG(reg)\
+	dpp->tf_regs->reg
+
+#define IND_REG(index) \
+	(index)
+
+#define CTX \
+	dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+	dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+
+static void dpp2_enable_cm_block(
+		struct dpp *dpp_base)
+{
+	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+	unsigned int cm_bypass_mode = 0;
+	//Temp, put CM in bypass mode
+	if (dpp_base->ctx->dc->debug.cm_in_bypass)
+		cm_bypass_mode = 1;
+
+	REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode);
+}
+
+
+static bool dpp2_degamma_ram_inuse(
+		struct dpp *dpp_base,
+		bool *ram_a_inuse)
+{
+	bool ret = false;
+	uint32_t status_reg = 0;
+	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+	REG_GET(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS,
+			&status_reg);
+
+	if (status_reg == 3) {
+		*ram_a_inuse = true;
+		ret = true;
+	} else if (status_reg == 4) {
+		*ram_a_inuse = false;
+		ret = true;
+	}
+	return ret;
+}
+
+static void dpp2_program_degamma_lut(
+		struct dpp *dpp_base,
+		const struct pwl_result_data *rgb,
+		uint32_t num,
+		bool is_ram_a)
+{
+	uint32_t i;
+
+	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+	REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK,
+			CM_DGAM_LUT_WRITE_EN_MASK, 7);
+	REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL,
+			is_ram_a == true ? 0:1);
+
+	REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0);
+	for (i = 0 ; i < num; i++) {
+		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg);
+		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg);
+		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg);
+
+		REG_SET(CM_DGAM_LUT_DATA, 0,
+				CM_DGAM_LUT_DATA, rgb[i].delta_red_reg);
+		REG_SET(CM_DGAM_LUT_DATA, 0,
+				CM_DGAM_LUT_DATA, rgb[i].delta_green_reg);
+		REG_SET(CM_DGAM_LUT_DATA, 0,
+				CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg);
+
+	}
+
+}
+
+void dpp2_set_degamma_pwl(
+		struct dpp *dpp_base,
+		const struct pwl_params *params)
+{
+	bool is_ram_a = true;
+
+	dpp1_power_on_degamma_lut(dpp_base, true);
+	dpp2_enable_cm_block(dpp_base);
+	dpp2_degamma_ram_inuse(dpp_base, &is_ram_a);
+	if (is_ram_a == true)
+		dpp1_program_degamma_lutb_settings(dpp_base, params);
+	else
+		dpp1_program_degamma_luta_settings(dpp_base, params);
+
+	dpp2_program_degamma_lut(dpp_base, params->rgb_resulted, params->hw_points_num, !is_ram_a);
+	dpp1_degamma_ram_select(dpp_base, !is_ram_a);
+}
+
+void dpp2_set_degamma(
+		struct dpp *dpp_base,
+		enum ipp_degamma_mode mode)
+{
+	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+	dpp2_enable_cm_block(dpp_base);
+
+	switch (mode) {
+	case IPP_DEGAMMA_MODE_BYPASS:
+		/* Setting de gamma bypass for now */
+		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0);
+		break;
+	case IPP_DEGAMMA_MODE_HW_sRGB:
+		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1);
+		break;
+	case IPP_DEGAMMA_MODE_HW_xvYCC:
+		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
+		break;
+	case IPP_DEGAMMA_MODE_USER_PWL:
+		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+		break;
+	default:
+		BREAK_TO_DEBUGGER();
+		break;
+	}
+}
+
+static void program_gamut_remap(
+		struct dcn20_dpp *dpp,
+		const uint16_t *regval,
+		enum dcn20_gamut_remap_select select)
+{
+	uint32_t cur_select = 0;
+	struct color_matrices_reg gam_regs;
+
+	if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) {
+		REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
CM_GAMUT_REMAP_MODE, 0); + return; + } + + /* determine which gamut_remap coefficients (A or B) we are using + * currently. select the alternate set to double buffer + * the update so gamut_remap is updated on frame boundary + */ + IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_STATUS_IDX, + CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select); + + /* value stored in dbg reg will be 1 greater than mode we want */ + if (cur_select != DCN2_GAMUT_REMAP_COEF_A) + select = DCN2_GAMUT_REMAP_COEF_A; + else + select = DCN2_GAMUT_REMAP_COEF_B; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (select == DCN2_GAMUT_REMAP_COEF_A) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + } else { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + REG_SET( + CM_GAMUT_REMAP_CONTROL, 0, + CM_GAMUT_REMAP_MODE, select); + +} + +void dpp2_cm_set_gamut_remap( + struct dpp *dpp_base, + const struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + int i = 0; + + if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) + /* Bypass if type is bypass or hw */ + program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS); + else { + struct fixed31_32 arr_matrix[12]; + uint16_t arr_reg_val[12]; + + for (i = 0; i < 12; i++) + arr_matrix[i] = adjust->temperature_matrix[i]; + + convert_float_matrix( + arr_reg_val, arr_matrix, 12); + + program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A); + } +} + +static void read_gamut_remap(struct dcn20_dpp *dpp, + uint16_t *regval, + enum dcn20_gamut_remap_select *select) +{ + struct color_matrices_reg gam_regs; + uint32_t selection; + + IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_STATUS_IDX, + CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &selection); + + *select = selection; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (*select == DCN2_GAMUT_REMAP_COEF_A) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == DCN2_GAMUT_REMAP_COEF_B) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + } +} + +void dpp2_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + uint16_t arr_reg_val[12] = {0}; + enum dcn20_gamut_remap_select select; + + read_gamut_remap(dpp, arr_reg_val, &select); + + if (select == DCN2_GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} + +void dpp2_program_input_csc( + struct dpp *dpp_base, + enum 
dc_color_space color_space, + enum dcn20_input_csc_select input_select, + const struct out_csc_color_matrix *tbl_entry) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + int i; + int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); + const uint16_t *regval = NULL; + uint32_t cur_select = 0; + enum dcn20_input_csc_select select; + struct color_matrices_reg icsc_regs; + + if (input_select == DCN2_ICSC_SELECT_BYPASS) { + REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); + return; + } + + if (tbl_entry == NULL) { + for (i = 0; i < arr_size; i++) + if (dpp_input_csc_matrix[i].color_space == color_space) { + regval = dpp_input_csc_matrix[i].regval; + break; + } + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + } else { + regval = tbl_entry->regval; + } + + /* determine which CSC coefficients (A or B) we are using + * currently. select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_STATUS_IDX, + CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select); + + if (cur_select != DCN2_ICSC_SELECT_ICSC_A) + select = DCN2_ICSC_SELECT_ICSC_A; + else + select = DCN2_ICSC_SELECT_ICSC_B; + + icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; + icsc_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; + icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; + icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; + + if (select == DCN2_ICSC_SELECT_ICSC_A) { + + icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); + icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); + + } else { + + icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12); + icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34); + + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &icsc_regs); + + REG_SET(CM_ICSC_CONTROL, 0, + CM_ICSC_MODE, select); +} + +static void dpp20_power_on_blnd_lut( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_SET(CM_MEM_PWR_CTRL, 0, + BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0:1); + +} + +static void dpp20_configure_blnd_lut( + struct dpp *dpp_base, + bool is_ram_a) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE(CM_BLNDGAM_LUT_WRITE_EN_MASK, + CM_BLNDGAM_LUT_WRITE_EN_MASK, 7); + REG_UPDATE(CM_BLNDGAM_LUT_WRITE_EN_MASK, + CM_BLNDGAM_LUT_WRITE_SEL, is_ram_a == true ? 
0:1); + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); +} + +static void dpp20_program_blnd_pwl( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num) +{ + uint32_t i; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + for (i = 0 ; i < num; i++) { + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg); + + REG_SET(CM_BLNDGAM_LUT_DATA, 0, + CM_BLNDGAM_LUT_DATA, rgb[i].delta_red_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, + CM_BLNDGAM_LUT_DATA, rgb[i].delta_green_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, + CM_BLNDGAM_LUT_DATA, rgb[i].delta_blue_reg); + + } + +} + +static void dcn20_dpp_cm_get_reg_field( + struct dcn20_dpp *dpp, + struct xfer_func_reg *reg) +{ + reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + + reg->shifts.field_region_end = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_B; + reg->masks.field_region_end = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_B; + reg->shifts.field_region_end_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->masks.field_region_end_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->shifts.field_region_end_base = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; + reg->masks.field_region_end_base = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; + reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; + reg->masks.field_region_linear_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; + reg->shifts.exp_region_start = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_B; + reg->masks.exp_region_start = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_B; + reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; + reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; +} + +/*program blnd lut RAM A*/ +static void dpp20_program_blnd_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + struct xfer_func_reg gam_regs; + + dcn20_dpp_cm_get_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMA_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMA_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMA_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMA_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMA_END_CNTL2_B); + gam_regs.start_end_cntl1_g = 
REG(CM_BLNDGAM_RAMA_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMA_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMA_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMA_END_CNTL2_R); + gam_regs.region_start = REG(CM_BLNDGAM_RAMA_REGION_0_1); + gam_regs.region_end = REG(CM_BLNDGAM_RAMA_REGION_32_33); + + cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +/*program blnd lut RAM B*/ +static void dpp20_program_blnd_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + struct xfer_func_reg gam_regs; + + dcn20_dpp_cm_get_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMB_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMB_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMB_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMB_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMB_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMB_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMB_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMB_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMB_END_CNTL2_R); + gam_regs.region_start = REG(CM_BLNDGAM_RAMB_REGION_0_1); + gam_regs.region_end = REG(CM_BLNDGAM_RAMB_REGION_32_33); + + cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +static enum dc_lut_mode dpp20_get_blndgam_current(struct dpp *dpp_base) +{ + enum dc_lut_mode mode; + uint32_t state_mode; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, &state_mode); + + switch (state_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 1: + mode = LUT_RAM_A; + break; + case 2: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + + return mode; +} + +bool dpp20_program_blnd_lut( + struct dpp *dpp_base, const struct pwl_params *params) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + if (params == NULL) { + REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_LUT_MODE, 0); + return false; + } + current_mode = dpp20_get_blndgam_current(dpp_base); + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) + next_mode = LUT_RAM_B; + else + next_mode = LUT_RAM_A; + + dpp20_power_on_blnd_lut(dpp_base, true); + dpp20_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A); + + if (next_mode == LUT_RAM_A) + dpp20_program_blnd_luta_settings(dpp_base, params); + else + dpp20_program_blnd_lutb_settings(dpp_base, params); + + dpp20_program_blnd_pwl( + dpp_base, params->rgb_resulted, params->hw_points_num); + + REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_LUT_MODE, + next_mode == LUT_RAM_A ? 
1:2); + + return true; +} + + +static void dpp20_program_shaper_lut( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num) +{ + uint32_t i, red, green, blue; + uint32_t red_delta, green_delta, blue_delta; + uint32_t red_value, green_value, blue_value; + + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + for (i = 0 ; i < num; i++) { + + red = rgb[i].red_reg; + green = rgb[i].green_reg; + blue = rgb[i].blue_reg; + + red_delta = rgb[i].delta_red_reg; + green_delta = rgb[i].delta_green_reg; + blue_delta = rgb[i].delta_blue_reg; + + red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff); + green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff); + blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff); + + REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, red_value); + REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, green_value); + REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, blue_value); + } + +} + +static enum dc_lut_mode dpp20_get_shaper_current(struct dpp *dpp_base) +{ + enum dc_lut_mode mode; + uint32_t state_mode; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_GET(CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, &state_mode); + + switch (state_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 1: + mode = LUT_RAM_A; + break; + case 2: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + + return mode; +} + +static void dpp20_configure_shaper_lut( + struct dpp *dpp_base, + bool is_ram_a) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, + CM_SHAPER_LUT_WRITE_EN_MASK, 7); + REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, + CM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1); + REG_SET(CM_SHAPER_LUT_INDEX, 0, CM_SHAPER_LUT_INDEX, 0); +} + +/*program shaper RAM A*/ + +static void dpp20_program_shaper_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + const struct gamma_curve *curve; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_SET_2(CM_SHAPER_RAMA_START_CNTL_B, 0, + CM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(CM_SHAPER_RAMA_START_CNTL_G, 0, + CM_SHAPER_RAMA_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, 0); + REG_SET_2(CM_SHAPER_RAMA_START_CNTL_R, 0, + CM_SHAPER_RAMA_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, 0); + + REG_SET_2(CM_SHAPER_RAMA_END_CNTL_B, 0, + CM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMA_END_CNTL_G, 0, + CM_SHAPER_RAMA_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMA_END_CNTL_R, 0, + CM_SHAPER_RAMA_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); + + curve = params->arr_curve_points; + REG_SET_4(CM_SHAPER_RAMA_REGION_0_1, 0, + CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + 
REG_SET_4(CM_SHAPER_RAMA_REGION_2_3, 0, + CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_4_5, 0, + CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_6_7, 0, + CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_8_9, 0, + CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_10_11, 0, + CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_12_13, 0, + CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_14_15, 0, + CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_16_17, 0, + CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_18_19, 0, + CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_20_21, 0, + CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_22_23, 0, + CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_24_25, 0, + CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, curve[0].offset, + 
CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_26_27, 0, + CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_28_29, 0, + CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_30_31, 0, + CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_32_33, 0, + CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); +} + +/*program shaper RAM B*/ +static void dpp20_program_shaper_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + const struct gamma_curve *curve; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_SET_2(CM_SHAPER_RAMB_START_CNTL_B, 0, + CM_SHAPER_RAMB_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(CM_SHAPER_RAMB_START_CNTL_G, 0, + CM_SHAPER_RAMB_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, 0); + REG_SET_2(CM_SHAPER_RAMB_START_CNTL_R, 0, + CM_SHAPER_RAMB_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, 0); + + REG_SET_2(CM_SHAPER_RAMB_END_CNTL_B, 0, + CM_SHAPER_RAMB_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMB_END_CNTL_G, 0, + CM_SHAPER_RAMB_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMB_END_CNTL_R, 0, + CM_SHAPER_RAMB_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); + + curve = params->arr_curve_points; + REG_SET_4(CM_SHAPER_RAMB_REGION_0_1, 0, + CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_2_3, 0, + CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_4_5, 0, + 
CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_6_7, 0, + CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_8_9, 0, + CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_10_11, 0, + CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_12_13, 0, + CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_14_15, 0, + CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_16_17, 0, + CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_18_19, 0, + CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_20_21, 0, + CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_22_23, 0, + CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_24_25, 0, + CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_26_27, 0, + CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, + 
CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_28_29, 0, + CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_30_31, 0, + CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_32_33, 0, + CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); + +} + + +bool dpp20_program_shaper( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + if (params == NULL) { + REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0); + return false; + } + current_mode = dpp20_get_shaper_current(dpp_base); + + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) + next_mode = LUT_RAM_B; + else + next_mode = LUT_RAM_A; + + dpp20_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A); + + if (next_mode == LUT_RAM_A) + dpp20_program_shaper_luta_settings(dpp_base, params); + else + dpp20_program_shaper_lutb_settings(dpp_base, params); + + dpp20_program_shaper_lut( + dpp_base, params->rgb_resulted, params->hw_points_num); + + REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2); + + return true; + +} + +static enum dc_lut_mode get3dlut_config( + struct dpp *dpp_base, + bool *is_17x17x17, + bool *is_12bits_color_channel) +{ + uint32_t i_mode, i_enable_10bits, lut_size; + enum dc_lut_mode mode; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_CONFIG_STATUS, &i_mode, + CM_3DLUT_30BIT_EN, &i_enable_10bits); + + switch (i_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 1: + mode = LUT_RAM_A; + break; + case 2: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + if (i_enable_10bits > 0) + *is_12bits_color_channel = false; + else + *is_12bits_color_channel = true; + + REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &lut_size); + + if (lut_size == 0) + *is_17x17x17 = true; + else + *is_17x17x17 = false; + + return mode; +} +/* + * select ramA or ramB, or bypass + * select color channel size 10 or 12 bits + * select 3dlut size 17x17x17 or 9x9x9 + */ +static void dpp20_set_3dlut_mode( + struct dpp *dpp_base, + enum dc_lut_mode mode, + bool is_color_channel_12bits, + bool is_lut_size17x17x17) +{ + uint32_t lut_mode; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + if (mode == LUT_BYPASS) + lut_mode = 0; + else if (mode == LUT_RAM_A) + lut_mode = 1; + else + lut_mode = 2; + + REG_UPDATE_2(CM_3DLUT_MODE, + CM_3DLUT_MODE, lut_mode, + CM_3DLUT_SIZE, is_lut_size17x17x17 == true ? 
0 : 1); +} + +static void dpp20_select_3dlut_ram( + struct dpp *dpp_base, + enum dc_lut_mode mode, + bool is_color_channel_12bits) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE_2(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1, + CM_3DLUT_30BIT_EN, + is_color_channel_12bits == true ? 0:1); +} + + + +static void dpp20_set3dlut_ram12( + struct dpp *dpp_base, + const struct dc_rgb *lut, + uint32_t entries) +{ + uint32_t i, red, green, blue, red1, green1, blue1; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + for (i = 0 ; i < entries; i += 2) { + red = lut[i].red<<4; + green = lut[i].green<<4; + blue = lut[i].blue<<4; + red1 = lut[i+1].red<<4; + green1 = lut[i+1].green<<4; + blue1 = lut[i+1].blue<<4; + + REG_SET_2(CM_3DLUT_DATA, 0, + CM_3DLUT_DATA0, red, + CM_3DLUT_DATA1, red1); + + REG_SET_2(CM_3DLUT_DATA, 0, + CM_3DLUT_DATA0, green, + CM_3DLUT_DATA1, green1); + + REG_SET_2(CM_3DLUT_DATA, 0, + CM_3DLUT_DATA0, blue, + CM_3DLUT_DATA1, blue1); + + } +} + +/* + * load selected lut with 10 bits color channels + */ +static void dpp20_set3dlut_ram10( + struct dpp *dpp_base, + const struct dc_rgb *lut, + uint32_t entries) +{ + uint32_t i, red, green, blue, value; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + for (i = 0; i < entries; i++) { + red = lut[i].red; + green = lut[i].green; + blue = lut[i].blue; + + value = (red<<20) | (green<<10) | blue; + + REG_SET(CM_3DLUT_DATA_30BIT, 0, CM_3DLUT_DATA_30BIT, value); + } + +} + + +static void dpp20_select_3dlut_ram_mask( + struct dpp *dpp_base, + uint32_t ram_selection_mask) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, + ram_selection_mask); + REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0); +} + +bool dpp20_program_3dlut( + struct dpp *dpp_base, + const struct tetrahedral_params *params) +{ + enum dc_lut_mode mode; + bool is_17x17x17; + bool is_12bits_color_channel; + const struct dc_rgb *lut0; + const struct dc_rgb *lut1; + const struct dc_rgb *lut2; + const struct dc_rgb *lut3; + int lut_size0; + int lut_size; + + if (params == NULL) { + dpp20_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false); + return false; + } + mode = get3dlut_config(dpp_base, &is_17x17x17, &is_12bits_color_channel); + + if (mode == LUT_BYPASS || mode == LUT_RAM_B) + mode = LUT_RAM_A; + else + mode = LUT_RAM_B; + + is_17x17x17 = !params->use_tetrahedral_9; + is_12bits_color_channel = params->use_12bits; + if (is_17x17x17) { + lut0 = params->tetrahedral_17.lut0; + lut1 = params->tetrahedral_17.lut1; + lut2 = params->tetrahedral_17.lut2; + lut3 = params->tetrahedral_17.lut3; + lut_size0 = sizeof(params->tetrahedral_17.lut0)/ + sizeof(params->tetrahedral_17.lut0[0]); + lut_size = sizeof(params->tetrahedral_17.lut1)/ + sizeof(params->tetrahedral_17.lut1[0]); + } else { + lut0 = params->tetrahedral_9.lut0; + lut1 = params->tetrahedral_9.lut1; + lut2 = params->tetrahedral_9.lut2; + lut3 = params->tetrahedral_9.lut3; + lut_size0 = sizeof(params->tetrahedral_9.lut0)/ + sizeof(params->tetrahedral_9.lut0[0]); + lut_size = sizeof(params->tetrahedral_9.lut1)/ + sizeof(params->tetrahedral_9.lut1[0]); + } + + dpp20_select_3dlut_ram(dpp_base, mode, + is_12bits_color_channel); + dpp20_select_3dlut_ram_mask(dpp_base, 0x1); + if (is_12bits_color_channel) + dpp20_set3dlut_ram12(dpp_base, lut0, lut_size0); + else + dpp20_set3dlut_ram10(dpp_base, lut0, lut_size0); + + dpp20_select_3dlut_ram_mask(dpp_base, 0x2); + if (is_12bits_color_channel) + 
dpp20_set3dlut_ram12(dpp_base, lut1, lut_size); + else + dpp20_set3dlut_ram10(dpp_base, lut1, lut_size); + + dpp20_select_3dlut_ram_mask(dpp_base, 0x4); + if (is_12bits_color_channel) + dpp20_set3dlut_ram12(dpp_base, lut2, lut_size); + else + dpp20_set3dlut_ram10(dpp_base, lut2, lut_size); + + dpp20_select_3dlut_ram_mask(dpp_base, 0x8); + if (is_12bits_color_channel) + dpp20_set3dlut_ram12(dpp_base, lut3, lut_size); + else + dpp20_set3dlut_ram10(dpp_base, lut3, lut_size); + + + dpp20_set_3dlut_mode(dpp_base, mode, is_12bits_color_channel, + is_17x17x17); + + return true; +} + +void dpp2_set_hdr_multiplier( + struct dpp *dpp_base, + uint32_t multiplier) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier); +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt new file mode 100644 index 0000000000..7711cd3c47 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dcn201_dpp.c + dcn201_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c new file mode 100644 index 0000000000..345202fee4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c @@ -0,0 +1,313 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "core_types.h" + +#include "reg_helper.h" +#include "dcn201/dcn201_dpp.h" +#include "basics/conversion.h" + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + +static void dpp201_cnv_setup( + struct dpp *dpp_base, + enum surface_pixel_format format, + enum expansion_mode mode, + struct dc_csc_transform input_csc_color_matrix, + enum dc_color_space input_color_space, + struct cnv_alpha_2bit_lut *alpha_2bit_lut) +{ + struct dcn201_dpp *dpp = TO_DCN201_DPP(dpp_base); + uint32_t pixel_format = 0; + uint32_t alpha_en = 1; + enum dc_color_space color_space = COLOR_SPACE_SRGB; + enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS; + bool force_disable_cursor = false; + uint32_t is_2bit = 0; + + REG_SET_2(FORMAT_CONTROL, 0, + CNVC_BYPASS, 0, + FORMAT_EXPANSION_MODE, mode); + + REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); + REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); + + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + pixel_format = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + pixel_format = 3; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + pixel_format = 8; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + pixel_format = 10; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + force_disable_cursor = false; + pixel_format = 65; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + force_disable_cursor = true; + pixel_format = 64; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + force_disable_cursor = true; + pixel_format = 67; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + force_disable_cursor = true; + pixel_format = 66; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + pixel_format = 22; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + pixel_format = 24; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + pixel_format = 25; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: + pixel_format = 12; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: + pixel_format = 112; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: + pixel_format = 113; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: + pixel_format = 114; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: + pixel_format = 115; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: + pixel_format = 118; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: + pixel_format = 119; + alpha_en = 0; + break; + default: + break; + } + + /* Set default color space based on format if none is given. 
*/ + color_space = input_color_space ? input_color_space : color_space; + + if (is_2bit == 1 && alpha_2bit_lut != NULL) { + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); + } + + REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, + CNVC_SURFACE_PIXEL_FORMAT, pixel_format); + REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); + + dpp1_program_input_csc(dpp_base, color_space, select, NULL); + + if (force_disable_cursor) { + REG_UPDATE(CURSOR_CONTROL, + CURSOR_ENABLE, 0); + REG_UPDATE(CURSOR0_CONTROL, + CUR0_ENABLE, 0); + } + dpp2_power_on_obuf(dpp_base, true); +} + +#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19)) + +static bool dpp201_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps) +{ + if (scl_data->viewport.width != scl_data->h_active && + scl_data->viewport.height != scl_data->v_active && + dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && + scl_data->format == PIXEL_FORMAT_FP16) + return false; + + if (scl_data->viewport.width > scl_data->h_active && + dpp->ctx->dc->debug.max_downscale_src_width != 0 && + scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) + return false; + + if (scl_data->ratios.horz.value == (8ll << 32)) + scl_data->ratios.horz.value--; + if (scl_data->ratios.vert.value == (8ll << 32)) + scl_data->ratios.vert.value--; + if (scl_data->ratios.horz_c.value == (8ll << 32)) + scl_data->ratios.horz_c.value--; + if (scl_data->ratios.vert_c.value == (8ll << 32)) + scl_data->ratios.vert_c.value--; + + if (in_taps->h_taps == 0) { + if (dc_fixpt_ceil(scl_data->ratios.horz) > 4) + scl_data->taps.h_taps = 8; + else + scl_data->taps.h_taps = 4; + } else + scl_data->taps.h_taps = in_taps->h_taps; + + if (in_taps->v_taps == 0) { + if (dc_fixpt_ceil(scl_data->ratios.vert) > 4) + scl_data->taps.v_taps = 8; + else + scl_data->taps.v_taps = 4; + } else + scl_data->taps.v_taps = in_taps->v_taps; + if (in_taps->v_taps_c == 0) { + if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4) + scl_data->taps.v_taps_c = 4; + else + scl_data->taps.v_taps_c = 2; + } else + scl_data->taps.v_taps_c = in_taps->v_taps_c; + if (in_taps->h_taps_c == 0) { + if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4) + scl_data->taps.h_taps_c = 4; + else + scl_data->taps.h_taps_c = 2; + } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) + scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; + else + scl_data->taps.h_taps_c = in_taps->h_taps_c; + + if (!dpp->ctx->dc->debug.always_scale) { + if (IDENTITY_RATIO(scl_data->ratios.horz)) + scl_data->taps.h_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert)) + scl_data->taps.v_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.horz_c)) + scl_data->taps.h_taps_c = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert_c)) + scl_data->taps.v_taps_c = 1; + } + + return true; +} + +static struct dpp_funcs dcn201_dpp_funcs = { + .dpp_read_state = dpp20_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp201_get_optimal_number_of_taps, + .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, + .dpp_set_csc_adjustment = NULL, + .dpp_set_csc_default = NULL, + .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl, + .dpp_set_degamma = dpp2_set_degamma, + 
.dpp_program_input_lut = dpp2_dummy_program_input_lut, + .dpp_full_bypass = dpp1_full_bypass, + .dpp_setup = dpp201_cnv_setup, + .dpp_program_degamma_pwl = dpp2_set_degamma_pwl, + .dpp_program_blnd_lut = dpp20_program_blnd_lut, + .dpp_program_shaper_lut = dpp20_program_shaper, + .dpp_program_3dlut = dpp20_program_3dlut, + .dpp_program_bias_and_scale = NULL, + .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, + .set_cursor_attributes = dpp2_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp1_dppclk_control, + .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap, +}; + +static struct dpp_caps dcn201_dpp_cap = { + .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, + .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, +}; + +bool dpp201_construct( + struct dcn201_dpp *dpp, + struct dc_context *ctx, + uint32_t inst, + const struct dcn201_dpp_registers *tf_regs, + const struct dcn201_dpp_shift *tf_shift, + const struct dcn201_dpp_mask *tf_mask) +{ + dpp->base.ctx = ctx; + + dpp->base.inst = inst; + dpp->base.funcs = &dcn201_dpp_funcs; + dpp->base.caps = &dcn201_dpp_cap; + + dpp->tf_regs = tf_regs; + dpp->tf_shift = tf_shift; + dpp->tf_mask = tf_mask; + + dpp->lb_pixel_depth_supported = + LB_PIXEL_DEPTH_18BPP | + LB_PIXEL_DEPTH_24BPP | + LB_PIXEL_DEPTH_30BPP; + + dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; + dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h new file mode 100644 index 0000000000..cbd5b47b4a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h @@ -0,0 +1,83 @@ +/* Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN201_DPP_H__ +#define __DCN201_DPP_H__ + +#include "dcn20/dcn20_dpp.h" + +#define TO_DCN201_DPP(dpp)\ + container_of(dpp, struct dcn201_dpp, base) + +#define TF_REG_LIST_DCN201(id) \ + TF_REG_LIST_DCN20(id) + +#define TF_REG_LIST_SH_MASK_DCN201(mask_sh)\ + TF_REG_LIST_SH_MASK_DCN20(mask_sh) + +#define TF_REG_FIELD_LIST_DCN201(type) \ + TF_REG_FIELD_LIST_DCN2_0(type) + +struct dcn201_dpp_shift { + TF_REG_FIELD_LIST_DCN201(uint8_t); +}; + +struct dcn201_dpp_mask { + TF_REG_FIELD_LIST_DCN201(uint32_t); +}; + +#define DPP_DCN201_REG_VARIABLE_LIST \ + DPP_DCN2_REG_VARIABLE_LIST + +struct dcn201_dpp_registers { + DPP_DCN201_REG_VARIABLE_LIST; +}; + +struct dcn201_dpp { + struct dpp base; + + const struct dcn201_dpp_registers *tf_regs; + const struct dcn201_dpp_shift *tf_shift; + const struct dcn201_dpp_mask *tf_mask; + + const uint16_t *filter_v; + const uint16_t *filter_h; + const uint16_t *filter_v_c; + const uint16_t *filter_h_c; + int lb_pixel_depth_supported; + int lb_memory_size; + int lb_bits_per_entry; + bool is_write_to_ram_a_safe; + struct scaler_data scl_data; + struct pwl_params pwl_data; +}; + +bool dpp201_construct(struct dcn201_dpp *dpp2, + struct dc_context *ctx, + uint32_t inst, + const struct dcn201_dpp_registers *tf_regs, + const struct dcn201_dpp_shift *tf_shift, + const struct dcn201_dpp_mask *tf_mask); + +#endif /* __DC_HWSS_DCN201_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt new file mode 100644 index 0000000000..0faee2a1e3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt @@ -0,0 +1,5 @@ +dal3_subdirectory_sources( + dcn30_dpp.c + dcn30_dpp_cm.c + dcn30_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c new file mode 100644 index 0000000000..f8c0cee340 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -0,0 +1,1531 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "core_types.h" +#include "reg_helper.h" +#include "dcn30/dcn30_dpp.h" +#include "basics/conversion.h" +#include "dcn30/dcn30_cm_common.h" + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + + +void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint32_t gamcor_lut_mode, rgam_lut_mode; + + REG_GET(DPP_CONTROL, + DPP_CLOCK_ENABLE, &s->is_enabled); + + // Pre-degamma (ROM) + REG_GET_2(PRE_DEGAM, + PRE_DEGAM_MODE, &s->pre_dgam_mode, + PRE_DEGAM_SELECT, &s->pre_dgam_select); + + // Gamma Correction (RAM) + REG_GET(CM_GAMCOR_CONTROL, + CM_GAMCOR_MODE_CURRENT, &s->gamcor_mode); + if (s->gamcor_mode) { + REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &gamcor_lut_mode); + if (!gamcor_lut_mode) + s->gamcor_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B + } + + // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) + REG_GET(CM_SHAPER_CONTROL, + CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); + REG_GET(CM_3DLUT_MODE, + CM_3DLUT_MODE_CURRENT, &s->lut3d_mode); + REG_GET(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(CM_3DLUT_MODE, + CM_3DLUT_SIZE, &s->lut3d_size); + + // Blend/Out Gamma (RAM) + REG_GET(CM_BLNDGAM_CONTROL, + CM_BLNDGAM_MODE_CURRENT, &s->rgam_lut_mode); + if (s->rgam_lut_mode){ + REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &rgam_lut_mode); + if (!rgam_lut_mode) + s->rgam_lut_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B + } +} + +/*program post scaler scs block in dpp CM*/ +void dpp3_program_post_csc( + struct dpp *dpp_base, + enum dc_color_space color_space, + enum dcn10_input_csc_select input_select, + const struct out_csc_color_matrix *tbl_entry) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + int i; + int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); + const uint16_t *regval = NULL; + uint32_t cur_select = 0; + enum dcn10_input_csc_select select; + struct color_matrices_reg gam_regs; + + if (input_select == INPUT_CSC_SELECT_BYPASS) { + REG_SET(CM_POST_CSC_CONTROL, 0, CM_POST_CSC_MODE, 0); + return; + } + + if (tbl_entry == NULL) { + for (i = 0; i < arr_size; i++) + if (dpp_input_csc_matrix[i].color_space == color_space) { + regval = dpp_input_csc_matrix[i].regval; + break; + } + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + } else { + regval = tbl_entry->regval; + } + + /* determine which CSC matrix (icsc or coma) we are using + * currently. 
select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + REG_GET(CM_POST_CSC_CONTROL, + CM_POST_CSC_MODE_CURRENT, &cur_select); + + if (cur_select != INPUT_CSC_SELECT_ICSC) + select = INPUT_CSC_SELECT_ICSC; + else + select = INPUT_CSC_SELECT_COMA; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_POST_CSC_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_POST_CSC_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_POST_CSC_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_POST_CSC_C12; + + if (select == INPUT_CSC_SELECT_ICSC) { + + gam_regs.csc_c11_c12 = REG(CM_POST_CSC_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_POST_CSC_C33_C34); + + } else { + + gam_regs.csc_c11_c12 = REG(CM_POST_CSC_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_POST_CSC_B_C33_C34); + + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + REG_SET(CM_POST_CSC_CONTROL, 0, + CM_POST_CSC_MODE, select); +} + + +/*CNVC degam unit has read only LUTs*/ +void dpp3_set_pre_degam(struct dpp *dpp_base, enum dc_transfer_func_predefined tr) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + int pre_degam_en = 1; + int degamma_lut_selection = 0; + + switch (tr) { + case TRANSFER_FUNCTION_LINEAR: + case TRANSFER_FUNCTION_UNITY: + pre_degam_en = 0; //bypass + break; + case TRANSFER_FUNCTION_SRGB: + degamma_lut_selection = 0; + break; + case TRANSFER_FUNCTION_BT709: + degamma_lut_selection = 4; + break; + case TRANSFER_FUNCTION_PQ: + degamma_lut_selection = 5; + break; + case TRANSFER_FUNCTION_HLG: + degamma_lut_selection = 6; + break; + case TRANSFER_FUNCTION_GAMMA22: + degamma_lut_selection = 1; + break; + case TRANSFER_FUNCTION_GAMMA24: + degamma_lut_selection = 2; + break; + case TRANSFER_FUNCTION_GAMMA26: + degamma_lut_selection = 3; + break; + default: + pre_degam_en = 0; + break; + } + + REG_SET_2(PRE_DEGAM, 0, + PRE_DEGAM_MODE, pre_degam_en, + PRE_DEGAM_SELECT, degamma_lut_selection); +} + +void dpp3_cnv_setup ( + struct dpp *dpp_base, + enum surface_pixel_format format, + enum expansion_mode mode, + struct dc_csc_transform input_csc_color_matrix, + enum dc_color_space input_color_space, + struct cnv_alpha_2bit_lut *alpha_2bit_lut) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint32_t pixel_format = 0; + uint32_t alpha_en = 1; + enum dc_color_space color_space = COLOR_SPACE_SRGB; + enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS; + bool force_disable_cursor = false; + uint32_t is_2bit = 0; + uint32_t alpha_plane_enable = 0; + uint32_t dealpha_en = 0, dealpha_ablnd_en = 0; + uint32_t realpha_en = 0, realpha_ablnd_en = 0; + uint32_t program_prealpha_dealpha = 0; + struct out_csc_color_matrix tbl_entry; + int i; + + REG_SET_2(FORMAT_CONTROL, 0, + CNVC_BYPASS, 0, + FORMAT_EXPANSION_MODE, mode); + + REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); + REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); + + REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_R, 0); + REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_G, 1); + REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_B, 2); + + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + pixel_format = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + pixel_format = 3; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + pixel_format = 8; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + 
pixel_format = 10; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + force_disable_cursor = false; + pixel_format = 65; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + force_disable_cursor = true; + pixel_format = 64; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + force_disable_cursor = true; + pixel_format = 67; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + force_disable_cursor = true; + pixel_format = 66; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + pixel_format = 26; /* ARGB16161616_UNORM */ + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + pixel_format = 24; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + pixel_format = 25; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: + pixel_format = 12; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: + pixel_format = 112; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: + pixel_format = 113; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: + pixel_format = 114; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: + pixel_format = 115; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE: + pixel_format = 116; + alpha_plane_enable = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: + pixel_format = 116; + alpha_plane_enable = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: + pixel_format = 118; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: + pixel_format = 119; + alpha_en = 0; + break; + default: + break; + } + + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? input_color_space : color_space; + + if (is_2bit == 1 && alpha_2bit_lut != NULL) { + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); + } + + REG_SET_2(CNVC_SURFACE_PIXEL_FORMAT, 0, + CNVC_SURFACE_PIXEL_FORMAT, pixel_format, + CNVC_ALPHA_PLANE_ENABLE, alpha_plane_enable); + REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); + + if (program_prealpha_dealpha) { + dealpha_en = 1; + realpha_en = 1; + } + REG_SET_2(PRE_DEALPHA, 0, + PRE_DEALPHA_EN, dealpha_en, + PRE_DEALPHA_ABLND_EN, dealpha_ablnd_en); + REG_SET_2(PRE_REALPHA, 0, + PRE_REALPHA_EN, realpha_en, + PRE_REALPHA_ABLND_EN, realpha_ablnd_en); + + /* If input adjustment exists, program the ICSC with those values. 
*/ + if (input_csc_color_matrix.enable_adjustment == true) { + for (i = 0; i < 12; i++) + tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; + + tbl_entry.color_space = input_color_space; + + if (color_space >= COLOR_SPACE_YCBCR601) + select = INPUT_CSC_SELECT_ICSC; + else + select = INPUT_CSC_SELECT_BYPASS; + + dpp3_program_post_csc(dpp_base, color_space, select, + &tbl_entry); + } else { + dpp3_program_post_csc(dpp_base, color_space, select, NULL); + } + + if (force_disable_cursor) { + REG_UPDATE(CURSOR_CONTROL, + CURSOR_ENABLE, 0); + REG_UPDATE(CURSOR0_CONTROL, + CUR0_ENABLE, 0); + } +} + +#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19)) + +void dpp3_set_cursor_attributes( + struct dpp *dpp_base, + struct dc_cursor_attributes *cursor_attributes) +{ + enum dc_cursor_color_format color_format = cursor_attributes->color_format; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + int cur_rom_en = 0; + + if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || + color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { + cur_rom_en = 1; + } + } + + REG_UPDATE_3(CURSOR0_CONTROL, + CUR0_MODE, color_format, + CUR0_EXPANSION_MODE, 0, + CUR0_ROM_EN, cur_rom_en); + + if (color_format == CURSOR_MODE_MONO) { + /* todo: clarify what to program these to */ + REG_UPDATE(CURSOR0_COLOR0, + CUR0_COLOR0, 0x00000000); + REG_UPDATE(CURSOR0_COLOR1, + CUR0_COLOR1, 0xFFFFFFFF); + } + + dpp_base->att.cur0_ctl.bits.expansion_mode = 0; + dpp_base->att.cur0_ctl.bits.cur0_rom_en = cur_rom_en; + dpp_base->att.cur0_ctl.bits.mode = color_format; +} + + +bool dpp3_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps) +{ + int num_part_y, num_part_c; + int max_taps_y, max_taps_c; + int min_taps_y, min_taps_c; + enum lb_memory_config lb_config; + + if (scl_data->viewport.width > scl_data->h_active && + dpp->ctx->dc->debug.max_downscale_src_width != 0 && + scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) + return false; + + /* + * Set default taps if none are provided + * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling + * taps = 4 for upscaling + */ + if (in_taps->h_taps == 0) { + if (dc_fixpt_ceil(scl_data->ratios.horz) > 1) + scl_data->taps.h_taps = min(2 * dc_fixpt_ceil(scl_data->ratios.horz), 8); + else + scl_data->taps.h_taps = 4; + } else + scl_data->taps.h_taps = in_taps->h_taps; + if (in_taps->v_taps == 0) { + if (dc_fixpt_ceil(scl_data->ratios.vert) > 1) + scl_data->taps.v_taps = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert, 2)), 8); + else + scl_data->taps.v_taps = 4; + } else + scl_data->taps.v_taps = in_taps->v_taps; + if (in_taps->v_taps_c == 0) { + if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 1) + scl_data->taps.v_taps_c = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert_c, 2)), 8); + else + scl_data->taps.v_taps_c = 4; + } else + scl_data->taps.v_taps_c = in_taps->v_taps_c; + if (in_taps->h_taps_c == 0) { + if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 1) + scl_data->taps.h_taps_c = min(2 * dc_fixpt_ceil(scl_data->ratios.horz_c), 8); + else + scl_data->taps.h_taps_c = 4; + } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) + /* Only 1 and even h_taps_c are supported by hw */ + scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; + else + scl_data->taps.h_taps_c = in_taps->h_taps_c; + + /*Ensure we can support the requested number of vtaps*/ + min_taps_y = 
dc_fixpt_ceil(scl_data->ratios.vert); + min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c); + + /* Use LB_MEMORY_CONFIG_3 for 4:2:0 */ + if ((scl_data->format == PIXEL_FORMAT_420BPP8) || (scl_data->format == PIXEL_FORMAT_420BPP10)) + lb_config = LB_MEMORY_CONFIG_3; + else + lb_config = LB_MEMORY_CONFIG_0; + + dpp->caps->dscl_calc_lb_num_partitions( + scl_data, lb_config, &num_part_y, &num_part_c); + + /* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */ + if (dc_fixpt_ceil(scl_data->ratios.vert) > 2) + max_taps_y = num_part_y - (dc_fixpt_ceil(scl_data->ratios.vert) - 2); + else + max_taps_y = num_part_y; + + if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 2) + max_taps_c = num_part_c - (dc_fixpt_ceil(scl_data->ratios.vert_c) - 2); + else + max_taps_c = num_part_c; + + if (max_taps_y < min_taps_y) + return false; + else if (max_taps_c < min_taps_c) + return false; + + if (scl_data->taps.v_taps > max_taps_y) + scl_data->taps.v_taps = max_taps_y; + + if (scl_data->taps.v_taps_c > max_taps_c) + scl_data->taps.v_taps_c = max_taps_c; + + if (!dpp->ctx->dc->debug.always_scale) { + if (IDENTITY_RATIO(scl_data->ratios.horz)) + scl_data->taps.h_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert)) + scl_data->taps.v_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.horz_c)) + scl_data->taps.h_taps_c = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert_c)) + scl_data->taps.v_taps_c = 1; + } + + return true; +} + +static void dpp3_deferred_update(struct dpp *dpp_base) +{ + int bypass_state; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (dpp_base->deferred_reg_writes.bits.disable_dscl) { + REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3); + dpp_base->deferred_reg_writes.bits.disable_dscl = false; + } + + if (dpp_base->deferred_reg_writes.bits.disable_gamcor) { + REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &bypass_state); + if (bypass_state == 0) { // only program if bypass was latched + REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 3); + } else + ASSERT(0); // LUT select was updated again before vupdate + dpp_base->deferred_reg_writes.bits.disable_gamcor = false; + } + + if (dpp_base->deferred_reg_writes.bits.disable_blnd_lut) { + REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &bypass_state); + if (bypass_state == 0) { // only program if bypass was latched + REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 3); + } else + ASSERT(0); // LUT select was updated again before vupdate + dpp_base->deferred_reg_writes.bits.disable_blnd_lut = false; + } + + if (dpp_base->deferred_reg_writes.bits.disable_3dlut) { + REG_GET(CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, &bypass_state); + if (bypass_state == 0) { // only program if bypass was latched + REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 3); + } else + ASSERT(0); // LUT select was updated again before vupdate + dpp_base->deferred_reg_writes.bits.disable_3dlut = false; + } + + if (dpp_base->deferred_reg_writes.bits.disable_shaper) { + REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &bypass_state); + if (bypass_state == 0) { // only program if bypass was latched + REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 3); + } else + ASSERT(0); // LUT select was updated again before vupdate + dpp_base->deferred_reg_writes.bits.disable_shaper = false; + } +} + +static void dpp3_power_on_blnd_lut( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { + if (power_on) { + REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 
0); + REG_WAIT(CM_MEM_PWR_STATUS, BLNDGAM_MEM_PWR_STATE, 0, 1, 5); + } else { + dpp_base->ctx->dc->optimized_required = true; + dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true; + } + } else { + REG_SET(CM_MEM_PWR_CTRL, 0, + BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1); + } +} + +static void dpp3_power_on_hdr3dlut( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { + if (power_on) { + REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 0); + REG_WAIT(CM_MEM_PWR_STATUS2, HDR3DLUT_MEM_PWR_STATE, 0, 1, 5); + } else { + dpp_base->ctx->dc->optimized_required = true; + dpp_base->deferred_reg_writes.bits.disable_3dlut = true; + } + } +} + +static void dpp3_power_on_shaper( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { + if (power_on) { + REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 0); + REG_WAIT(CM_MEM_PWR_STATUS2, SHAPER_MEM_PWR_STATE, 0, 1, 5); + } else { + dpp_base->ctx->dc->optimized_required = true; + dpp_base->deferred_reg_writes.bits.disable_shaper = true; + } + } +} + +static void dpp3_configure_blnd_lut( + struct dpp *dpp_base, + bool is_ram_a) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_UPDATE_2(CM_BLNDGAM_LUT_CONTROL, + CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 7, + CM_BLNDGAM_LUT_HOST_SEL, is_ram_a == true ? 0 : 1); + + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); +} + +static void dpp3_program_blnd_pwl( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num) +{ + uint32_t i; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg; + uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg; + uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg; + + if (is_rgb_equal(rgb, num)) { + for (i = 0 ; i < num; i++) + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); + } else { + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); + REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 4); + for (i = 0 ; i < num; i++) + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); + + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); + REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 2); + for (i = 0 ; i < num; i++) + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_green); + + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); + REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 1); + for (i = 0 ; i < num; i++) + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_blue); + } +} + +static void dcn3_dpp_cm_get_reg_field( + struct dcn3_dpp *dpp, + struct dcn3_xfer_func_reg *reg) +{ + reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + 
reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + + reg->shifts.field_region_end = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_B; + reg->masks.field_region_end = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_B; + reg->shifts.field_region_end_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->masks.field_region_end_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->shifts.field_region_end_base = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; + reg->masks.field_region_end_base = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; + reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; + reg->masks.field_region_linear_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; + reg->shifts.exp_region_start = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_B; + reg->masks.exp_region_start = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_B; + reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; + reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; +} + +/*program blnd lut RAM A*/ +static void dpp3_program_blnd_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + struct dcn3_xfer_func_reg gam_regs; + + dcn3_dpp_cm_get_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMA_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMA_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMA_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMA_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMA_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMA_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMA_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMA_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMA_END_CNTL2_R); + gam_regs.region_start = REG(CM_BLNDGAM_RAMA_REGION_0_1); + gam_regs.region_end = REG(CM_BLNDGAM_RAMA_REGION_32_33); + + cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +/*program blnd lut RAM B*/ +static void dpp3_program_blnd_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + struct dcn3_xfer_func_reg gam_regs; + + dcn3_dpp_cm_get_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMB_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMB_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMB_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = 
REG(CM_BLNDGAM_RAMB_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMB_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMB_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMB_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMB_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMB_END_CNTL2_R); + gam_regs.region_start = REG(CM_BLNDGAM_RAMB_REGION_0_1); + gam_regs.region_end = REG(CM_BLNDGAM_RAMB_REGION_32_33); + + cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +static enum dc_lut_mode dpp3_get_blndgam_current(struct dpp *dpp_base) +{ + enum dc_lut_mode mode; + uint32_t mode_current = 0; + uint32_t in_use = 0; + + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &mode_current); + REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &in_use); + + switch (mode_current) { + case 0: + case 1: + mode = LUT_BYPASS; + break; + + case 2: + if (in_use == 0) + mode = LUT_RAM_A; + else + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + + return mode; +} + +static bool dpp3_program_blnd_lut(struct dpp *dpp_base, + const struct pwl_params *params) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (params == NULL) { + REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_MODE, 0); + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) + dpp3_power_on_blnd_lut(dpp_base, false); + return false; + } + + current_mode = dpp3_get_blndgam_current(dpp_base); + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_B) + next_mode = LUT_RAM_A; + else + next_mode = LUT_RAM_B; + + dpp3_power_on_blnd_lut(dpp_base, true); + dpp3_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A); + + if (next_mode == LUT_RAM_A) + dpp3_program_blnd_luta_settings(dpp_base, params); + else + dpp3_program_blnd_lutb_settings(dpp_base, params); + + dpp3_program_blnd_pwl( + dpp_base, params->rgb_resulted, params->hw_points_num); + + REG_UPDATE_2(CM_BLNDGAM_CONTROL, + CM_BLNDGAM_MODE, 2, + CM_BLNDGAM_SELECT, next_mode == LUT_RAM_A ? 
0 : 1); + + return true; +} + + +static void dpp3_program_shaper_lut( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num) +{ + uint32_t i, red, green, blue; + uint32_t red_delta, green_delta, blue_delta; + uint32_t red_value, green_value, blue_value; + + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + for (i = 0 ; i < num; i++) { + + red = rgb[i].red_reg; + green = rgb[i].green_reg; + blue = rgb[i].blue_reg; + + red_delta = rgb[i].delta_red_reg; + green_delta = rgb[i].delta_green_reg; + blue_delta = rgb[i].delta_blue_reg; + + red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff); + green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff); + blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff); + + REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, red_value); + REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, green_value); + REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, blue_value); + } + +} + +static enum dc_lut_mode dpp3_get_shaper_current(struct dpp *dpp_base) +{ + enum dc_lut_mode mode; + uint32_t state_mode; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &state_mode); + + switch (state_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 1: + mode = LUT_RAM_A; + break; + case 2: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + + return mode; +} + +static void dpp3_configure_shaper_lut( + struct dpp *dpp_base, + bool is_ram_a) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, + CM_SHAPER_LUT_WRITE_EN_MASK, 7); + REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, + CM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1); + REG_SET(CM_SHAPER_LUT_INDEX, 0, CM_SHAPER_LUT_INDEX, 0); +} + +/*program shaper RAM A*/ + +static void dpp3_program_shaper_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + const struct gamma_curve *curve; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_SET_2(CM_SHAPER_RAMA_START_CNTL_B, 0, + CM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(CM_SHAPER_RAMA_START_CNTL_G, 0, + CM_SHAPER_RAMA_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, 0); + REG_SET_2(CM_SHAPER_RAMA_START_CNTL_R, 0, + CM_SHAPER_RAMA_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, 0); + + REG_SET_2(CM_SHAPER_RAMA_END_CNTL_B, 0, + CM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMA_END_CNTL_G, 0, + CM_SHAPER_RAMA_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMA_END_CNTL_R, 0, + CM_SHAPER_RAMA_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); + + curve = params->arr_curve_points; + REG_SET_4(CM_SHAPER_RAMA_REGION_0_1, 0, + CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_2_3, 0, 
+ CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_4_5, 0, + CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_6_7, 0, + CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_8_9, 0, + CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_10_11, 0, + CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_12_13, 0, + CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_14_15, 0, + CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_16_17, 0, + CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_18_19, 0, + CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_20_21, 0, + CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_22_23, 0, + CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_24_25, 0, + CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, + 
CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_26_27, 0, + CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_28_29, 0, + CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_30_31, 0, + CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_32_33, 0, + CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); +} + +/*program shaper RAM B*/ +static void dpp3_program_shaper_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + const struct gamma_curve *curve; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_SET_2(CM_SHAPER_RAMB_START_CNTL_B, 0, + CM_SHAPER_RAMB_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(CM_SHAPER_RAMB_START_CNTL_G, 0, + CM_SHAPER_RAMB_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, 0); + REG_SET_2(CM_SHAPER_RAMB_START_CNTL_R, 0, + CM_SHAPER_RAMB_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, 0); + + REG_SET_2(CM_SHAPER_RAMB_END_CNTL_B, 0, + CM_SHAPER_RAMB_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMB_END_CNTL_G, 0, + CM_SHAPER_RAMB_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMB_END_CNTL_R, 0, + CM_SHAPER_RAMB_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); + + curve = params->arr_curve_points; + REG_SET_4(CM_SHAPER_RAMB_REGION_0_1, 0, + CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_2_3, 0, + CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_4_5, 0, + CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, curve[0].offset, + 
CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_6_7, 0, + CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_8_9, 0, + CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_10_11, 0, + CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_12_13, 0, + CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_14_15, 0, + CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_16_17, 0, + CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_18_19, 0, + CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_20_21, 0, + CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_22_23, 0, + CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_24_25, 0, + CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_26_27, 0, + CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, curve[1].offset, + 
CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_28_29, 0, + CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_30_31, 0, + CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_32_33, 0, + CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); + +} + + +static bool dpp3_program_shaper(struct dpp *dpp_base, + const struct pwl_params *params) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (params == NULL) { + REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0); + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) + dpp3_power_on_shaper(dpp_base, false); + return false; + } + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) + dpp3_power_on_shaper(dpp_base, true); + + current_mode = dpp3_get_shaper_current(dpp_base); + + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) + next_mode = LUT_RAM_B; + else + next_mode = LUT_RAM_A; + + dpp3_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A); + + if (next_mode == LUT_RAM_A) + dpp3_program_shaper_luta_settings(dpp_base, params); + else + dpp3_program_shaper_lutb_settings(dpp_base, params); + + dpp3_program_shaper_lut( + dpp_base, params->rgb_resulted, params->hw_points_num); + + REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2); + + return true; + +} + +static enum dc_lut_mode get3dlut_config( + struct dpp *dpp_base, + bool *is_17x17x17, + bool *is_12bits_color_channel) +{ + uint32_t i_mode, i_enable_10bits, lut_size; + enum dc_lut_mode mode; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_GET(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_30BIT_EN, &i_enable_10bits); + REG_GET(CM_3DLUT_MODE, + CM_3DLUT_MODE_CURRENT, &i_mode); + + switch (i_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 1: + mode = LUT_RAM_A; + break; + case 2: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + if (i_enable_10bits > 0) + *is_12bits_color_channel = false; + else + *is_12bits_color_channel = true; + + REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &lut_size); + + if (lut_size == 0) + *is_17x17x17 = true; + else + *is_17x17x17 = false; + + return mode; +} +/* + * select ramA or ramB, or bypass + * select color channel size 10 or 12 bits + * select 3dlut size 17x17x17 or 9x9x9 + */ +static void dpp3_set_3dlut_mode( + struct dpp *dpp_base, + enum dc_lut_mode mode, + bool is_color_channel_12bits, + bool is_lut_size17x17x17) +{ + uint32_t lut_mode; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (mode == LUT_BYPASS) + lut_mode = 0; + else if (mode == LUT_RAM_A) + lut_mode = 1; + else + lut_mode = 2; + + REG_UPDATE_2(CM_3DLUT_MODE, + CM_3DLUT_MODE, lut_mode, + CM_3DLUT_SIZE, is_lut_size17x17x17 == true ? 
0 : 1); +} + +static void dpp3_select_3dlut_ram( + struct dpp *dpp_base, + enum dc_lut_mode mode, + bool is_color_channel_12bits) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_UPDATE_2(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1, + CM_3DLUT_30BIT_EN, + is_color_channel_12bits == true ? 0:1); +} + + + +static void dpp3_set3dlut_ram12( + struct dpp *dpp_base, + const struct dc_rgb *lut, + uint32_t entries) +{ + uint32_t i, red, green, blue, red1, green1, blue1; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + for (i = 0 ; i < entries; i += 2) { + red = lut[i].red<<4; + green = lut[i].green<<4; + blue = lut[i].blue<<4; + red1 = lut[i+1].red<<4; + green1 = lut[i+1].green<<4; + blue1 = lut[i+1].blue<<4; + + REG_SET_2(CM_3DLUT_DATA, 0, + CM_3DLUT_DATA0, red, + CM_3DLUT_DATA1, red1); + + REG_SET_2(CM_3DLUT_DATA, 0, + CM_3DLUT_DATA0, green, + CM_3DLUT_DATA1, green1); + + REG_SET_2(CM_3DLUT_DATA, 0, + CM_3DLUT_DATA0, blue, + CM_3DLUT_DATA1, blue1); + + } +} + +/* + * load selected lut with 10 bits color channels + */ +static void dpp3_set3dlut_ram10( + struct dpp *dpp_base, + const struct dc_rgb *lut, + uint32_t entries) +{ + uint32_t i, red, green, blue, value; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + for (i = 0; i < entries; i++) { + red = lut[i].red; + green = lut[i].green; + blue = lut[i].blue; + + value = (red<<20) | (green<<10) | blue; + + REG_SET(CM_3DLUT_DATA_30BIT, 0, CM_3DLUT_DATA_30BIT, value); + } + +} + + +static void dpp3_select_3dlut_ram_mask( + struct dpp *dpp_base, + uint32_t ram_selection_mask) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_UPDATE(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, + ram_selection_mask); + REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0); +} + +static bool dpp3_program_3dlut(struct dpp *dpp_base, + const struct tetrahedral_params *params) +{ + enum dc_lut_mode mode; + bool is_17x17x17; + bool is_12bits_color_channel; + const struct dc_rgb *lut0; + const struct dc_rgb *lut1; + const struct dc_rgb *lut2; + const struct dc_rgb *lut3; + int lut_size0; + int lut_size; + + if (params == NULL) { + dpp3_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false); + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) + dpp3_power_on_hdr3dlut(dpp_base, false); + return false; + } + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) + dpp3_power_on_hdr3dlut(dpp_base, true); + + mode = get3dlut_config(dpp_base, &is_17x17x17, &is_12bits_color_channel); + + if (mode == LUT_BYPASS || mode == LUT_RAM_B) + mode = LUT_RAM_A; + else + mode = LUT_RAM_B; + + is_17x17x17 = !params->use_tetrahedral_9; + is_12bits_color_channel = params->use_12bits; + if (is_17x17x17) { + lut0 = params->tetrahedral_17.lut0; + lut1 = params->tetrahedral_17.lut1; + lut2 = params->tetrahedral_17.lut2; + lut3 = params->tetrahedral_17.lut3; + lut_size0 = sizeof(params->tetrahedral_17.lut0)/ + sizeof(params->tetrahedral_17.lut0[0]); + lut_size = sizeof(params->tetrahedral_17.lut1)/ + sizeof(params->tetrahedral_17.lut1[0]); + } else { + lut0 = params->tetrahedral_9.lut0; + lut1 = params->tetrahedral_9.lut1; + lut2 = params->tetrahedral_9.lut2; + lut3 = params->tetrahedral_9.lut3; + lut_size0 = sizeof(params->tetrahedral_9.lut0)/ + sizeof(params->tetrahedral_9.lut0[0]); + lut_size = sizeof(params->tetrahedral_9.lut1)/ + sizeof(params->tetrahedral_9.lut1[0]); + } + + dpp3_select_3dlut_ram(dpp_base, mode, + is_12bits_color_channel); + dpp3_select_3dlut_ram_mask(dpp_base, 0x1); + if (is_12bits_color_channel) + 
dpp3_set3dlut_ram12(dpp_base, lut0, lut_size0); + else + dpp3_set3dlut_ram10(dpp_base, lut0, lut_size0); + + dpp3_select_3dlut_ram_mask(dpp_base, 0x2); + if (is_12bits_color_channel) + dpp3_set3dlut_ram12(dpp_base, lut1, lut_size); + else + dpp3_set3dlut_ram10(dpp_base, lut1, lut_size); + + dpp3_select_3dlut_ram_mask(dpp_base, 0x4); + if (is_12bits_color_channel) + dpp3_set3dlut_ram12(dpp_base, lut2, lut_size); + else + dpp3_set3dlut_ram10(dpp_base, lut2, lut_size); + + dpp3_select_3dlut_ram_mask(dpp_base, 0x8); + if (is_12bits_color_channel) + dpp3_set3dlut_ram12(dpp_base, lut3, lut_size); + else + dpp3_set3dlut_ram10(dpp_base, lut3, lut_size); + + + dpp3_set_3dlut_mode(dpp_base, mode, is_12bits_color_channel, + is_17x17x17); + + return true; +} +static struct dpp_funcs dcn30_dpp_funcs = { + .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, + .dpp_read_state = dpp30_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, + .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap, + .dpp_set_csc_adjustment = NULL, + .dpp_set_csc_default = NULL, + .dpp_program_regamma_pwl = NULL, + .dpp_set_pre_degam = dpp3_set_pre_degam, + .dpp_program_input_lut = NULL, + .dpp_full_bypass = dpp1_full_bypass, + .dpp_setup = dpp3_cnv_setup, + .dpp_program_degamma_pwl = NULL, + .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, + .dpp_program_cm_bias = dpp3_program_cm_bias, + .dpp_program_blnd_lut = dpp3_program_blnd_lut, + .dpp_program_shaper_lut = dpp3_program_shaper, + .dpp_program_3dlut = dpp3_program_3dlut, + .dpp_deferred_update = dpp3_deferred_update, + .dpp_program_bias_and_scale = NULL, + .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, + .set_cursor_attributes = dpp3_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp1_dppclk_control, + .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, +}; + + +static struct dpp_caps dcn30_dpp_cap = { + .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, + .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, +}; + +bool dpp3_construct( + struct dcn3_dpp *dpp, + struct dc_context *ctx, + uint32_t inst, + const struct dcn3_dpp_registers *tf_regs, + const struct dcn3_dpp_shift *tf_shift, + const struct dcn3_dpp_mask *tf_mask) +{ + dpp->base.ctx = ctx; + + dpp->base.inst = inst; + dpp->base.funcs = &dcn30_dpp_funcs; + dpp->base.caps = &dcn30_dpp_cap; + + dpp->tf_regs = tf_regs; + dpp->tf_shift = tf_shift; + dpp->tf_mask = tf_mask; + + return true; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h new file mode 100644 index 0000000000..269f437c16 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h @@ -0,0 +1,646 @@ +/* Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DCN30_DPP_H__ +#define __DCN30_DPP_H__ + +#include "dcn20/dcn20_dpp.h" + +#define TO_DCN30_DPP(dpp)\ + container_of(dpp, struct dcn3_dpp, base) + +#define DPP_REG_LIST_DCN30_COMMON(id)\ + SRI(CM_DEALPHA, CM, id),\ + SRI(CM_MEM_PWR_STATUS, CM, id),\ + SRI(CM_BIAS_CR_R, CM, id),\ + SRI(CM_BIAS_Y_G_CB_B, CM, id),\ + SRI(PRE_DEGAM, CNVC_CFG, id),\ + SRI(CM_GAMCOR_CONTROL, CM, id),\ + SRI(CM_GAMCOR_LUT_CONTROL, CM, id),\ + SRI(CM_GAMCOR_LUT_INDEX, CM, id),\ + SRI(CM_GAMCOR_LUT_INDEX, CM, id),\ + SRI(CM_GAMCOR_LUT_DATA, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_CNTL_B, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_CNTL_G, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_CNTL_R, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R, CM, id),\ + SRI(CM_GAMCOR_RAMB_END_CNTL1_B, CM, id),\ + SRI(CM_GAMCOR_RAMB_END_CNTL2_B, CM, id),\ + SRI(CM_GAMCOR_RAMB_END_CNTL1_G, CM, id),\ + SRI(CM_GAMCOR_RAMB_END_CNTL2_G, CM, id),\ + SRI(CM_GAMCOR_RAMB_END_CNTL1_R, CM, id),\ + SRI(CM_GAMCOR_RAMB_END_CNTL2_R, CM, id),\ + SRI(CM_GAMCOR_RAMB_REGION_0_1, CM, id),\ + SRI(CM_GAMCOR_RAMB_REGION_32_33, CM, id),\ + SRI(CM_GAMCOR_RAMB_OFFSET_B, CM, id),\ + SRI(CM_GAMCOR_RAMB_OFFSET_G, CM, id),\ + SRI(CM_GAMCOR_RAMB_OFFSET_R, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_B, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_G, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_R, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_CNTL_B, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_CNTL_G, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_CNTL_R, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R, CM, id),\ + SRI(CM_GAMCOR_RAMA_END_CNTL1_B, CM, id),\ + SRI(CM_GAMCOR_RAMA_END_CNTL2_B, CM, id),\ + SRI(CM_GAMCOR_RAMA_END_CNTL1_G, CM, id),\ + SRI(CM_GAMCOR_RAMA_END_CNTL2_G, CM, id),\ + SRI(CM_GAMCOR_RAMA_END_CNTL1_R, CM, id),\ + SRI(CM_GAMCOR_RAMA_END_CNTL2_R, CM, id),\ + SRI(CM_GAMCOR_RAMA_REGION_0_1, CM, id),\ + SRI(CM_GAMCOR_RAMA_REGION_32_33, CM, id),\ + SRI(CM_GAMCOR_RAMA_OFFSET_B, CM, id),\ + SRI(CM_GAMCOR_RAMA_OFFSET_G, CM, id),\ + SRI(CM_GAMCOR_RAMA_OFFSET_R, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_G, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_R, CM, id),\ + SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\ + 
SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\ + SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\ + SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\ + SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\ + SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\ + SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\ + SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \ + SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \ + SRI(OTG_H_BLANK, DSCL, id), \ + SRI(OTG_V_BLANK, DSCL, id), \ + SRI(SCL_MODE, DSCL, id), \ + SRI(LB_DATA_FORMAT, DSCL, id), \ + SRI(LB_MEMORY_CTRL, DSCL, id), \ + SRI(DSCL_AUTOCAL, DSCL, id), \ + SRI(DSCL_CONTROL, DSCL, id), \ + SRI(SCL_TAP_CONTROL, DSCL, id), \ + SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \ + SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \ + SRI(DSCL_2TAP_CONTROL, DSCL, id), \ + SRI(MPC_SIZE, DSCL, id), \ + SRI(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \ + SRI(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \ + SRI(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \ + SRI(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \ + SRI(SCL_HORZ_FILTER_INIT, DSCL, id), \ + SRI(SCL_HORZ_FILTER_INIT_C, DSCL, id), \ + SRI(SCL_VERT_FILTER_INIT, DSCL, id), \ + SRI(SCL_VERT_FILTER_INIT_C, DSCL, id), \ + SRI(RECOUT_START, DSCL, id), \ + SRI(RECOUT_SIZE, DSCL, id), \ + SRI(PRE_DEALPHA, CNVC_CFG, id), \ + SRI(PRE_REALPHA, CNVC_CFG, id), \ + SRI(PRE_CSC_MODE, CNVC_CFG, id), \ + SRI(PRE_CSC_C11_C12, CNVC_CFG, id), \ + SRI(PRE_CSC_C33_C34, CNVC_CFG, id), \ + SRI(PRE_CSC_B_C11_C12, CNVC_CFG, id), \ + SRI(PRE_CSC_B_C33_C34, CNVC_CFG, id), \ + SRI(CM_POST_CSC_CONTROL, CM, id), \ + SRI(CM_POST_CSC_C11_C12, CM, id), \ + SRI(CM_POST_CSC_C33_C34, CM, id), \ + SRI(CM_POST_CSC_B_C11_C12, CM, id), \ + SRI(CM_POST_CSC_B_C33_C34, CM, id), \ + SRI(CM_MEM_PWR_CTRL, CM, id), \ + SRI(CM_CONTROL, CM, id), \ + SRI(CM_TEST_DEBUG_INDEX, CM, id), \ + SRI(CM_TEST_DEBUG_DATA, CM, id), \ + SRI(FORMAT_CONTROL, CNVC_CFG, id), \ + SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ + SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ + SRI(CURSOR0_COLOR0, CNVC_CUR, id), \ + SRI(CURSOR0_COLOR1, CNVC_CUR, id), \ + SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \ + SRI(DPP_CONTROL, DPP_TOP, id), \ + SRI(CM_HDR_MULT_COEF, CM, id), \ + SRI(CURSOR_CONTROL, CURSOR0_, id), \ + SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \ + SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \ + SRI(FCNV_FP_BIAS_G, CNVC_CFG, id), \ + SRI(FCNV_FP_BIAS_B, CNVC_CFG, id), \ + SRI(FCNV_FP_SCALE_R, CNVC_CFG, id), \ + SRI(FCNV_FP_SCALE_G, CNVC_CFG, id), \ + SRI(FCNV_FP_SCALE_B, CNVC_CFG, id), \ + SRI(COLOR_KEYER_CONTROL, CNVC_CFG, id), \ + SRI(COLOR_KEYER_ALPHA, CNVC_CFG, id), \ + SRI(COLOR_KEYER_RED, CNVC_CFG, id), \ + SRI(COLOR_KEYER_GREEN, CNVC_CFG, id), \ + SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \ + SRI(CURSOR_CONTROL, CURSOR0_, id),\ + SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\ + SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \ + SRI(DSCL_MEM_PWR_CTRL, DSCL, id) + +#define DPP_REG_LIST_DCN30(id)\ + DPP_REG_LIST_DCN30_COMMON(id), \ + TF_REG_LIST_DCN20_COMMON(id), \ + SRI(CM_BLNDGAM_CONTROL, CM, id), \ + SRI(CM_SHAPER_LUT_DATA, CM, id),\ + SRI(CM_MEM_PWR_CTRL2, CM, id), \ + SRI(CM_MEM_PWR_STATUS2, CM, id), \ + SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM, id),\ + SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM, id),\ + SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM, id),\ + SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B, CM, id),\ + SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G, CM, id),\ + 
SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM, id),\ + SRI(CM_BLNDGAM_LUT_CONTROL, CM, id) + + + +#define DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh)\ + TF_SF(CM0_CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, mask_sh),\ + TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_EN, mask_sh),\ + TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_ABLND, mask_sh),\ + TF_SF(CM0_CM_BIAS_CR_R, CM_BIAS_CR_R, mask_sh),\ + TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_Y_G, mask_sh),\ + TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_CB_B, mask_sh),\ + TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_DIS, mask_sh),\ + TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, mask_sh),\ + TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_MODE, mask_sh),\ + TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_SELECT, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_PWL_DISABLE, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_LUT_INDEX, CM_GAMCOR_LUT_INDEX, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_LUT_DATA, CM_GAMCOR_LUT_DATA, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_WRITE_COLOR_MASK, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_READ_COLOR_SEL, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_HOST_SEL, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_CONFIG_MODE, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL1_B, CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_OFFSET_B, CM_GAMCOR_RAMA_OFFSET_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\ + 
TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\ + TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\ + TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\ + TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\ + TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\ + TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\ + TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\ + TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\ + TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\ + TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\ + TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\ + TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\ + TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\ + TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\ + TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\ + TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\ + TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\ + TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_EN, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_ABLND_EN, mask_sh), 
\ + TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_EN, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_ABLND_EN, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE_CURRENT, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C11, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C12, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C33, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C34, mask_sh), \ + TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE, mask_sh), \ + TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE_CURRENT, mask_sh), \ + TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C11, mask_sh), \ + TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C12, mask_sh), \ + TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C33, mask_sh), \ + TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C34, mask_sh), \ + TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \ + TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \ + TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \ + TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_ALPHA_PLANE_ENABLE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \ + TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh), \ + TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CNV16, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE_C, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_R, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_G, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_B, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_EN, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_RED, 
COLOR_KEYER_RED_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\ + TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ + TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh),\ + TF_SF(DSCL0_DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, mask_sh) + +#define DPP_REG_LIST_SH_MASK_DCN30_UPDATED(mask_sh)\ + TF_SF(CM0_CM_MEM_PWR_STATUS, BLNDGAM_MEM_PWR_STATE, mask_sh), \ + TF_SF(CM0_CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, mask_sh),\ + TF_SF(CM0_CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, mask_sh),\ + TF_SF(CM0_CM_MEM_PWR_STATUS2, HDR3DLUT_MEM_PWR_STATE, mask_sh),\ + TF_SF(CM0_CM_MEM_PWR_STATUS2, SHAPER_MEM_PWR_STATE, mask_sh),\ + TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_HOST_SEL, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_CONFIG_MODE, mask_sh), \ + TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, mask_sh), \ + TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, mask_sh) + + +#define DPP_REG_LIST_SH_MASK_DCN30(mask_sh)\ + DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \ + DPP_REG_LIST_SH_MASK_DCN30_UPDATED(mask_sh) + +#define DPP_REG_FIELD_LIST_DCN3(type) \ + TF_REG_FIELD_LIST_DCN2_0(type); \ + type FORMAT_CROSSBAR_R; \ + type FORMAT_CROSSBAR_G; \ + type FORMAT_CROSSBAR_B; 
\ + type CM_DEALPHA_EN;\ + type CM_DEALPHA_ABLND;\ + type CM_BIAS_Y_G;\ + type CM_BIAS_CB_B;\ + type CM_BIAS_CR_R;\ + type GAMCOR_MEM_PWR_DIS; \ + type GAMCOR_MEM_PWR_FORCE; \ + type HDR3DLUT_MEM_PWR_FORCE; \ + type SHAPER_MEM_PWR_FORCE; \ + type PRE_DEGAM_MODE;\ + type PRE_DEGAM_SELECT;\ + type CNVC_ALPHA_PLANE_ENABLE; \ + type PRE_DEALPHA_EN; \ + type PRE_DEALPHA_ABLND_EN; \ + type PRE_REALPHA_EN; \ + type PRE_REALPHA_ABLND_EN; \ + type PRE_CSC_MODE; \ + type PRE_CSC_MODE_CURRENT; \ + type PRE_CSC_C11; \ + type PRE_CSC_C12; \ + type PRE_CSC_C33; \ + type PRE_CSC_C34; \ + type CM_POST_CSC_MODE; \ + type CM_POST_CSC_MODE_CURRENT; \ + type CM_POST_CSC_C11; \ + type CM_POST_CSC_C12; \ + type CM_POST_CSC_C33; \ + type CM_POST_CSC_C34; \ + type CM_GAMCOR_MODE; \ + type CM_GAMCOR_SELECT; \ + type CM_GAMCOR_PWL_DISABLE; \ + type CM_GAMCOR_MODE_CURRENT; \ + type CM_GAMCOR_SELECT_CURRENT; \ + type CM_GAMCOR_LUT_INDEX; \ + type CM_GAMCOR_LUT_DATA; \ + type CM_GAMCOR_LUT_WRITE_COLOR_MASK; \ + type CM_GAMCOR_LUT_READ_COLOR_SEL; \ + type CM_GAMCOR_LUT_READ_DBG; \ + type CM_GAMCOR_LUT_HOST_SEL; \ + type CM_GAMCOR_LUT_CONFIG_MODE; \ + type CM_GAMCOR_LUT_STATUS; \ + type CM_GAMCOR_RAMA_EXP_REGION_START_B; \ + type CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; \ + type CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; \ + type CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; \ + type CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; \ + type CM_GAMCOR_RAMA_EXP_REGION_END_B; \ + type CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; \ + type CM_GAMCOR_RAMA_OFFSET_B; \ + type CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; \ + type CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; \ + type CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; \ + type CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;\ + type CM_GAMUT_REMAP_MODE_CURRENT;\ + type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_G; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_R; \ + type CM_BLNDGAM_LUT_WRITE_COLOR_MASK; \ + type CM_BLNDGAM_LUT_HOST_SEL; \ + type CM_BLNDGAM_LUT_CONFIG_MODE; \ + type CM_3DLUT_MODE_CURRENT; \ + type CM_SHAPER_MODE_CURRENT; \ + type CM_BLNDGAM_MODE; \ + type CM_BLNDGAM_MODE_CURRENT; \ + type CM_BLNDGAM_SELECT_CURRENT; \ + type CM_BLNDGAM_SELECT; \ + type GAMCOR_MEM_PWR_STATE; \ + type BLNDGAM_MEM_PWR_STATE; \ + type HDR3DLUT_MEM_PWR_STATE; \ + type SHAPER_MEM_PWR_STATE + +struct dcn3_dpp_shift { + DPP_REG_FIELD_LIST_DCN3(uint8_t); +}; + +struct dcn3_dpp_mask { + DPP_REG_FIELD_LIST_DCN3(uint32_t); +}; + +#define DPP_DCN3_REG_VARIABLE_LIST_COMMON \ + DPP_DCN2_REG_VARIABLE_LIST; \ + uint32_t CM_MEM_PWR_STATUS;\ + uint32_t CM_MEM_PWR_STATUS2;\ + uint32_t CM_MEM_PWR_CTRL2;\ + uint32_t CM_DEALPHA;\ + uint32_t CM_BIAS_CR_R;\ + uint32_t CM_BIAS_Y_G_CB_B;\ + uint32_t PRE_DEGAM;\ + uint32_t PRE_DEALPHA; \ + uint32_t PRE_REALPHA; \ + uint32_t PRE_CSC_MODE; \ + uint32_t PRE_CSC_C11_C12; \ + uint32_t PRE_CSC_C33_C34; \ + uint32_t PRE_CSC_B_C11_C12; \ + uint32_t PRE_CSC_B_C33_C34; \ + uint32_t CM_POST_CSC_CONTROL; \ + uint32_t CM_POST_CSC_C11_C12; \ + uint32_t CM_POST_CSC_C33_C34; \ + uint32_t CM_POST_CSC_B_C11_C12; \ + uint32_t CM_POST_CSC_B_C33_C34; \ + uint32_t CM_GAMUT_REMAP_B_C11_C12; \ + uint32_t CM_GAMUT_REMAP_B_C13_C14; \ + uint32_t CM_GAMUT_REMAP_B_C21_C22; \ + uint32_t CM_GAMUT_REMAP_B_C23_C24; \ + uint32_t CM_GAMUT_REMAP_B_C31_C32; \ + uint32_t CM_GAMUT_REMAP_B_C33_C34; \ + uint32_t CM_GAMCOR_CONTROL; \ + 
uint32_t CM_GAMCOR_LUT_CONTROL; \ + uint32_t CM_GAMCOR_LUT_INDEX; \ + uint32_t CM_GAMCOR_LUT_DATA; \ + uint32_t CM_GAMCOR_RAMB_START_CNTL_B; \ + uint32_t CM_GAMCOR_RAMB_START_CNTL_G; \ + uint32_t CM_GAMCOR_RAMB_START_CNTL_R; \ + uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_B; \ + uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_G; \ + uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_R; \ + uint32_t CM_GAMCOR_RAMB_END_CNTL1_B; \ + uint32_t CM_GAMCOR_RAMB_END_CNTL2_B; \ + uint32_t CM_GAMCOR_RAMB_END_CNTL1_G; \ + uint32_t CM_GAMCOR_RAMB_END_CNTL2_G; \ + uint32_t CM_GAMCOR_RAMB_END_CNTL1_R; \ + uint32_t CM_GAMCOR_RAMB_END_CNTL2_R; \ + uint32_t CM_GAMCOR_RAMB_REGION_0_1; \ + uint32_t CM_GAMCOR_RAMB_REGION_32_33; \ + uint32_t CM_GAMCOR_RAMB_OFFSET_B; \ + uint32_t CM_GAMCOR_RAMB_OFFSET_G; \ + uint32_t CM_GAMCOR_RAMB_OFFSET_R; \ + uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_B; \ + uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_G; \ + uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_R; \ + uint32_t CM_GAMCOR_RAMA_START_CNTL_B; \ + uint32_t CM_GAMCOR_RAMA_START_CNTL_G; \ + uint32_t CM_GAMCOR_RAMA_START_CNTL_R; \ + uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_B; \ + uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_G; \ + uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_R; \ + uint32_t CM_GAMCOR_RAMA_END_CNTL1_B; \ + uint32_t CM_GAMCOR_RAMA_END_CNTL2_B; \ + uint32_t CM_GAMCOR_RAMA_END_CNTL1_G; \ + uint32_t CM_GAMCOR_RAMA_END_CNTL2_G; \ + uint32_t CM_GAMCOR_RAMA_END_CNTL1_R; \ + uint32_t CM_GAMCOR_RAMA_END_CNTL2_R; \ + uint32_t CM_GAMCOR_RAMA_REGION_0_1; \ + uint32_t CM_GAMCOR_RAMA_REGION_32_33; \ + uint32_t CM_GAMCOR_RAMA_OFFSET_B; \ + uint32_t CM_GAMCOR_RAMA_OFFSET_G; \ + uint32_t CM_GAMCOR_RAMA_OFFSET_R; \ + uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_B; \ + uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_G; \ + uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_R; \ + uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B; \ + uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G; \ + uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R; \ + uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B; \ + uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G; \ + uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R; \ + uint32_t CM_BLNDGAM_LUT_CONTROL + + +struct dcn3_dpp_registers { + DPP_DCN3_REG_VARIABLE_LIST_COMMON; +}; + + +struct dcn3_dpp { + struct dpp base; + + const struct dcn3_dpp_registers *tf_regs; + const struct dcn3_dpp_shift *tf_shift; + const struct dcn3_dpp_mask *tf_mask; + + const uint16_t *filter_v; + const uint16_t *filter_h; + const uint16_t *filter_v_c; + const uint16_t *filter_h_c; + int lb_pixel_depth_supported; + int lb_memory_size; + int lb_bits_per_entry; + bool is_write_to_ram_a_safe; + struct scaler_data scl_data; + struct pwl_params pwl_data; +}; + +bool dpp3_construct(struct dcn3_dpp *dpp3, + struct dc_context *ctx, + uint32_t inst, + const struct dcn3_dpp_registers *tf_regs, + const struct dcn3_dpp_shift *tf_shift, + const struct dcn3_dpp_mask *tf_mask); + +bool dpp3_program_gamcor_lut( + struct dpp *dpp_base, const struct pwl_params *params); + +void dpp3_program_CM_dealpha( + struct dpp *dpp_base, + uint32_t enable, uint32_t additive_blending); + +void dpp30_read_state(struct dpp *dpp_base, + struct dcn_dpp_state *s); + +bool dpp3_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps); + +void dpp3_cnv_setup ( + struct dpp *dpp_base, + enum surface_pixel_format format, + enum expansion_mode mode, + struct dc_csc_transform input_csc_color_matrix, + enum dc_color_space input_color_space, + struct cnv_alpha_2bit_lut *alpha_2bit_lut); + +void 
dpp3_program_CM_bias( + struct dpp *dpp_base, + struct CM_bias_params *bias_params); + +void dpp3_set_hdr_multiplier( + struct dpp *dpp_base, + uint32_t multiplier); + +void dpp3_cm_set_gamut_remap( + struct dpp *dpp_base, + const struct dpp_grph_csc_adjustment *adjust); + +void dpp3_set_pre_degam(struct dpp *dpp_base, + enum dc_transfer_func_predefined tr); + +void dpp3_set_cursor_attributes( + struct dpp *dpp_base, + struct dc_cursor_attributes *cursor_attributes); + +void dpp3_program_post_csc( + struct dpp *dpp_base, + enum dc_color_space color_space, + enum dcn10_input_csc_select input_select, + const struct out_csc_color_matrix *tbl_entry); + +void dpp3_program_cm_bias( + struct dpp *dpp_base, + struct CM_bias_params *bias_params); + +void dpp3_program_cm_dealpha( + struct dpp *dpp_base, + uint32_t enable, uint32_t additive_blending); + +void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust); +#endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c new file mode 100644 index 0000000000..82eca0e7b7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c @@ -0,0 +1,461 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "core_types.h" +#include "reg_helper.h" +#include "dcn30/dcn30_dpp.h" +#include "basics/conversion.h" +#include "dcn30/dcn30_cm_common.h" + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + +static void dpp3_enable_cm_block( + struct dpp *dpp_base) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + unsigned int cm_bypass_mode = 0; + + // debug option: put CM in bypass mode + if (dpp_base->ctx->dc->debug.cm_in_bypass) + cm_bypass_mode = 1; + + REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode); +} + +static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base) +{ + enum dc_lut_mode mode = LUT_BYPASS; + uint32_t state_mode; + uint32_t lut_mode; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode); + + if (state_mode == 2) {//Programmable RAM LUT + REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode); + if (lut_mode == 0) + mode = LUT_RAM_A; + else + mode = LUT_RAM_B; + } + + return mode; +} + +static void dpp3_program_gammcor_lut( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num, + bool is_ram_a) +{ + uint32_t i; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg; + uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg; + uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg; + + /*fill in the LUT with all base values to be used by pwl module + * HW auto increments the LUT index: back-to-back write + */ + if (is_rgb_equal(rgb, num)) { + for (i = 0 ; i < num; i++) + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg); + + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red); + + } else { + REG_UPDATE(CM_GAMCOR_LUT_CONTROL, + CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4); + for (i = 0 ; i < num; i++) + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg); + + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red); + + REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0); + + REG_UPDATE(CM_GAMCOR_LUT_CONTROL, + CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2); + for (i = 0 ; i < num; i++) + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].green_reg); + + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_green); + + REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0); + + REG_UPDATE(CM_GAMCOR_LUT_CONTROL, + CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1); + for (i = 0 ; i < num; i++) + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].blue_reg); + + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_blue); + } +} + +static void dpp3_power_on_gamcor_lut( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { + if (power_on) { + REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0); + REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5); + } else { + dpp_base->ctx->dc->optimized_required = true; + dpp_base->deferred_reg_writes.bits.disable_gamcor = true; + } + } else + REG_SET(CM_MEM_PWR_CTRL, 0, + GAMCOR_MEM_PWR_DIS, power_on == true ? 
0:1); +} + +void dpp3_program_cm_dealpha( + struct dpp *dpp_base, + uint32_t enable, uint32_t additive_blending) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_SET_2(CM_DEALPHA, 0, + CM_DEALPHA_EN, enable, + CM_DEALPHA_ABLND, additive_blending); +} + +void dpp3_program_cm_bias( + struct dpp *dpp_base, + struct CM_bias_params *bias_params) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_SET(CM_BIAS_CR_R, 0, CM_BIAS_CR_R, bias_params->cm_bias_cr_r); + REG_SET_2(CM_BIAS_Y_G_CB_B, 0, + CM_BIAS_Y_G, bias_params->cm_bias_y_g, + CM_BIAS_CB_B, bias_params->cm_bias_cb_b); +} + +static void dpp3_gamcor_reg_field( + struct dcn3_dpp *dpp, + struct dcn3_xfer_func_reg *reg) +{ + + reg->shifts.field_region_start_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; + reg->masks.field_region_start_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; + reg->shifts.field_offset = dpp->tf_shift->CM_GAMCOR_RAMA_OFFSET_B; + reg->masks.field_offset = dpp->tf_mask->CM_GAMCOR_RAMA_OFFSET_B; + + reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; + reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; + reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; + reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; + reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS; + reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS; + + reg->shifts.field_region_end = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_B; + reg->masks.field_region_end = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_B; + reg->shifts.field_region_end_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; + reg->masks.field_region_end_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; + reg->shifts.field_region_end_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; + reg->masks.field_region_end_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; + reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; + reg->masks.field_region_linear_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; + reg->shifts.exp_region_start = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_B; + reg->masks.exp_region_start = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_B; + reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; + reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; +} + +static void dpp3_configure_gamcor_lut( + struct dpp *dpp_base, + bool is_ram_a) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_UPDATE(CM_GAMCOR_LUT_CONTROL, + CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7); + REG_UPDATE(CM_GAMCOR_LUT_CONTROL, + CM_GAMCOR_LUT_HOST_SEL, is_ram_a == true ? 
0:1); + REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0); +} + + +bool dpp3_program_gamcor_lut( + struct dpp *dpp_base, const struct pwl_params *params) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + struct dcn3_xfer_func_reg gam_regs; + + dpp3_enable_cm_block(dpp_base); + + if (params == NULL) { //bypass if we have no pwl data + REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 0); + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) + dpp3_power_on_gamcor_lut(dpp_base, false); + return false; + } + dpp3_power_on_gamcor_lut(dpp_base, true); + REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 2); + + current_mode = dpp30_get_gamcor_current(dpp_base); + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) + next_mode = LUT_RAM_B; + else + next_mode = LUT_RAM_A; + + dpp3_power_on_gamcor_lut(dpp_base, true); + dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A); + + if (next_mode == LUT_RAM_B) { + gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMB_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMB_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMB_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMB_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMB_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMB_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMB_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMB_END_CNTL2_R); + gam_regs.region_start = REG(CM_GAMCOR_RAMB_REGION_0_1); + gam_regs.region_end = REG(CM_GAMCOR_RAMB_REGION_32_33); + //New registers in DCN3AG/DCN GAMCOR block + gam_regs.offset_b = REG(CM_GAMCOR_RAMB_OFFSET_B); + gam_regs.offset_g = REG(CM_GAMCOR_RAMB_OFFSET_G); + gam_regs.offset_r = REG(CM_GAMCOR_RAMB_OFFSET_R); + gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_B); + gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_G); + gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_R); + } else { + gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMA_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMA_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMA_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMA_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMA_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMA_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMA_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMA_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMA_END_CNTL2_R); + gam_regs.region_start = REG(CM_GAMCOR_RAMA_REGION_0_1); + gam_regs.region_end = REG(CM_GAMCOR_RAMA_REGION_32_33); + //New registers in DCN3AG/DCN GAMCOR block + gam_regs.offset_b = REG(CM_GAMCOR_RAMA_OFFSET_B); + gam_regs.offset_g = REG(CM_GAMCOR_RAMA_OFFSET_G); + gam_regs.offset_r = REG(CM_GAMCOR_RAMA_OFFSET_R); + gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_B); + gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_G); 
+ gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_R); + } + + //get register fields + dpp3_gamcor_reg_field(dpp, &gam_regs); + + //program register set for LUTA/LUTB + cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs); + + dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num, + next_mode == LUT_RAM_A); + + //select Gamma LUT to use for next frame + REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0:1); + + return true; +} + +void dpp3_set_hdr_multiplier( + struct dpp *dpp_base, + uint32_t multiplier) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier); +} + + +static void program_gamut_remap( + struct dcn3_dpp *dpp, + const uint16_t *regval, + int select) +{ + uint16_t selection = 0; + struct color_matrices_reg gam_regs; + + if (regval == NULL || select == GAMUT_REMAP_BYPASS) { + REG_SET(CM_GAMUT_REMAP_CONTROL, 0, + CM_GAMUT_REMAP_MODE, 0); + return; + } + switch (select) { + case GAMUT_REMAP_COEFF: + selection = 1; + break; + /*this corresponds to GAMUT_REMAP coefficients set B + *we don't have common coefficient sets in dcn3ag/dcn3 + */ + case GAMUT_REMAP_COMA_COEFF: + selection = 2; + break; + default: + break; + } + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + + if (select == GAMUT_REMAP_COEFF) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } else if (select == GAMUT_REMAP_COMA_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } + //select coefficient set to use + REG_SET( + CM_GAMUT_REMAP_CONTROL, 0, + CM_GAMUT_REMAP_MODE, selection); +} + +void dpp3_cm_set_gamut_remap( + struct dpp *dpp_base, + const struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + int i = 0; + int gamut_mode; + + if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) + /* Bypass if type is bypass or hw */ + program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS); + else { + struct fixed31_32 arr_matrix[12]; + uint16_t arr_reg_val[12]; + + for (i = 0; i < 12; i++) + arr_matrix[i] = adjust->temperature_matrix[i]; + + convert_float_matrix( + arr_reg_val, arr_matrix, 12); + + //current coefficient set in use + REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode); + + if (gamut_mode == 0) + gamut_mode = 1; //use coefficient set A + else if (gamut_mode == 1) + gamut_mode = 2; + else + gamut_mode = 1; + + //follow dcn2 approach for now - using only coefficient set A + program_gamut_remap(dpp, arr_reg_val, gamut_mode); + } +} + +static void read_gamut_remap(struct dcn3_dpp *dpp, + uint16_t *regval, + int *select) +{ + struct color_matrices_reg gam_regs; + uint32_t selection; + + //current coefficient set in use + REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &selection); + + *select = selection; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + 
gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (*select == GAMUT_REMAP_COEFF) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMA_COEFF) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + } +} + +void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint16_t arr_reg_val[12] = {0}; + int select; + + read_gamut_remap(dpp, arr_reg_val, &select); + + if (select == GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt new file mode 100644 index 0000000000..7743edc459 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dcn32_dpp.c + dcn32_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c new file mode 100644 index 0000000000..41679997b4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c @@ -0,0 +1,165 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "core_types.h" +#include "reg_helper.h" +#include "dcn32/dcn32_dpp.h" +#include "basics/conversion.h" +#include "dcn30/dcn30_cm_common.h" + +/* Compute the maximum number of lines that we can fit in the line buffer */ +static void dscl32_calc_lb_num_partitions( + const struct scaler_data *scl_data, + enum lb_memory_config lb_config, + int *num_part_y, + int *num_part_c) +{ + int memory_line_size_y, memory_line_size_c, memory_line_size_a, + lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a; + + int line_size = scl_data->viewport.width < scl_data->recout.width ? 
+ scl_data->viewport.width : scl_data->recout.width; + int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? + scl_data->viewport_c.width : scl_data->recout.width; + + if (line_size == 0) + line_size = 1; + + if (line_size_c == 0) + line_size_c = 1; + + memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */ + memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */ + memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ + + if (lb_config == LB_MEMORY_CONFIG_1) { + lb_memory_size = 970; + lb_memory_size_c = 970; + lb_memory_size_a = 970; + } else if (lb_config == LB_MEMORY_CONFIG_2) { + lb_memory_size = 1290; + lb_memory_size_c = 1290; + lb_memory_size_a = 1290; + } else if (lb_config == LB_MEMORY_CONFIG_3) { + if (scl_data->viewport.width == scl_data->h_active && + scl_data->viewport.height == scl_data->v_active) { + /* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */ + /* use increased LB size for calculation only if Scaler not enabled */ + lb_memory_size = 970 + 1290 + 1170 + 1170 + 1170; + lb_memory_size_c = 970 + 1290; + lb_memory_size_a = 970 + 1290 + 1170; + } else { + /* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */ + lb_memory_size = 970 + 1290 + 484 + 484 + 484; + lb_memory_size_c = 970 + 1290; + lb_memory_size_a = 970 + 1290 + 484; + } + } else { + if (scl_data->viewport.width == scl_data->h_active && + scl_data->viewport.height == scl_data->v_active) { + /* use increased LB size for calculation only if Scaler not enabled */ + lb_memory_size = 970 + 1290 + 1170; + lb_memory_size_c = 970 + 1290 + 1170; + lb_memory_size_a = 970 + 1290 + 1170; + } else { + lb_memory_size = 970 + 1290 + 484; + lb_memory_size_c = 970 + 1290 + 484; + lb_memory_size_a = 970 + 1290 + 484; + } + } + *num_part_y = lb_memory_size / memory_line_size_y; + *num_part_c = lb_memory_size_c / memory_line_size_c; + num_partitions_a = lb_memory_size_a / memory_line_size_a; + + if (scl_data->lb_params.alpha_en + && (num_partitions_a < *num_part_y)) + *num_part_y = num_partitions_a; + + if (*num_part_y > 32) + *num_part_y = 32; + if (*num_part_c > 32) + *num_part_c = 32; +} + +static struct dpp_funcs dcn32_dpp_funcs = { + .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, + .dpp_read_state = dpp30_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, + .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap, + .dpp_set_csc_adjustment = NULL, + .dpp_set_csc_default = NULL, + .dpp_program_regamma_pwl = NULL, + .dpp_set_pre_degam = dpp3_set_pre_degam, + .dpp_program_input_lut = NULL, + .dpp_full_bypass = dpp1_full_bypass, + .dpp_setup = dpp3_cnv_setup, + .dpp_program_degamma_pwl = NULL, + .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, + .dpp_program_cm_bias = dpp3_program_cm_bias, + + .dpp_program_blnd_lut = NULL, // BLNDGAM is removed completely in DCN3.2 DPP + .dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) + .dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) + + .dpp_program_bias_and_scale = NULL, + .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, + .set_cursor_attributes = dpp3_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp1_dppclk_control, + 
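/*
 * Editor's note -- illustrative sketch, not part of the upstream patch.
 * dscl32_calc_lb_num_partitions() above sizes the line buffer in 6-pixel
 * memory words -- "(line_size + 5) / 6" is a ceiling division -- and then
 * caps the partition count at 32.  The same arithmetic as a standalone
 * helper, with an illustrative example (the 970/1290/1170/484 word budgets
 * come from the LB_MEMORY_CONFIG cases above):
 */
static int lb_num_partitions(int line_size_px, int lb_memory_size_words)
{
	int words_per_line, parts;

	if (line_size_px == 0)		/* mirror the guard in the code above */
		line_size_px = 1;

	words_per_line = (line_size_px + 5) / 6;	/* ceil(line_size_px / 6) */
	parts = lb_memory_size_words / words_per_line;

	return parts > 32 ? 32 : parts;
}

/* Example: a 1920-pixel line needs 320 words per line, so a 970-word budget
 * yields 3 partitions (and a 970 + 1290 + 484 = 2744-word budget yields 8). */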
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, +}; + + +static struct dpp_caps dcn32_dpp_cap = { + .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, + .max_lb_partitions = 31, + .dscl_calc_lb_num_partitions = dscl32_calc_lb_num_partitions, +}; + +bool dpp32_construct( + struct dcn3_dpp *dpp, + struct dc_context *ctx, + uint32_t inst, + const struct dcn3_dpp_registers *tf_regs, + const struct dcn3_dpp_shift *tf_shift, + const struct dcn3_dpp_mask *tf_mask) +{ + dpp->base.ctx = ctx; + + dpp->base.inst = inst; + dpp->base.funcs = &dcn32_dpp_funcs; + dpp->base.caps = &dcn32_dpp_cap; + + dpp->tf_regs = tf_regs; + dpp->tf_shift = tf_shift; + dpp->tf_mask = tf_mask; + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h new file mode 100644 index 0000000000..572958d287 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h @@ -0,0 +1,38 @@ +/* Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DCN32_DPP_H__ +#define __DCN32_DPP_H__ + +#include "dcn20/dcn20_dpp.h" +#include "dcn30/dcn30_dpp.h" + +bool dpp32_construct(struct dcn3_dpp *dpp3, + struct dc_context *ctx, + uint32_t inst, + const struct dcn3_dpp_registers *tf_regs, + const struct dcn3_dpp_shift *tf_shift, + const struct dcn3_dpp_mask *tf_mask); + +#endif /* __DCN32_DPP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt new file mode 100644 index 0000000000..91df5db264 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dcn35_dpp.c + dcn35_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c new file mode 100644 index 0000000000..e16274fee3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "dcn35/dcn35_dpp.h" +#include "reg_helper.h" + +#define REG(reg) dpp->tf_regs->reg + +#define CTX dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + ((const struct dcn35_dpp_shift *)(dpp->tf_shift))->field_name, \ + ((const struct dcn35_dpp_mask *)(dpp->tf_mask))->field_name + +void dpp35_dppclk_control( + struct dpp *dpp_base, + bool dppclk_div, + bool enable) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + if (enable) { + if (dpp->tf_mask->DPPCLK_RATE_CONTROL) + REG_UPDATE_2(DPP_CONTROL, + DPPCLK_RATE_CONTROL, dppclk_div, + DPP_CLOCK_ENABLE, 1); + else + REG_UPDATE_2(DPP_CONTROL, + DPP_CLOCK_ENABLE, 1, + DISPCLK_R_GATE_DISABLE, 1); + } else + REG_UPDATE_2(DPP_CONTROL, + DPP_CLOCK_ENABLE, 0, + DISPCLK_R_GATE_DISABLE, 0); +} + +static struct dpp_funcs dcn35_dpp_funcs = { + .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, + .dpp_read_state = dpp30_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, + .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap, + .dpp_set_csc_adjustment = NULL, + .dpp_set_csc_default = NULL, + .dpp_program_regamma_pwl = NULL, + .dpp_set_pre_degam = dpp3_set_pre_degam, + .dpp_program_input_lut = NULL, + .dpp_full_bypass = dpp1_full_bypass, + .dpp_setup = dpp3_cnv_setup, + .dpp_program_degamma_pwl = NULL, + .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, + .dpp_program_cm_bias = dpp3_program_cm_bias, + + .dpp_program_blnd_lut = NULL, // BLNDGAM is removed completely in DCN3.2 DPP + .dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) + .dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) + + .dpp_program_bias_and_scale = NULL, + .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, + .set_cursor_attributes = dpp3_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp35_dppclk_control, + .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, +}; + + +bool dpp35_construct( + struct dcn3_dpp *dpp, struct dc_context *ctx, + uint32_t inst, const struct dcn3_dpp_registers *tf_regs, + const 
struct dcn35_dpp_shift *tf_shift, + const struct dcn35_dpp_mask *tf_mask) +{ + bool ret = dpp32_construct(dpp, ctx, inst, tf_regs, + (const struct dcn3_dpp_shift *)(tf_shift), + (const struct dcn3_dpp_mask *)(tf_mask)); + + dpp->base.funcs = &dcn35_dpp_funcs; + return ret; +} + +void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable) +{ + REG_UPDATE(DPP_CONTROL, DPP_FGCG_REP_DIS, !enable); +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h new file mode 100644 index 0000000000..135872d882 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN35_DPP_H__ +#define __DCN35_DPP_H__ + +#include "dcn32/dcn32_dpp.h" + +#define DPP_REG_LIST_SH_MASK_DCN35(mask_sh) \ + DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DISPCLK_R_GATE_DISABLE, mask_sh) + +#define DPP_REG_FIELD_LIST_DCN35(type) \ + struct { \ + DPP_REG_FIELD_LIST_DCN3(type); \ + type DPP_FGCG_REP_DIS; \ + } + +struct dcn35_dpp_shift { + DPP_REG_FIELD_LIST_DCN35(uint8_t); +}; + +struct dcn35_dpp_mask { + DPP_REG_FIELD_LIST_DCN35(uint32_t); +}; + +void dpp35_dppclk_control( + struct dpp *dpp_base, + bool dppclk_div, + bool enable); + +bool dpp35_construct(struct dcn3_dpp *dpp3, struct dc_context *ctx, + uint32_t inst, const struct dcn3_dpp_registers *tf_regs, + const struct dcn35_dpp_shift *tf_shift, + const struct dcn35_dpp_mask *tf_mask); + +void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable); + +#endif // __DCN35_DPP_H diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 8e7b35f764..150ef23440 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -458,7 +458,7 @@ bool dc_dsc_compute_bandwidth_range( bool is_dsc_possible = false; struct dsc_enc_caps dsc_enc_caps; struct dsc_enc_caps dsc_common_caps; - struct dc_dsc_config config; + struct dc_dsc_config config = {0}; struct dc_dsc_config_options options = {0}; options.dsc_min_slice_height_override = dsc_min_slice_height_override; @@ -868,9 +868,9 @@ static bool setup_dsc_config( struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; - int max_slices_h; - int min_slices_h; - int num_slices_h; + int max_slices_h = 0; + int min_slices_h = 0; + int num_slices_h = 0; int pic_width; int slice_width; int target_bpp; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index 36d6c1646a..59864130cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -101,7 +101,6 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, { int ret; struct drm_dsc_config dsc_cfg; - unsigned long long tmp; dsc_params->pps = *pps; dsc_params->pps.initial_scale_value = 8 * rc->rc_model_size / (rc->rc_model_size - rc->initial_fullness_offset); @@ -112,9 +111,9 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 
48 : 64; ret = drm_dsc_compute_rc_parameters(&dsc_cfg); - tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1); - do_div(tmp, (uint32_t)dsc_cfg.slice_width); //ROUND-UP - dsc_params->bytes_per_pixel = (uint32_t)tmp; + dsc_params->bytes_per_pixel = + (uint32_t)(div_u64(((uint64_t)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1)), + (uint32_t)dsc_cfg.slice_width)); /* Round-up */ copy_pps_fields(&dsc_params->pps, &dsc_cfg); dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c index d734e3a134..2840ed5c57 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c @@ -95,10 +95,6 @@ static bool offset_to_id( return true; default: ASSERT_CRITICAL(false); -#ifdef PALLADIUM_SUPPORTED - *en = GPIO_DDC_LINE_DDC1; - return true; -#endif return false; } break; @@ -184,11 +180,6 @@ static bool offset_to_id( /* UNEXPECTED */ default: /* case REG(DC_GPIO_SYNCA_A): not exista */ -#ifdef PALLADIUM_SUPPORTED - *id = GPIO_ID_HPD; - *en = GPIO_DDC_LINE_DDC1; - return true; -#endif ASSERT_CRITICAL(false); return false; } @@ -308,10 +299,6 @@ static bool id_to_offset( break; default: ASSERT_CRITICAL(false); -#ifdef PALLADIUM_SUPPORTED - info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK; - result = true; -#endif result = false; } break; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index 3ede6e02c3..663c17f527 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -128,7 +128,7 @@ struct gpio *dal_gpio_service_create_irq( uint32_t offset, uint32_t mask) { - enum gpio_id id; + enum gpio_id id = 0; uint32_t en; if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en)) { @@ -144,7 +144,7 @@ struct gpio *dal_gpio_service_create_generic_mux( uint32_t offset, uint32_t mask) { - enum gpio_id id; + enum gpio_id id = 0; uint32_t en; struct gpio *generic; @@ -178,7 +178,7 @@ struct gpio_pin_info dal_gpio_get_generic_pin_info( enum gpio_id id, uint32_t en) { - struct gpio_pin_info pin; + struct gpio_pin_info pin = {0}; if (service->translate.funcs->id_to_offset) { service->translate.funcs->id_to_offset(id, en, &pin); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile index 9e8e9de51a..cf8aa23b44 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile +++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile @@ -180,7 +180,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35) ############################################################################### -HWSS_DCN351 = dcn351_init.o +HWSS_DCN351 = dcn351_hwseq.o dcn351_init.o AMD_DAL_HWSS_DCN351 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn351/,$(HWSS_DCN351)) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 0ba1feaf96..0d3ea291ee 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -249,7 +249,7 @@ static bool dce110_enable_display_power_gating( return false; } -static void build_prescale_params(struct ipp_prescale_params *prescale_params, +static void dce110_prescale_params(struct ipp_prescale_params *prescale_params, const struct 
dc_plane_state *plane_state) { prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED; @@ -289,16 +289,14 @@ dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, if (ipp == NULL) return false; - if (plane_state->in_transfer_func) - tf = plane_state->in_transfer_func; + tf = &plane_state->in_transfer_func; - build_prescale_params(&prescale_params, plane_state); + dce110_prescale_params(&prescale_params, plane_state); ipp->funcs->ipp_program_prescale(ipp, &prescale_params); - if (plane_state->gamma_correction && - !plane_state->gamma_correction->is_identity && + if (!plane_state->gamma_correction.is_identity && dce_use_lut(plane_state->format)) - ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction); + ipp->funcs->ipp_program_input_lut(ipp, &plane_state->gamma_correction); if (tf == NULL) { /* Default case if no input transfer function specified */ @@ -614,11 +612,10 @@ dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, xfm->funcs->opp_power_on_regamma_lut(xfm, true); xfm->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM; - if (stream->out_transfer_func && - stream->out_transfer_func->type == TF_TYPE_PREDEFINED && - stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB) { + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED && + stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB) { xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_SRGB); - } else if (dce110_translate_regamma_to_hw_format(stream->out_transfer_func, + } else if (dce110_translate_regamma_to_hw_format(&stream->out_transfer_func, &xfm->regamma_params)) { xfm->funcs->opp_program_regamma_pwl(xfm, &xfm->regamma_params); xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_USER); @@ -1192,16 +1189,6 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); } - - if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { - /* TODO: This looks like a bug to me as we are disabling HPO IO when - * we are just disabling a single HPO stream. Shouldn't we disable HPO - * HW control only when HPOs for all streams are disabled? - */ - if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control) - pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control( - pipe_ctx->stream->ctx->dc->hwseq, false); - } } void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, @@ -1550,7 +1537,7 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( } if (pipe_ctx->stream_res.audio != NULL) { - struct audio_output audio_output; + struct audio_output audio_output = {0}; build_audio_output(context, pipe_ctx, &audio_output); @@ -2201,7 +2188,7 @@ static void dce110_setup_audio_dto( struct dc *dc, struct dc_state *context) { - int i; + unsigned int i; /* program audio wall clock. use HDMI as clock source if HDMI * audio active. 
Otherwise, use DP as clock source @@ -2273,7 +2260,7 @@ static void dce110_setup_audio_dto( continue; if (pipe_ctx->stream_res.audio != NULL) { - struct audio_output audio_output; + struct audio_output audio_output = {0}; build_audio_output(context, pipe_ctx, &audio_output); @@ -2288,6 +2275,19 @@ static void dce110_setup_audio_dto( } } +static bool dce110_is_hpo_enabled(struct dc_state *context) +{ + int i; + + for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) { + if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i]) { + return true; + } + } + + return false; +} + enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context) @@ -2296,6 +2296,8 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc_bios *dcb = dc->ctx->dc_bios; enum dc_status status; int i; + bool was_hpo_enabled = dce110_is_hpo_enabled(dc->current_state); + bool is_hpo_enabled = dce110_is_hpo_enabled(context); /* reset syncd pipes from disabled pipes */ if (dc->config.use_pipe_ctx_sync_logic) @@ -2338,6 +2340,10 @@ enum dc_status dce110_apply_ctx_to_hw( dce110_setup_audio_dto(dc, context); + if (dc->hwseq->funcs.setup_hpo_hw_control && was_hpo_enabled != is_hpo_enabled) { + dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, is_hpo_enabled); + } + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i]; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 314798400b..0c4aef8ffe 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -367,7 +367,7 @@ static void dcn10_log_color_state(struct dc *dc, dc->caps.color.dpp.ocsc); DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n"); - for (i = 0; i < pool->pipe_count; i++) { + for (i = 0; i < pool->mpcc_count; i++) { struct mpcc_state s = {0}; pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); @@ -1366,6 +1366,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) struct dce_hwseq *hws = dc->hwseq; struct hubbub *hubbub = dc->res_pool->hubbub; bool can_apply_seamless_boot = false; + bool tg_enabled[MAX_PIPES] = {false}; for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->apply_seamless_boot_optimization) { @@ -1447,6 +1448,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) // requesting data while in PSR. 
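/*
 * Editor's note -- illustrative sketch, not part of the upstream patch.
 * The dce110 change above stops toggling HPO hardware control per stream:
 * dce110_is_hpo_enabled() derives "is any HPO DP stream encoder acquired?"
 * from the old and new state, and setup_hpo_hw_control() is called only when
 * that answer changes.  The edge-triggered pattern, reduced to standalone C
 * (the callback type is hypothetical):
 */
#include <stdbool.h>

typedef void (*hpo_hw_control_fn)(bool enable);

static void apply_hpo_transition(bool was_enabled, bool is_enabled,
				 hpo_hw_control_fn setup_hpo_hw_control)
{
	/* Program the block only on a 0 -> 1 or 1 -> 0 transition. */
	if (setup_hpo_hw_control && was_enabled != is_enabled)
		setup_hpo_hw_control(is_enabled);
}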
tg->funcs->tg_init(tg); hubp->power_gated = true; + tg_enabled[i] = true; continue; } @@ -1488,6 +1490,20 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) tg->funcs->tg_init(tg); } + /* Clean up MPC tree */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (tg_enabled[i]) { + if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) { + if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) { + int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id; + + if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id])) + dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL; + } + } + } + } + /* Power gate DSCs */ if (hws->funcs.dsc_pg_control != NULL) { uint32_t num_opps = 0; @@ -1813,14 +1829,12 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, if (dpp_base == NULL) return false; - if (plane_state->in_transfer_func) - tf = plane_state->in_transfer_func; + tf = &plane_state->in_transfer_func; - if (plane_state->gamma_correction && - !dpp_base->ctx->dc->debug.always_use_regamma - && !plane_state->gamma_correction->is_identity + if (!dpp_base->ctx->dc->debug.always_use_regamma + && !plane_state->gamma_correction.is_identity && dce_use_lut(plane_state->format)) - dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction); + dpp_base->funcs->dpp_program_input_lut(dpp_base, &plane_state->gamma_correction); if (tf == NULL) dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS); @@ -1861,7 +1875,7 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, #define MAX_NUM_HW_POINTS 0x200 static void log_tf(struct dc_context *ctx, - struct dc_transfer_func *tf, uint32_t hw_points_num) + const struct dc_transfer_func *tf, uint32_t hw_points_num) { // DC_LOG_GAMMA is default logging of all hw points // DC_LOG_ALL_GAMMA logs all points, not only hw points @@ -1898,16 +1912,15 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM; - if (stream->out_transfer_func && - stream->out_transfer_func->type == TF_TYPE_PREDEFINED && - stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB) + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED && + stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB) dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB); /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full * update. 
*/ else if (cm_helper_translate_curve_to_hw_format(dc->ctx, - stream->out_transfer_func, + &stream->out_transfer_func, &dpp->regamma_params, false)) { dpp->funcs->dpp_program_regamma_pwl( dpp, @@ -1915,10 +1928,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, } else dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS); - if (stream->ctx && - stream->out_transfer_func) { + if (stream->ctx) { log_tf(stream->ctx, - stream->out_transfer_func, + &stream->out_transfer_func, dpp->regamma_params.hw_points_num); } @@ -2173,7 +2185,7 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size, struct dc_crtc_timing *hw_crtc_timing; uint64_t phase[MAX_PIPES]; uint64_t modulo[MAX_PIPES]; - unsigned int pclk; + unsigned int pclk = 0; uint32_t embedded_pix_clk_100hz; uint16_t embedded_h_total; @@ -2264,7 +2276,7 @@ void dcn10_enable_vblanks_synchronization( struct dc_context *dc_ctx = dc->ctx; struct output_pixel_processor *opp; struct timing_generator *tg; - int i, width, height, master; + int i, width = 0, height = 0, master; DC_LOGGER_INIT(dc_ctx->logger); @@ -2330,7 +2342,7 @@ void dcn10_enable_timing_synchronization( struct dc_context *dc_ctx = dc->ctx; struct output_pixel_processor *opp; struct timing_generator *tg; - int i, width, height; + int i, width = 0, height = 0; DC_LOGGER_INIT(dc_ctx->logger); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 8b3536c380..7d833fa6dd 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -155,7 +155,7 @@ void dcn20_log_color_state(struct dc *dc, DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE" " OGAM mode\n"); - for (i = 0; i < pool->pipe_count; i++) { + for (i = 0; i < pool->mpcc_count; i++) { struct mpcc_state s = {0}; pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); @@ -403,7 +403,7 @@ void dcn20_init_blank( struct output_pixel_processor *opp = NULL; struct output_pixel_processor *bottom_opp = NULL; uint32_t num_opps, opp_id_src0, opp_id_src1; - uint32_t otg_active_width, otg_active_height; + uint32_t otg_active_width = 0, otg_active_height = 0; /* program opp dpg blank color */ color_space = COLOR_SPACE_SRGB; @@ -873,6 +873,22 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; } + if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { + struct dccg *dccg = dc->res_pool->dccg; + struct timing_generator *tg = pipe_ctx->stream_res.tg; + struct dtbclk_dto_params dto_params = {0}; + + if (dccg->funcs->set_dtbclk_p_src) + dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst); + + dto_params.otg_inst = tg->inst; + dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; + dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); + dto_params.timing = &pipe_ctx->stream->timing; + dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + } + if (dc_is_hdmi_tmds_signal(stream->signal)) { stream->link->phy_state.symclk_ref_cnts.otg = 1; if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF) @@ -959,22 +975,6 @@ enum dc_status dcn20_enable_stream_timing( pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg); } - if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { - struct dccg *dccg = dc->res_pool->dccg; - struct timing_generator *tg = pipe_ctx->stream_res.tg; - 
struct dtbclk_dto_params dto_params = {0}; - - if (dccg->funcs->set_dtbclk_p_src) - dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst); - - dto_params.otg_inst = tg->inst; - dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; - dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); - dto_params.timing = &pipe_ctx->stream->timing; - dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); - dccg->funcs->set_dtbclk_dto(dccg, &dto_params); - } - return DC_OK; } @@ -1011,7 +1011,7 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, { int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; /* * program OGAM only for the top pipe * if there is a pipe split then fix diagnostic is required: @@ -1022,19 +1022,19 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, if (mpc->funcs->power_on_mpc_mem_pwr) mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true); if (pipe_ctx->top_pipe == NULL - && mpc->funcs->set_output_gamma && stream->out_transfer_func) { - if (stream->out_transfer_func->type == TF_TYPE_HWPWL) - params = &stream->out_transfer_func->pwl; - else if (pipe_ctx->stream->out_transfer_func->type == + && mpc->funcs->set_output_gamma) { + if (stream->out_transfer_func.type == TF_TYPE_HWPWL) + params = &stream->out_transfer_func.pwl; + else if (pipe_ctx->stream->out_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && cm_helper_translate_curve_to_hw_format(dc->ctx, - stream->out_transfer_func, + &stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; /* * there is no ROM */ - if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED) BREAK_TO_DEBUGGER(); } /* @@ -1050,17 +1050,15 @@ bool dcn20_set_blend_lut( { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; bool result = true; - struct pwl_params *blend_lut = NULL; - - if (plane_state->blend_tf) { - if (plane_state->blend_tf->type == TF_TYPE_HWPWL) - blend_lut = &plane_state->blend_tf->pwl; - else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format(plane_state->ctx, - plane_state->blend_tf, - &dpp_base->regamma_params, false); - blend_lut = &dpp_base->regamma_params; - } + const struct pwl_params *blend_lut = NULL; + + if (plane_state->blend_tf.type == TF_TYPE_HWPWL) + blend_lut = &plane_state->blend_tf.pwl; + else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { + cm_helper_translate_curve_to_hw_format(plane_state->ctx, + &plane_state->blend_tf, + &dpp_base->regamma_params, false); + blend_lut = &dpp_base->regamma_params; } result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut); @@ -1072,24 +1070,21 @@ bool dcn20_set_shaper_3dlut( { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; bool result = true; - struct pwl_params *shaper_lut = NULL; - - if (plane_state->in_shaper_func) { - if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL) - shaper_lut = &plane_state->in_shaper_func->pwl; - else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format(plane_state->ctx, - plane_state->in_shaper_func, - &dpp_base->shaper_params, true); - shaper_lut = &dpp_base->shaper_params; - } + const struct pwl_params *shaper_lut = NULL; + + if (plane_state->in_shaper_func.type == 
TF_TYPE_HWPWL) + shaper_lut = &plane_state->in_shaper_func.pwl; + else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { + cm_helper_translate_curve_to_hw_format(plane_state->ctx, + &plane_state->in_shaper_func, + &dpp_base->shaper_params, true); + shaper_lut = &dpp_base->shaper_params; } result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut); - if (plane_state->lut3d_func && - plane_state->lut3d_func->state.bits.initialized == 1) + if (plane_state->lut3d_func.state.bits.initialized == 1) result = dpp_base->funcs->dpp_program_3dlut(dpp_base, - &plane_state->lut3d_func->lut_3d); + &plane_state->lut3d_func.lut_3d); else result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL); @@ -1112,15 +1107,7 @@ bool dcn20_set_input_transfer_func(struct dc *dc, hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state); hws->funcs.set_blend_lut(pipe_ctx, plane_state); - if (plane_state->in_transfer_func) - tf = plane_state->in_transfer_func; - - - if (tf == NULL) { - dpp_base->funcs->dpp_set_degamma(dpp_base, - IPP_DEGAMMA_MODE_BYPASS); - return true; - } + tf = &plane_state->in_transfer_func; if (tf->type == TF_TYPE_HWPWL || tf->type == TF_TYPE_DISTRIBUTED_POINTS) use_degamma_ram = true; @@ -1917,9 +1904,11 @@ static void dcn20_program_pipe( dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub); } - if (dc->res_pool->hubbub->funcs->program_det_size && pipe_ctx->update_flags.bits.det_size) - dc->res_pool->hubbub->funcs->program_det_size( - dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb); + if (pipe_ctx->update_flags.bits.det_size) { + if (dc->res_pool->hubbub->funcs->program_det_size) + dc->res_pool->hubbub->funcs->program_det_size( + dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb); + } if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) dcn20_update_dchubp_dpp(dc, pipe_ctx, context); @@ -2080,9 +2069,11 @@ void dcn20_program_front_end_for_ctx( * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with * DET allocation. 
*/ - if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable || - (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) - hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); + if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable || + (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) { + if (hubbub->funcs->program_det_size) + hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); + } hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); } @@ -2892,11 +2883,6 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; - if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { - if (dc->hwseq->funcs.setup_hpo_hw_control) - dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true); - } - if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { dto_params.otg_inst = tg->inst; dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c index 884e3e3233..ef6488165b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c @@ -67,6 +67,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .setup_stereo = dcn10_setup_stereo, .set_avmute = dce110_set_avmute, .log_hw_state = dcn10_log_hw_state, + .log_color_state = dcn20_log_color_state, .get_hw_state = dcn10_get_hw_state, .clear_status_bits = dcn10_clear_status_bits, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c index d5769f3887..6be846635a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c @@ -167,7 +167,7 @@ void dcn201_init_blank( struct tg_color black_color = {0}; struct output_pixel_processor *opp = NULL; uint32_t num_opps, opp_id_src0, opp_id_src1; - uint32_t otg_active_width, otg_active_height; + uint32_t otg_active_width = 0, otg_active_height = 0; /* program opp dpg blank color */ color_space = COLOR_SPACE_SRGB; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c index 7252f5f781..804be977ea 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c @@ -66,7 +66,7 @@ static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *c int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) { - struct dcn_hubbub_phys_addr_config config; + struct dcn_hubbub_phys_addr_config config = {0}; config.system_aperture.fb_top = pa_config->system_aperture.fb_top; config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index 8bc3d01537..ed9141a67d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -166,7 +166,7 @@ void dcn30_log_color_state(struct dc *dc, "C21 C22 C23 C24 " "C31 C32 C33 C34 \n"); - for (i = 0; i < pool->pipe_count; i++) { + for (i = 0; i < pool->mpcc_count; i++) { struct mpcc_state s = {0}; pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); @@ -223,16 +223,14 @@ bool dcn30_set_blend_lut( { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; bool result = true; - struct pwl_params *blend_lut = NULL; - - if (plane_state->blend_tf) { - if (plane_state->blend_tf->type == TF_TYPE_HWPWL) - blend_lut = &plane_state->blend_tf->pwl; - else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm3_helper_translate_curve_to_hw_format( - plane_state->blend_tf, &dpp_base->regamma_params, false); - blend_lut = &dpp_base->regamma_params; - } + const struct pwl_params *blend_lut = NULL; + + if (plane_state->blend_tf.type == TF_TYPE_HWPWL) + blend_lut = &plane_state->blend_tf.pwl; + else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { + cm3_helper_translate_curve_to_hw_format( + &plane_state->blend_tf, &dpp_base->regamma_params, false); + blend_lut = &dpp_base->regamma_params; } result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut); @@ -300,27 +298,24 @@ bool dcn30_set_input_transfer_func(struct dc *dc, struct dpp *dpp_base = pipe_ctx->plane_res.dpp; enum dc_transfer_func_predefined tf; bool result = true; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; if (dpp_base == NULL || plane_state == NULL) return false; tf = TRANSFER_FUNCTION_UNITY; - if (plane_state->in_transfer_func && - plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED) - tf = plane_state->in_transfer_func->tf; + if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED) + tf = plane_state->in_transfer_func.tf; dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf); - if (plane_state->in_transfer_func) { - if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL) - params = &plane_state->in_transfer_func->pwl; - else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS && - cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func, - &dpp_base->degamma_params, false)) - params = &dpp_base->degamma_params; - } + if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL) + params = &plane_state->in_transfer_func.pwl; + else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && + cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func, + &dpp_base->degamma_params, false)) + params = &dpp_base->degamma_params; result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); @@ -378,24 +373,24 @@ bool dcn30_set_output_transfer_func(struct dc *dc, { int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; bool ret = false; /* program OGAM or 3DLUT only for the top pipe*/ if (pipe_ctx->top_pipe == NULL) { /*program rmu shaper and 3dlut in MPC*/ ret = dcn30_set_mpc_shaper_3dlut(pipe_ctx, stream); - if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) { - if (stream->out_transfer_func->type == TF_TYPE_HWPWL) - params = &stream->out_transfer_func->pwl; - else if (pipe_ctx->stream->out_transfer_func->type == + if (ret == 
false && mpc->funcs->set_output_gamma) { + if (stream->out_transfer_func.type == TF_TYPE_HWPWL) + params = &stream->out_transfer_func.pwl; + else if (pipe_ctx->stream->out_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && cm3_helper_translate_curve_to_hw_format( - stream->out_transfer_func, + &stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; /* there are no ROM LUTs in OUTGAM */ - if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED) BREAK_TO_DEBUGGER(); } } @@ -804,7 +799,7 @@ void dcn30_init_hw(struct dc *dc) // Get DMCUB capabilities dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; - dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; } void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) @@ -818,7 +813,7 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) enable); /* Wait for two frame to make sure AV mute is sent out */ - if (enable) { + if (enable && pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) { pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); @@ -890,7 +885,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) { union dmub_rb_cmd cmd; uint32_t tmr_delay = 0, tmr_scale = 0; - struct dc_cursor_attributes cursor_attr; + struct dc_cursor_attributes cursor_attr = {0}; bool cursor_cache_enable = false; struct dc_stream_state *stream = NULL; struct dc_plane_state *plane = NULL; @@ -946,7 +941,8 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 && plane->address.page_table_base.quad_part == 0 && dc->hwss.does_plane_fit_in_mall && - dc->hwss.does_plane_fit_in_mall(dc, plane, + dc->hwss.does_plane_fit_in_mall(dc, plane->plane_size.surface_pitch, + plane->plane_size.surface_size.height, plane->format, cursor_cache_enable ? &cursor_attr : NULL)) { unsigned int v_total = stream->adjust.v_total_max ? stream->adjust.v_total_max : stream->timing.v_total; @@ -1076,11 +1072,15 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) return true; } -bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr) +bool dcn30_does_plane_fit_in_mall(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, + struct dc_cursor_attributes *cursor_attr) { // add meta size? - unsigned int surface_size = plane->plane_size.surface_pitch * plane->plane_size.surface_size.height * - (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4); + unsigned int surface_size = pitch * height * + (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 
8 : 4); unsigned int mall_size = dc->caps.mall_size_total; unsigned int cursor_size = 0; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h index 638f018a3c..76b1683948 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h @@ -71,7 +71,10 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx); void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx); -bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, +bool dcn30_does_plane_fit_in_mall(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, struct dc_cursor_attributes *cursor_attr); bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index a760f0c6fe..1c8abb417b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -273,7 +273,7 @@ void dcn31_init_hw(struct dc *dc) // Get DMCUB capabilities dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; - dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; } void dcn31_dsc_pg_control( @@ -479,7 +479,7 @@ void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool p int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) { - struct dcn_hubbub_phys_addr_config config; + struct dcn_hubbub_phys_addr_config config = {0}; config.system_aperture.fb_top = pa_config->system_aperture.fb_top; config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c index 093f438755..0d8a05cf8b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c @@ -82,7 +82,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) if (enable) { struct dsc_config dsc_cfg; - struct dsc_optc_config dsc_optc_cfg; + struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; /* Enable DSC hw block */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 7668229438..b8e884368d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -239,8 +239,10 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c // Convert number of cache lines required to number of ways if (dc->debug.force_mall_ss_num_ways > 0) { num_ways = dc->debug.force_mall_ss_num_ways; + } else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) { + num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes); } else { - num_ways = dcn32_helper_mall_bytes_to_ways(dc, mall_ss_size_bytes); + num_ways = 0; } return num_ways; @@ -261,7 +263,9 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) for (i = 0; i < 
dc->current_state->stream_count; i++) { /* MALL SS messaging is not supported with PSR at this time */ if (dc->current_state->streams[i] != NULL && - dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) + dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && + (dc->current_state->stream_count > 1 || (!dc->current_state->streams[i]->dpms_off && + dc->current_state->stream_status[i].plane_count > 0))) return false; } @@ -475,39 +479,35 @@ bool dcn32_set_mcm_luts( int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; bool result = true; - struct pwl_params *lut_params = NULL; + const struct pwl_params *lut_params = NULL; // 1D LUT - if (plane_state->blend_tf) { - if (plane_state->blend_tf->type == TF_TYPE_HWPWL) - lut_params = &plane_state->blend_tf->pwl; - else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf, - &dpp_base->regamma_params, false); - lut_params = &dpp_base->regamma_params; - } + if (plane_state->blend_tf.type == TF_TYPE_HWPWL) + lut_params = &plane_state->blend_tf.pwl; + else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { + cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf, + &dpp_base->regamma_params, false); + lut_params = &dpp_base->regamma_params; } result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); lut_params = NULL; // Shaper - if (plane_state->in_shaper_func) { - if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL) - lut_params = &plane_state->in_shaper_func->pwl; - else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { - // TODO: dpp_base replace - ASSERT(false); - cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func, - &dpp_base->shaper_params, true); - lut_params = &dpp_base->shaper_params; - } + if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL) + lut_params = &plane_state->in_shaper_func.pwl; + else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { + // TODO: dpp_base replace + ASSERT(false); + cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func, + &dpp_base->shaper_params, true); + lut_params = &dpp_base->shaper_params; } result = mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); // 3D - if (plane_state->lut3d_func && plane_state->lut3d_func->state.bits.initialized == 1) - result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func->lut_3d, mpcc_id); + if (plane_state->lut3d_func.state.bits.initialized == 1) + result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id); else result = mpc->funcs->program_3dlut(mpc, NULL, mpcc_id); @@ -524,27 +524,24 @@ bool dcn32_set_input_transfer_func(struct dc *dc, enum dc_transfer_func_predefined tf; bool result = true; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; if (mpc == NULL || plane_state == NULL) return false; tf = TRANSFER_FUNCTION_UNITY; - if (plane_state->in_transfer_func && - plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED) - tf = plane_state->in_transfer_func->tf; + if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED) + tf = plane_state->in_transfer_func.tf; dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf); - if (plane_state->in_transfer_func) { - if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL) - params = &plane_state->in_transfer_func->pwl; - else if (plane_state->in_transfer_func->type 
== TF_TYPE_DISTRIBUTED_POINTS && - cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func, - &dpp_base->degamma_params, false)) - params = &dpp_base->degamma_params; - } + if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL) + params = &plane_state->in_transfer_func.pwl; + else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && + cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func, + &dpp_base->degamma_params, false)) + params = &dpp_base->degamma_params; dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); @@ -562,24 +559,24 @@ bool dcn32_set_output_transfer_func(struct dc *dc, { int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; bool ret = false; /* program OGAM or 3DLUT only for the top pipe*/ if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) { /*program shaper and 3dlut in MPC*/ ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream); - if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) { - if (stream->out_transfer_func->type == TF_TYPE_HWPWL) - params = &stream->out_transfer_func->pwl; - else if (pipe_ctx->stream->out_transfer_func->type == + if (ret == false && mpc->funcs->set_output_gamma) { + if (stream->out_transfer_func.type == TF_TYPE_HWPWL) + params = &stream->out_transfer_func.pwl; + else if (pipe_ctx->stream->out_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && cm3_helper_translate_curve_to_hw_format( - stream->out_transfer_func, + &stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; /* there are no ROM LUTs in OUTGAM */ - if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED) BREAK_TO_DEBUGGER(); } } @@ -956,10 +953,10 @@ void dcn32_init_hw(struct dc *dc) dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support; dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable; - dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; if (dc->ctx->dmub_srv->dmub->fw_version < - DMUB_FW_VERSION(7, 0, 35)) { + DMUB_FW_VERSION(7, 0, 35)) { dc->debug.force_disable_subvp = true; dc->debug.disable_fpo_optimizations = true; } @@ -992,7 +989,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) if (enable) { struct dsc_config dsc_cfg; - struct dsc_optc_config dsc_optc_cfg; + struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; /* Enable DSC hw block */ @@ -1545,7 +1542,7 @@ void dcn32_init_blank( struct output_pixel_processor *opp = NULL; struct output_pixel_processor *bottom_opp = NULL; uint32_t num_opps, opp_id_src0, opp_id_src1; - uint32_t otg_active_width, otg_active_height; + uint32_t otg_active_width = 0, otg_active_height = 0; uint32_t i; /* program opp dpg blank color */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index eb6c6ba64c..dcced89c07 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -349,7 +349,7 @@ void dcn35_init_hw(struct dc *dc) if (dc->ctx->dmub_srv) { 
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; - dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; } if (dc->res_pool->pg_cntl) { @@ -373,7 +373,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) if (enable) { struct dsc_config dsc_cfg; - struct dsc_optc_config dsc_optc_cfg; + struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; /* Enable DSC hw block */ @@ -495,6 +495,17 @@ void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, } } +void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on) +{ + if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpstream) + return; + + if (hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating) { + hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating( + hws->ctx->dc->res_pool->dccg, dp_hpo_inst, clock_on); + } +} + void dcn35_dsc_pg_control( struct dce_hwseq *hws, unsigned int dsc_inst, @@ -700,6 +711,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) struct hubbub *hubbub = dc->res_pool->hubbub; struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; bool can_apply_seamless_boot = false; + bool tg_enabled[MAX_PIPES] = {false}; for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->apply_seamless_boot_optimization) { @@ -781,6 +793,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) // requesting data while in PSR. tg->funcs->tg_init(tg); hubp->power_gated = true; + tg_enabled[i] = true; continue; } @@ -822,6 +835,20 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) tg->funcs->tg_init(tg); } + /* Clean up MPC tree */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (tg_enabled[i]) { + if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) { + if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) { + int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id; + + if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id])) + dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL; + } + } + } + } + if (pg_cntl != NULL) { if (pg_cntl->funcs->dsc_pg_control != NULL) { uint32_t num_opps = 0; @@ -982,6 +1009,9 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired) update_state->pg_res_update[PG_HPO] = true; + if (hpo_frl_stream_enc_acquired) + update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true; + update_state->pg_res_update[PG_DWB] = true; for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1007,6 +1037,9 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (pipe_ctx->stream_res.opp) update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false; + + if (pipe_ctx->stream_res.hpo_dp_stream_enc) + update_state->pg_pipe_res_update[PG_DPSTREAM][pipe_ctx->stream_res.hpo_dp_stream_enc->inst] = false; } /*domain24 controls all the otg, mpc, opp, as long as one otg is still up, avoid enabling OTG PG*/ for (i = 0; i < dc->res_pool->timing_generator_count; i++) { @@ -1064,6 +1097,9 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, if (j == PG_OPTC && new_pipe->stream_res.tg) update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true; 
+ + if (j == PG_DPSTREAM && new_pipe->stream_res.hpo_dp_stream_enc) + update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true; } } else if (cur_pipe->plane_state == new_pipe->plane_state || cur_pipe == new_pipe) { @@ -1093,6 +1129,11 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, cur_pipe->stream_res.tg != new_pipe->stream_res.tg && new_pipe->stream_res.tg) update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true; + + if (j == PG_DPSTREAM && + cur_pipe->stream_res.hpo_dp_stream_enc != new_pipe->stream_res.hpo_dp_stream_enc && + new_pipe->stream_res.hpo_dp_stream_enc) + update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true; } } } @@ -1108,6 +1149,9 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, if (hpo_frl_stream_enc_acquired || hpo_dp_stream_enc_acquired) update_state->pg_res_update[PG_HPO] = true; + if (hpo_frl_stream_enc_acquired) + update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true; + } /** @@ -1232,14 +1276,19 @@ void dcn35_root_clock_control(struct dc *dc, if (!pg_cntl) return; /*enable root clock first when power up*/ - if (power_on) + if (power_on) { for (i = 0; i < dc->res_pool->pipe_count; i++) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { if (dc->hwseq->funcs.dpp_root_clock_control) dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on); } + if (update_state->pg_pipe_res_update[PG_DPSTREAM][i]) + if (dc->hwseq->funcs.dpstream_root_clock_control) + dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on); } + + } for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { if (update_state->pg_pipe_res_update[PG_DSC][i]) { if (power_on) { @@ -1252,14 +1301,19 @@ void dcn35_root_clock_control(struct dc *dc, } } /*disable root clock first when power down*/ - if (!power_on) + if (!power_on) { for (i = 0; i < dc->res_pool->pipe_count; i++) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { if (dc->hwseq->funcs.dpp_root_clock_control) dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on); } + if (update_state->pg_pipe_res_update[PG_DPSTREAM][i]) + if (dc->hwseq->funcs.dpstream_root_clock_control) + dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on); } + + } } void dcn35_prepare_bandwidth( @@ -1300,22 +1354,6 @@ void dcn35_optimize_bandwidth( } } -void dcn35_set_idle_state(const struct dc *dc, bool allow_idle) -{ - // TODO: Find a more suitable communcation - if (dc->clk_mgr->funcs->set_idle_state) - dc->clk_mgr->funcs->set_idle_state(dc->clk_mgr, allow_idle); -} - -uint32_t dcn35_get_idle_state(const struct dc *dc) -{ - // TODO: Find a more suitable communcation - if (dc->clk_mgr->funcs->get_idle_state) - return dc->clk_mgr->funcs->get_idle_state(dc->clk_mgr); - - return 0; -} - void dcn35_set_drr(struct pipe_ctx **pipe_ctx, int num_pipes, struct dc_crtc_timing_adjust adjust) { @@ -1374,6 +1412,34 @@ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx, triggers, params->num_frames); } +void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx, + int num_pipes, uint32_t v_total_min, uint32_t v_total_max) +{ + int i = 0; + struct long_vtotal_params params = {0}; + + params.vertical_total_max = v_total_max; + params.vertical_total_min = v_total_min; + + for (i = 0; i < num_pipes; i++) { + if (!pipe_ctx[i]) + continue; + + if (pipe_ctx[i]->stream) { + struct dc_crtc_timing *timing = 
&pipe_ctx[i]->stream->timing; + + if (timing) + params.vertical_blank_start = timing->v_total - timing->v_front_porch; + else + params.vertical_blank_start = 0; + + if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs && + pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal) + pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal(pipe_ctx[i]->stream_res.tg, ¶ms); + } + } +} + static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx) { /* Calculate average pixel count per TU, return false if under ~2.00 to diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index 91f5d1136a..f0ea7d1511 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -37,6 +37,8 @@ void dcn35_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool pow void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); +void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on); + void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable); void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable); @@ -84,15 +86,15 @@ void dcn35_dsc_pg_control( unsigned int dsc_inst, bool power_on); -void dcn35_set_idle_state(const struct dc *dc, bool allow_idle); -uint32_t dcn35_get_idle_state(const struct dc *dc); - void dcn35_set_drr(struct pipe_ctx **pipe_ctx, int num_pipes, struct dc_crtc_timing_adjust adjust); void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx, int num_pipes, const struct dc_static_screen_params *params); +void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx, + int num_pipes, uint32_t v_total_min, uint32_t v_total_max); + bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx); #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index 6c8da59b79..199781233f 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -121,8 +121,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .hw_block_power_up = dcn35_hw_block_power_up, .hw_block_power_down = dcn35_hw_block_power_down, .root_clock_control = dcn35_root_clock_control, - .set_idle_state = dcn35_set_idle_state, - .get_idle_state = dcn35_get_idle_state + .set_long_vtotal = dcn35_set_long_vblank, }; static const struct hwseq_private_funcs dcn35_private_funcs = { @@ -148,6 +147,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = { //.hubp_pg_control = dcn35_hubp_pg_control, .enable_power_gating_plane = dcn35_enable_power_gating_plane, .dpp_root_clock_control = dcn35_dpp_root_clock_control, + .dpstream_root_clock_control = dcn35_dpstream_root_clock_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, .update_odm = dcn35_update_odm, .set_hdr_multiplier = dcn10_set_hdr_multiplier, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile index b24ad27fe6..a4b3c1e99e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile @@ -1,16 +1,27 @@ # -# (c) Copyright 2022 Advanced Micro Devices, Inc. All the rights reserved +# Copyright (c) 2022-2024 Advanced Micro Devices, Inc. # -# All rights reserved. 
This notice is intended as a precaution against -# inadvertent publication and does not imply publication or any waiver -# of confidentiality. The year included in the foregoing notice is the -# year of creation of the work. +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: # -# Authors: AMD +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. # # Makefile for DCN351. -DCN351 = dcn351_init.o +DCN351 = dcn351_hwseq.o dcn351_init.o AMD_DAL_DCN351 = $(addprefix $(AMDDALPATH)/dc/dcn351/,$(DCN351)) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c new file mode 100644 index 0000000000..93fe5b262a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "resource.h" +#include "dcn351_hwseq.h" +#include "dcn35/dcn35_hwseq.h" + +#define DC_LOGGER_INIT(logger) \ + struct dal_logger *dc_logger = logger + +#define DC_LOGGER \ + dc_logger + +void dcn351_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, + struct pg_block_update *update_state) +{ + int i, j; + + dcn35_calc_blocks_to_gate(dc, context, update_state); + + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (!update_state->pg_pipe_res_update[PG_HUBP][i] && + !update_state->pg_pipe_res_update[PG_DPP][i]) { + for (j = i - 1; j >= 0; j--) { + update_state->pg_pipe_res_update[PG_HUBP][j] = false; + update_state->pg_pipe_res_update[PG_DPP][j] = false; + } + + break; + } + } +} + +void dcn351_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, + struct pg_block_update *update_state) +{ + int i, j; + + dcn35_calc_blocks_to_ungate(dc, context, update_state); + + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + for (j = i - 1; j >= 0; j--) { + update_state->pg_pipe_res_update[PG_HUBP][j] = true; + update_state->pg_pipe_res_update[PG_DPP][j] = true; + } + + break; + } + } +} + +/** + * dcn351_hw_block_power_down() - power down sequence + * + * The following sequence describes the ON-OFF (ONO) for power down: + * + * ONO Region 11, DCPG 19: dsc3 + * ONO Region 10, DCPG 3: dchubp3, dpp3 + * ONO Region 9, DCPG 18: dsc2 + * ONO Region 8, DCPG 2: dchubp2, dpp2 + * ONO Region 7, DCPG 17: dsc1 + * ONO Region 6, DCPG 1: dchubp1, dpp1 + * ONO Region 5, DCPG 16: dsc0 + * ONO Region 4, DCPG 0: dchubp0, dpp0 + * ONO Region 3, DCPG 25: hpo - SKIPPED. Should be kept on + * ONO Region 2, DCPG 24: mpc opp optc dwb + * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry + * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. will be pwr dwn after lono timer is armed + * + * @dc: Current DC state + * @update_state: update PG sequence states for HW block + */ +void dcn351_hw_block_power_down(struct dc *dc, + struct pg_block_update *update_state) +{ + int i = 0; + struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; + + if (!pg_cntl || dc->debug.ignore_pg) + return; + + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (update_state->pg_pipe_res_update[PG_DSC][i]) { + if (pg_cntl->funcs->dsc_pg_control) + pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false); + } + + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + if (pg_cntl->funcs->hubp_dpp_pg_control) + pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false); + } + } + + // domain25 currently always on. + + /* this will need all the clients to unregister optc interrupts, let dmubfw handle this */ + if (pg_cntl->funcs->plane_otg_pg_control) + pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false); + + // domain23 currently always on. + // domain22 currently always on. +} + +/** + * dcn351_hw_block_power_up() - power up sequence + * + * The following sequence describes the ON-OFF (ONO) for power up: + * + * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED + * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. 
PMFW will power up at IPS2 exit + * ONO Region 2, DCPG 24: mpc opp optc dwb + * ONO Region 3, DCPG 25: hpo - SKIPPED + * ONO Region 4, DCPG 0: dchubp0, dpp0 + * ONO Region 5, DCPG 16: dsc0 + * ONO Region 6, DCPG 1: dchubp1, dpp1 + * ONO Region 7, DCPG 17: dsc1 + * ONO Region 8, DCPG 2: dchubp2, dpp2 + * ONO Region 9, DCPG 18: dsc2 + * ONO Region 10, DCPG 3: dchubp3, dpp3 + * ONO Region 11, DCPG 19: dsc3 + * + * @dc: Current DC state + * @update_state: update PG sequence states for HW block + */ +void dcn351_hw_block_power_up(struct dc *dc, + struct pg_block_update *update_state) +{ + int i = 0; + struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; + + if (!pg_cntl || dc->debug.ignore_pg) + return; + + // domain22 currently always on. + // domain23 currently always on. + + /* this will need all the clients to unregister optc interrupts, let dmubfw handle this */ + if (pg_cntl->funcs->plane_otg_pg_control) + pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true); + + // domain25 currently always on. + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + if (pg_cntl->funcs->hubp_dpp_pg_control) + pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true); + } + + if (update_state->pg_pipe_res_update[PG_DSC][i]) { + if (pg_cntl->funcs->dsc_pg_control) + pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true); + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h new file mode 100644 index 0000000000..6d8f3bfb66 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_HWSS_DCN351_H__ +#define __DC_HWSS_DCN351_H__ + +#include "hw_sequencer_private.h" + +void dcn351_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, + struct pg_block_update *update_state); +void dcn351_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, + struct pg_block_update *update_state); +void dcn351_hw_block_power_up(struct dc *dc, + struct pg_block_update *update_state); +void dcn351_hw_block_power_down(struct dc *dc, + struct pg_block_update *update_state); + +#endif /* __DC_HWSS_DCN351_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index 670255c9bc..a53092cd61 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -32,6 +32,7 @@ #include "dcn31/dcn31_hwseq.h" #include "dcn32/dcn32_hwseq.h" #include "dcn35/dcn35_hwseq.h" +#include "dcn351/dcn351_hwseq.h" #include "dcn351_init.h" @@ -120,8 +121,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .hw_block_power_up = dcn35_hw_block_power_up, .hw_block_power_down = dcn35_hw_block_power_down, .root_clock_control = dcn35_root_clock_control, - .set_idle_state = dcn35_set_idle_state, - .get_idle_state = dcn35_get_idle_state }; static const struct hwseq_private_funcs dcn351_private_funcs = { @@ -147,6 +146,7 @@ static const struct hwseq_private_funcs dcn351_private_funcs = { //.hubp_pg_control = dcn35_hubp_pg_control, .enable_power_gating_plane = dcn35_enable_power_gating_plane, .dpp_root_clock_control = dcn35_dpp_root_clock_control, + .dpstream_root_clock_control = dcn35_dpstream_root_clock_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, .update_odm = dcn35_update_odm, .set_hdr_multiplier = dcn10_set_hdr_multiplier, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index f89f205e42..7c339e7e71 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -377,7 +377,10 @@ struct hw_sequencer_funcs { /* Idle Optimization Related */ bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable); - bool (*does_plane_fit_in_mall)(struct dc *dc, struct dc_plane_state *plane, + bool (*does_plane_fit_in_mall)(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, struct dc_cursor_attributes *cursor_attr); void (*commit_subvp_config)(struct dc *dc, struct dc_state *context); void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context); @@ -424,11 +427,10 @@ struct hw_sequencer_funcs { struct pg_block_update *update_state); void (*root_clock_control)(struct dc *dc, struct pg_block_update *update_state, bool power_on); - void (*set_idle_state)(const struct dc *dc, bool allow_idle); - uint32_t (*get_idle_state)(const struct dc *dc); bool (*is_pipe_topology_transition_seamless)(struct dc *dc, const struct dc_state *cur_ctx, const struct dc_state *new_ctx); + void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max); }; void color_space_to_black_color( @@ -478,9 +480,10 @@ void hwss_build_fast_sequence(struct dc *dc, struct dc_dmub_cmd *dc_dmub_cmd, unsigned int dmub_cmd_count, struct block_sequence block_sequence[], - int *num_steps, + unsigned int *num_steps, struct pipe_ctx *pipe_ctx, - struct dc_stream_status *stream_status); + 
struct dc_stream_status *stream_status, + struct dc_state *context); void hwss_send_dmcub_cmd(union block_sequence_params *params); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 554cfab5ab..341219cf41 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -120,6 +120,10 @@ struct hwseq_private_funcs { struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); + void (*dpstream_root_clock_control)( + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool clock_on); void (*dpp_pg_control)(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on); diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index b1b72e688f..028b2f971e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -90,6 +90,9 @@ struct resource_funcs { void (*update_soc_for_wm_a)( struct dc *dc, struct dc_state *context); + unsigned int (*calculate_mall_ways_from_bytes)( + const struct dc *dc, + unsigned int total_size_in_mall_bytes); /** * @populate_dml_pipes - Populate pipe data struct * @@ -336,7 +339,9 @@ struct stream_resource { }; struct plane_resource { + /* scl_data is scratch space required to program a plane */ struct scaler_data scl_data; + /* Below pointers to hw objects are required to enable the plane */ struct hubp *hubp; struct mem_input *mi; struct input_pixel_processor *ipp; @@ -496,7 +501,7 @@ struct dcn_bw_writeback { struct dcn_bw_output { struct dc_clocks clk; - struct dcn_watermark_set watermarks; + union dcn_watermark_set watermarks; struct dcn_bw_writeback bw_writeback; int compbuf_size_kb; unsigned int mall_ss_size_bytes; @@ -515,6 +520,7 @@ struct bw_context { union bw_output bw; struct display_mode_lib dml; struct dml2_context *dml2; + struct dml2_context *dml2_dc_power_source; }; struct dc_dmub_cmd { @@ -522,25 +528,6 @@ struct dc_dmub_cmd { enum dm_dmub_wait_type wait_type; }; -struct dc_scratch_space { - /* used to temporarily backup plane states of a stream during - * dc update. The reason is that plane states are overwritten - * with surface updates in dc update. Once they are overwritten - * current state is no longer valid. We want to temporarily - * store current value in plane states so we can still recover - * a valid current state during dc update. 
- */ - struct dc_plane_state plane_states[MAX_SURFACE_NUM]; - struct dc_gamma gamma_correction[MAX_SURFACE_NUM]; - struct dc_transfer_func in_transfer_func[MAX_SURFACE_NUM]; - struct dc_3dlut lut3d_func[MAX_SURFACE_NUM]; - struct dc_transfer_func in_shaper_func[MAX_SURFACE_NUM]; - struct dc_transfer_func blend_tf[MAX_SURFACE_NUM]; - - struct dc_stream_state stream_state; - struct dc_transfer_func out_transfer_func; -}; - /** * struct dc_state - The full description of a state requested by users */ @@ -623,8 +610,7 @@ struct dc_state { unsigned int stutter_period_us; } perf_params; - - struct dc_scratch_space scratch; + enum dc_power_source_type power_source; }; struct replay_context { diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index 9e4ddc9852..55529c5f47 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -31,7 +31,7 @@ #define __DCN_CALCS_H__ #include "bw_fixed.h" -#include "../dml/display_mode_lib.h" +#include "dml/display_mode_lib.h" struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 17e014d3bd..4f7480f60c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -281,8 +281,6 @@ struct clk_mgr_funcs { void (*set_low_power_state)(struct clk_mgr *clk_mgr); void (*exit_low_power_state)(struct clk_mgr *clk_mgr); bool (*is_ips_supported)(struct clk_mgr *clk_mgr); - void (*set_idle_state)(struct clk_mgr *clk_mgr, bool allow_idle); - uint32_t (*get_idle_state)(struct clk_mgr *clk_mgr); void (*init_clocks)(struct clk_mgr *clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index f4d4a68c91..4ba18ea57a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -349,7 +349,7 @@ struct clk_mgr_internal { enum dm_pp_clocks_state cur_min_clks_state; bool periodic_retraining_disabled; - unsigned int cur_phyclk_req_table[MAX_PIPES * 2]; + unsigned int cur_phyclk_req_table[MAX_LINKS]; bool smu_present; void *wm_range_table; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index b9a06bf84c..d4c7885fc9 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -59,6 +59,7 @@ enum dentist_dispclk_change_mode { struct dp_dto_params { int otg_inst; enum signal_type signal; + enum streamclk_source clk_src; uint64_t pixclk_hz; uint64_t refclk_hz; }; @@ -105,6 +106,10 @@ struct dccg_funcs { void (*otg_drop_pixel)(struct dccg *dccg, uint32_t otg_inst); void (*dccg_init)(struct dccg *dccg); + void (*set_dpstreamclk_root_clock_gating)( + struct dccg *dccg, + int dp_hpo_inst, + bool enable); void (*set_dpstreamclk)( struct dccg *dccg, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 2ae7484d18..305fdc127b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -160,7 +160,7 @@ struct hubbub_funcs { bool (*program_watermarks)( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 0f24afbf43..ca8de345d0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -322,7 +322,7 @@ struct dpp_funcs { const struct pwl_params *params); bool (*dpp_program_3dlut)( struct dpp *dpp, - struct tetrahedral_params *params); + const struct tetrahedral_params *params); void (*dpp_cnv_set_alpha_keyer)( struct dpp *dpp_base, struct cnv_color_keyer_params *color_keyer); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h index 729ca0064e..063efc8128 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h @@ -147,9 +147,10 @@ struct dwb_caps { unsigned int support_ogam :1; unsigned int support_wbscl :1; unsigned int support_ocsc :1; - unsigned int support_stereo :1; + unsigned int support_stereo :1; + unsigned int support_4k_120p :1; } caps; - unsigned int reserved2[9]; /* Reserved for future use, MUST BE 0. */ + unsigned int reserved2[10]; /* Reserved for future use, MUST BE 0. */ }; struct dwbc { @@ -166,8 +167,9 @@ struct dwbc { bool dwb_is_drc; int wb_src_plane_inst;/*hubp, mpcc, inst*/ uint32_t mask_id; - int otg_inst; - bool mvc_cfg; + int otg_inst; + bool mvc_cfg; + struct dc_dwb_params params; }; struct dwbc_funcs { @@ -192,6 +194,10 @@ struct dwbc_funcs { struct dwbc *dwbc, enum dwb_frame_capture_enable enable); + void (*dwb_set_scaler)( + struct dwbc *dwbc, + struct dc_dwb_params *params); + void (*set_stereo)( struct dwbc *dwbc, struct dwb_stereo_params *stereo_params); @@ -205,9 +211,11 @@ struct dwbc_funcs { struct dwbc *dwbc, struct dwb_warmup_params *warmup_params); - + bool (*dwb_get_mcifbuf_line)( + struct dwbc *dwbc, unsigned int *buf_idx, + unsigned int *cur_line, + unsigned int *over_run); #if defined(CONFIG_DRM_AMD_DC_FP) - void (*dwb_program_output_csc)( struct dwbc *dwbc, enum dc_color_space color_space, @@ -216,17 +224,17 @@ struct dwbc_funcs { bool (*dwb_ogam_set_output_transfer_func)( struct dwbc *dwbc, const struct dc_transfer_func *in_transfer_func_dwb_ogam); - +#endif //TODO: merge with output_transfer_func? 
bool (*dwb_ogam_set_input_transfer_func)( struct dwbc *dwbc, const struct dc_transfer_func *in_transfer_func_dwb_ogam); -#endif + + void (*get_drr_time_stamp)( + struct dwbc *dwbc, uint32_t *time_stamp); + bool (*get_dwb_status)( struct dwbc *dwbc); - void (*dwb_set_scaler)( - struct dwbc *dwbc, - struct dc_dwb_params *params); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index dcae23faee..c80ebb407a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -44,10 +44,11 @@ */ #define MAX_PIPES 6 #define MAX_PHANTOM_PIPES (MAX_PIPES / 2) +#define MAX_LINKS (MAX_PIPES * 2) #define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 #define MAX_HPO_DP2_ENCODERS 4 -#define MAX_HPO_DP2_LINK_ENCODERS 2 +#define MAX_HPO_DP2_LINK_ENCODERS 4 struct gamma_curve { uint32_t offset; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index dbe7afa9d3..af9183f5d6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -163,12 +163,11 @@ struct link_encoder_funcs { enum signal_type (*get_dig_mode)( struct link_encoder *enc); + void (*set_dio_phy_mux)( struct link_encoder *enc, enum encoder_type_select sel, uint32_t hpo_inst); - void (*set_dig_output_mode)( - struct link_encoder *enc, uint8_t pix_per_container); }; /* diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index b72fb314d8..86c12cd6f4 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -50,11 +50,13 @@ struct dcn_watermarks { uint32_t usr_retraining_ns; }; -struct dcn_watermark_set { - struct dcn_watermarks a; - struct dcn_watermarks b; - struct dcn_watermarks c; - struct dcn_watermarks d; +union dcn_watermark_set { + struct { + struct dcn_watermarks a; + struct dcn_watermarks b; + struct dcn_watermarks c; + struct dcn_watermarks d; + }; // legacy }; struct dce_watermarks { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h index 9a8bf6ec70..8d32e525f0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h @@ -93,6 +93,8 @@ struct dcn_otg_state { uint32_t vertical_interrupt1_line; uint32_t vertical_interrupt2_en; uint32_t vertical_interrupt2_line; + uint32_t otg_master_update_lock; + uint32_t otg_double_buffer_control; }; void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index a15efadb91..75b9ec21f2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -178,10 +178,6 @@ struct stream_encoder_funcs { void (*stop_dp_info_packets)( struct stream_encoder *enc); - void (*reset_fifo)( - struct stream_encoder *enc - ); - void (*dp_blank)( struct dc_link *link, struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index ffad8fe16c..cd68ecc242 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -64,6 +64,12 @@ struct drr_params { 
bool immediate_flip; }; +struct long_vtotal_params { + uint32_t vertical_total_min; + uint32_t vertical_total_max; + uint32_t vertical_blank_start; +}; + #define LEFT_EYE_3D_PRIMARY_SURFACE 1 #define RIGHT_EYE_3D_PRIMARY_SURFACE 0 @@ -331,6 +337,7 @@ struct timing_generator_funcs { void (*init_odm)(struct timing_generator *tg); void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg); + void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params); void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h new file mode 100644 index 0000000000..51da368f5c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h @@ -0,0 +1,53 @@ +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + */ + +#ifndef __DC_VPG_H__ +#define __DC_VPG_H__ + +struct dc_context; +struct dc_info_packet; + +struct vpg; + +struct vpg_funcs { + void (*update_generic_info_packet)( + struct vpg *vpg, + uint32_t packet_index, + const struct dc_info_packet *info_packet, + bool immediate_update); + + void (*vpg_poweron)( + struct vpg *vpg); + + void (*vpg_powerdown)( + struct vpg *vpg); +}; + +struct vpg { + const struct vpg_funcs *funcs; + struct dc_context *ctx; + int inst; +}; + +#endif /* DC_INC_VPG_H_ */ \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index bf29fc58ea..7ab8ba5e23 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -288,7 +288,7 @@ struct link_service { struct dc_link *link, uint32_t coasting_vtotal); bool (*edp_replay_residency)(const struct dc_link *link, unsigned int *residency, const bool is_start, - const bool is_alpm); + const enum pr_residency_mode mode); bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link, const unsigned int *power_opts, uint32_t coasting_vtotal); diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 77a60aa9f2..361ad6b16b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -508,6 +508,17 @@ int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( struct resource_context *new_res_ctx, const struct resource_pool *pool); +/* + * Look for a free pipe in new resource context that is used as a secondary DPP + * pipe in current resource context. + * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int resource_find_free_pipe_used_as_cur_sec_dpp( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool); + /* * Look for a free pipe in new resource context that is used as a secondary DPP * pipe in any MPCC combine in current resource context. 
@@ -573,13 +584,6 @@ bool get_temp_dp_link_res(struct dc_link *link, struct link_resource *link_res, struct dc_link_settings *link_settings); -#if defined(CONFIG_DRM_AMD_DC_FP) -struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( - const struct resource_context *res_ctx, - const struct resource_pool *pool, - const struct dc_link *link); -#endif - void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, struct dc_state *context); @@ -615,4 +619,10 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, struct pipe_ctx *pipe_ctx); bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream); + +/* Setup dc callbacks for dml2 + * @dc: the display core structure + * @dml2_options: struct to hold callbacks + */ +void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options); #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 22b24749c9..8d1a1cc94a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -884,7 +884,7 @@ void dp_set_preferred_link_settings(struct dc *dc, { int i; struct pipe_ctx *pipe; - struct dc_stream_state *link_stream; + struct dc_stream_state *link_stream = 0; struct dc_link_settings store_settings = *link_setting; link->preferred_link_setting = store_settings; diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c index fbcd8fb58e..c8c55f196f 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c @@ -24,7 +24,6 @@ */ #include "link_dp_trace.h" #include "link/protocols/link_dpcd.h" -#include "link.h" void dp_trace_init(struct dc_link *link) { diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index b8c4a04dd1..0d523dc43d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -516,8 +516,8 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link) static void read_current_link_settings_on_detect(struct dc_link *link) { union lane_count_set lane_count_set = {0}; - uint8_t link_bw_set; - uint8_t link_rate_set; + uint8_t link_bw_set = 0; + uint8_t link_rate_set = 0; uint32_t read_dpcd_retry_cnt = 10; enum dc_status status = DC_ERROR_UNEXPECTED; int i; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index a72de44a57..b53ad18dbf 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -55,6 +55,8 @@ #include "dccg.h" #include "clk_mgr.h" #include "atomfirmware.h" +#include "vpg.h" + #define DC_LOGGER \ dc_logger #define DC_LOGGER_INIT(logger) \ @@ -67,7 +69,6 @@ #define RETIMER_REDRIVER_INFO(...) 
\ DC_LOG_RETIMER_REDRIVER( \ __VA_ARGS__) -#include "dc/dcn30/dcn30_vpg.h" #define MAX_MTP_SLOT_COUNT 64 #define LINK_TRAINING_ATTEMPTS 4 @@ -127,7 +128,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) if (link->ep_type == DISPLAY_ENDPOINT_PHY && link->link_enc->funcs->get_dig_frontend && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { - unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc); + int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc); if (fe != ENGINE_ID_UNKNOWN) for (j = 0; j < dc->res_pool->stream_enc_count; j++) { @@ -725,7 +726,7 @@ static void set_avmute(struct pipe_ctx *pipe_ctx, bool enable) static void enable_mst_on_sink(struct dc_link *link, bool enable) { - unsigned char mstmCntl; + unsigned char mstmCntl = 0; core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1); if (enable) @@ -803,7 +804,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) if (enable) { struct dsc_config dsc_cfg; - struct dsc_optc_config dsc_optc_cfg; + struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; /* Enable DSC hw block */ @@ -1575,7 +1576,7 @@ static bool write_128b_132b_sst_payload_allocation_table( break; } } else { - union dpcd_rev dpcdRev; + union dpcd_rev dpcdRev = {0}; if (core_link_read_dpcd( link, @@ -2119,7 +2120,7 @@ static enum dc_status enable_link_dp_mst( struct pipe_ctx *pipe_ctx) { struct dc_link *link = pipe_ctx->stream->link; - unsigned char mstm_cntl; + unsigned char mstm_cntl = 0; /* sink signal type after MST branch is MST. Multiple MST sinks * share one link. Link DP PHY is enable or training only once. @@ -2285,6 +2286,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; + enum dp_panel_mode panel_mode_dp = dp_get_panel_mode(link); DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -2311,6 +2313,8 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) dc->hwss.disable_audio_stream(pipe_ctx); + edp_set_panel_assr(link, pipe_ctx, &panel_mode_dp, false); + update_psp_stream_config(pipe_ctx, true); dc->hwss.blank_stream(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index f608dd3bba..d487dfcd21 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -992,7 +992,7 @@ enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link static void read_dp_device_vendor_id(struct dc_link *link) { - struct dp_device_vendor_id dp_id; + struct dp_device_vendor_id dp_id = {0}; /* read IEEE branch device id */ core_link_read_dpcd( @@ -1087,7 +1087,7 @@ static void get_active_converter_info( } if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) { - uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/ + uint8_t det_caps[16] = {0}; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/ union dwnstream_port_caps_byte0 *port_caps = (union dwnstream_port_caps_byte0 *)det_caps; if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0, @@ -1172,7 +1172,7 @@ static void get_active_converter_info( set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); { - struct dp_sink_hw_fw_revision dp_hw_fw_revision; + struct dp_sink_hw_fw_revision 
dp_hw_fw_revision = {0}; core_link_read_dpcd( link, @@ -1242,7 +1242,7 @@ static void apply_usbc_combo_phy_reset_wa(struct dc_link *link, bool dp_overwrite_extended_receiver_cap(struct dc_link *link) { - uint8_t dpcd_data[16]; + uint8_t dpcd_data[16] = {0}; uint32_t read_dpcd_retry_cnt = 3; enum dc_status status = DC_ERROR_UNEXPECTED; union dp_downstream_port_present ds_port = { 0 }; @@ -1408,7 +1408,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) static void retrieve_cable_id(struct dc_link *link) { - union dp_cable_id usbc_cable_id; + union dp_cable_id usbc_cable_id = {0}; link->dpcd_caps.cable_id.raw = 0; core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, @@ -1475,7 +1475,7 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link) enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) { - uint8_t lttpr_dpcd_data[8]; + uint8_t lttpr_dpcd_data[8] = {0}; enum dc_status status; bool is_lttpr_present; @@ -1939,8 +1939,8 @@ void detect_edp_sink_caps(struct dc_link *link) uint32_t entry; uint32_t link_rate_in_khz; enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; - uint8_t backlight_adj_cap; - uint8_t general_edp_cap; + uint8_t backlight_adj_cap = 0; + uint8_t general_edp_cap = 0; retrieve_link_cap(link); link->dpcd_caps.edp_supported_link_rates_count = 0; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c index 5a965c26bf..0f1c411523 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -166,7 +166,7 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link) uint8_t idx = 0xFF; int i; - for (i = 0; i < MAX_PIPES * 2; ++i) { + for (i = 0; i < MAX_LINKS; ++i) { if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) @@ -196,7 +196,7 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in struct dc_link *link_dpia_primary, *link_dpia_secondary; int total_bw = 0; - for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) { + for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) { if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) continue; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index ba69874be5..0fcf0b8530 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -120,7 +120,7 @@ bool dp_parse_link_loss_status( static bool handle_hpd_irq_psr_sink(struct dc_link *link) { - union dpcd_psr_configuration psr_configuration; + union dpcd_psr_configuration psr_configuration = {0}; if (!link->psr_settings.psr_feature_enabled) return false; @@ -186,9 +186,9 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link) static void handle_hpd_irq_replay_sink(struct dc_link *link) { - union dpcd_replay_configuration replay_configuration; + union dpcd_replay_configuration replay_configuration = {0}; /*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/ - union psr_error_status replay_error_status; + union psr_error_status replay_error_status = {0}; if (!link->replay_settings.replay_feature_enabled) return; @@ -280,7 +280,7 @@ void dp_handle_link_loss(struct dc_link *link) static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data) { enum dc_status 
retval; - union lane_align_status_updated dpcd_lane_status_updated; + union lane_align_status_updated dpcd_lane_status_updated = {0}; retval = core_link_read_dpcd( link, @@ -320,7 +320,7 @@ enum dc_status dp_read_hpd_rx_irq_data( /* Read 14 bytes in a single read and then copy only the required fields. * This is more efficient than doing it in two separate AUX reads. */ - uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1]; + uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1] = {0}; retval = core_link_read_dpcd( link, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index e538c67d3e..1818970b8e 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -1071,7 +1071,7 @@ enum dc_status dpcd_set_link_settings( * MUX chip gets link rate set back before link training. */ if (link->connector_signal == SIGNAL_TYPE_EDP) { - uint8_t supported_link_rates[16]; + uint8_t supported_link_rates[16] = {0}; core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, supported_link_rates, sizeof(supported_link_rates)); @@ -1587,21 +1587,7 @@ bool perform_link_training_with_retries( msleep(delay_dp_power_up_in_ms); } - if (panel_mode == DP_PANEL_MODE_EDP) { - struct cp_psp *cp_psp = &stream->ctx->cp_psp; - - if (cp_psp && cp_psp->funcs.enable_assr) { - /* ASSR is bound to fail with unsigned PSP - * verstage used during devlopment phase. - * Report and continue with eDP panel mode to - * perform eDP link training with right settings - */ - bool result; - result = cp_psp->funcs.enable_assr(cp_psp->handle, link); - if (!result && link->panel_mode != DP_PANEL_MODE_EDP) - panel_mode = DP_PANEL_MODE_DEFAULT; - } - } + edp_set_panel_assr(link, pipe_ctx, &panel_mode, true); dp_set_panel_mode(link, panel_mode); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c index 5d36bab002..edb21d2195 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c @@ -291,7 +291,7 @@ static enum link_training_result dpia_training_cr_non_transparent( { enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0; uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ - enum dc_status status; + enum dc_status status = DC_ERROR_UNEXPECTED; uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */ uint32_t retry_count = 0; uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; /* From DP spec, CR read interval is always 100us. */ @@ -617,7 +617,7 @@ static enum link_training_result dpia_training_eq_non_transparent( enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ; uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. 
*/ uint32_t retries_eq = 0; - enum dc_status status; + enum dc_status status = DC_ERROR_UNEXPECTED; enum dc_dp_training_pattern tr_pattern; uint32_t wait_time_microsec = 0; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c index c5de6ed5bf..a72c898b64 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c @@ -130,7 +130,7 @@ static uint32_t dpcd_get_next_partition_size(const uint32_t address, const uint3 * XXX: Do not allow any two address ranges in this array to overlap */ static const struct dpcd_address_range mandatory_dpcd_blocks[] = { - { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT }}; + { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_128B132B_RATES }}; /* * extend addresses to read all mandatory blocks together diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 3baa2bdd6d..ad9aca790d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -38,6 +38,7 @@ #include "dc/dc_dmub_srv.h" #include "dce/dmub_replay.h" #include "abm.h" +#include "resource.h" #define DC_LOGGER \ link->ctx->logger #define DC_LOGGER_INIT(logger) @@ -320,8 +321,8 @@ bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing) { struct dc_link_settings link_setting; - uint8_t link_bw_set; - uint8_t link_rate_set; + uint8_t link_bw_set = 0; + uint8_t link_rate_set = 0; uint32_t req_bw; union lane_count_set lane_count_set = {0}; @@ -1055,7 +1056,7 @@ bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal) } bool edp_replay_residency(const struct dc_link *link, - unsigned int *residency, const bool is_start, const bool is_alpm) + unsigned int *residency, const bool is_start, const enum pr_residency_mode mode) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; @@ -1064,8 +1065,11 @@ bool edp_replay_residency(const struct dc_link *link, if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; + if (!residency) + return false; + if (replay != NULL && link->replay_settings.replay_feature_enabled) - replay->funcs->replay_residency(replay, panel_inst, residency, is_start, is_alpm); + replay->funcs->replay_residency(replay, panel_inst, residency, is_start, mode); else *residency = 0; @@ -1145,3 +1149,66 @@ int edp_get_target_backlight_pwm(const struct dc_link *link) return (int) abm->funcs->get_target_backlight(abm); } + +static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link, + struct link_resource *link_res, bool enable) +{ + union dmub_rb_cmd cmd; + bool use_hpo_dp_link_enc = false; + uint8_t link_enc_index = 0; + uint8_t phy_type = 0; + uint8_t phy_id = 0; + + if (!pDC->config.use_assr_psp_message) + return; + + memset(&cmd, 0, sizeof(cmd)); + + link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A; + + if (link_res->hpo_dp_link_enc) { + link_enc_index = link_res->hpo_dp_link_enc->inst; + use_hpo_dp_link_enc = true; + } + + if (enable) + phy_type = ((dp_get_panel_mode(link) == DP_PANEL_MODE_EDP) ? 
1 : 0); + + phy_id = resource_transmitter_to_phy_idx(pDC, link->link_enc->transmitter); + + cmd.assr_enable.header.type = DMUB_CMD__PSP; + cmd.assr_enable.header.sub_type = DMUB_CMD__PSP_ASSR_ENABLE; + cmd.assr_enable.assr_data.enable = enable; + cmd.assr_enable.assr_data.phy_port_type = phy_type; + cmd.assr_enable.assr_data.phy_port_id = phy_id; + cmd.assr_enable.assr_data.link_enc_index = link_enc_index; + cmd.assr_enable.assr_data.hpo_mode = use_hpo_dp_link_enc; + + dc_wake_and_execute_dmub_cmd(pDC->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +void edp_set_panel_assr(struct dc_link *link, struct pipe_ctx *pipe_ctx, + enum dp_panel_mode *panel_mode, bool enable) +{ + struct link_resource *link_res = &pipe_ctx->link_res; + struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; + + if (*panel_mode != DP_PANEL_MODE_EDP) + return; + + if (link->dc->config.use_assr_psp_message) { + edp_set_assr_enable(link->dc, link, link_res, enable); + } else if (cp_psp && cp_psp->funcs.enable_assr && enable) { + /* ASSR is bound to fail with unsigned PSP + * verstage used during devlopment phase. + * Report and continue with eDP panel mode to + * perform eDP link training with right settings + */ + bool result; + + result = cp_psp->funcs.enable_assr(cp_psp->handle, link); + + if (!result && link->panel_mode != DP_PANEL_MODE_EDP) + *panel_mode = DP_PANEL_MODE_DEFAULT; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index a158c6234d..cb6d95cc36 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -61,7 +61,7 @@ bool edp_send_replay_cmd(struct dc_link *link, union dmub_replay_cmd_set *cmd_data); bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal); bool edp_replay_residency(const struct dc_link *link, - unsigned int *residency, const bool is_start, const bool is_alpm); + unsigned int *residency, const bool is_start, const enum pr_residency_mode mode); bool edp_get_replay_state(const struct dc_link *link, uint64_t *state); bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, const unsigned int *power_opts, uint32_t coasting_vtotal); @@ -76,4 +76,6 @@ bool edp_receiver_ready_T9(struct dc_link *link); bool edp_receiver_ready_T7(struct dc_link *link); bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable); void edp_set_panel_power(struct dc_link *link, bool powerOn); +void edp_set_panel_assr(struct dc_link *link, struct pipe_ctx *pipe_ctx, + enum dp_panel_mode *panel_mode, bool enable); #endif /* __DC_LINK_EDP_POWER_CONTROL_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c index e3d729ab5b..caa617883f 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c @@ -35,7 +35,7 @@ bool link_get_hpd_state(struct dc_link *link) { - uint32_t state; + uint32_t state = 0; dal_gpio_lock_pin(link->hpd_gpio); dal_gpio_get_value(link->hpd_gpio, &state); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c index 0e8f4f36c8..f109a101d8 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c @@ -1383,6 +1383,9 @@ void optc1_read_otg_state(struct optc 
*optc1, REG_GET(OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, &s->vertical_interrupt2_line); + + s->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK); + s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL); } bool optc1_get_otg_active_size(struct timing_generator *optc, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index 6c2e84d396..2f3bd7648b 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -129,6 +129,8 @@ struct dcn_optc_registers { uint32_t OTG_V_TOTAL_MID; uint32_t OTG_V_TOTAL_MIN; uint32_t OTG_V_TOTAL_CONTROL; + uint32_t OTG_V_COUNT_STOP_CONTROL; + uint32_t OTG_V_COUNT_STOP_CONTROL2; uint32_t OTG_TRIGA_CNTL; uint32_t OTG_TRIGA_MANUAL_TRIG; uint32_t OTG_MANUAL_FLOW_CONTROL; @@ -515,12 +517,15 @@ struct dcn_optc_registers { type MANUAL_FLOW_CONTROL;\ type MANUAL_FLOW_CONTROL_SEL; +#define V_TOTAL_REGS(type) + #define TG_REG_FIELD_LIST(type) \ TG_REG_FIELD_LIST_DCN1_0(type)\ type OTG_V_SYNC_MODE;\ type OTG_DRR_TRIGGER_WINDOW_START_X;\ type OTG_DRR_TRIGGER_WINDOW_END_X;\ type OTG_DRR_V_TOTAL_CHANGE_LIMIT;\ + V_TOTAL_REGS(type)\ type OTG_OUT_MUX;\ type OTG_M_CONST_DTO_PHASE;\ type OTG_M_CONST_DTO_MODULO;\ @@ -581,7 +586,9 @@ struct dcn_optc_registers { type OTG_CRC1_WINDOWB_X_END_READBACK;\ type OTG_CRC1_WINDOWB_Y_START_READBACK;\ type OTG_CRC1_WINDOWB_Y_END_READBACK;\ - type OPTC_FGCG_REP_DIS; + type OPTC_FGCG_REP_DIS;\ + type OTG_V_COUNT_STOP;\ + type OTG_V_COUNT_STOP_TIMER; struct dcn_optc_shift { TG_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c index 5b15475088..d393be30df 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c @@ -32,6 +32,7 @@ #include "reg_helper.h" #include "dc.h" #include "dcn_calc_math.h" +#include "dc_dmub_srv.h" #define REG(reg)\ optc1->tg_regs->reg @@ -213,6 +214,167 @@ static bool optc35_configure_crc(struct timing_generator *optc, return true; } +static void optc35_setup_manual_trigger(struct timing_generator *optc) +{ + if (!optc || !optc->ctx) + return; + + struct optc *optc1 = DCN10TG_FROM_TG(optc); + struct dc *dc = optc->ctx->dc; + + if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams) + dc_dmub_srv_set_drr_manual_trigger_cmd(dc, optc->inst); + else { + /* + * MIN_MASK_EN is gone and MASK is now always enabled. + * + * To get it to work with manual trigger we need to make sure + * we program the correct bit.
+ */ + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, 1, + OTG_V_TOTAL_MAX_SEL, 1, + OTG_FORCE_LOCK_ON_EVENT, 0, + OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ + + // Setup manual flow control for EOF via TRIG_A + if (optc->funcs && optc->funcs->setup_manual_trigger) + optc->funcs->setup_manual_trigger(optc); + } +} + +void optc35_set_drr( + struct timing_generator *optc, + const struct drr_params *params) +{ + if (!optc || !params) + return; + + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t max_otg_v_total = optc1->max_v_total - 1; + + if (params != NULL && + params->vertical_total_max > 0 && + params->vertical_total_min > 0) { + + if (params->vertical_total_mid != 0) { + + REG_SET(OTG_V_TOTAL_MID, 0, + OTG_V_TOTAL_MID, params->vertical_total_mid - 1); + + REG_UPDATE_2(OTG_V_TOTAL_CONTROL, + OTG_VTOTAL_MID_REPLACING_MAX_EN, 1, + OTG_VTOTAL_MID_FRAME_NUM, + (uint8_t)params->vertical_total_mid_frame_num); + + } + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, + params->vertical_total_min - 1, params->vertical_total_max - 1); + optc35_setup_manual_trigger(optc); + } else { + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, 0, 0); + } + + REG_WRITE(OTG_V_COUNT_STOP_CONTROL, max_otg_v_total); + REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0); +} + +static void optc35_set_long_vtotal( + struct timing_generator *optc, + const struct long_vtotal_params *params) +{ + if (!optc || !params) + return; + + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t vcount_stop_timer = 0, vcount_stop = 0; + uint32_t max_otg_v_total = optc1->max_v_total - 1; + + if (params->vertical_total_min <= max_otg_v_total && params->vertical_total_max <= max_otg_v_total) + return; + + if (params->vertical_total_max == 0 || params->vertical_total_min == 0) { + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, 0, 0); + } else if (params->vertical_total_max == params->vertical_total_min) { + vcount_stop = params->vertical_blank_start; + vcount_stop_timer = params->vertical_total_max - max_otg_v_total; + + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, 1, + OTG_V_TOTAL_MAX_SEL, 1, + OTG_FORCE_LOCK_ON_EVENT, 0, + OTG_SET_V_TOTAL_MIN_MASK, 0); + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, max_otg_v_total, max_otg_v_total); + + REG_WRITE(OTG_V_COUNT_STOP_CONTROL, vcount_stop); + REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, vcount_stop_timer); + } else { + // Variable rate, keep DRR trigger mask + if (params->vertical_total_min > max_otg_v_total) { + // cannot be supported + // If MAX_OTG_V_COUNT < DRR trigger < v_total_min < v_total_max, + // DRR trigger will drop the vtotal counting directly to a new frame. + // But it should trigger between v_total_min and v_total_max. 
+ ASSERT(0); + + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, 0, 0); + + REG_WRITE(OTG_V_COUNT_STOP_CONTROL, max_otg_v_total); + REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0); + } else { + // For total_min <= MAX_OTG_V_COUNT and total_max > MAX_OTG_V_COUNT + vcount_stop = params->vertical_total_min; + vcount_stop_timer = params->vertical_total_max - max_otg_v_total; + + // Example: + // params->vertical_total_min 1000 + // params->vertical_total_max 2000 + // MAX_OTG_V_COUNT_STOP = 1500 + // + // If DRR event not happened, + // time 0,1,2,3,4,...1000,1001,........,1500,1501,1502, ...1999 + // vcount 0,1,2,3,4....1000...................,1001,1002,1003,...1399 + // vcount2 0,1,2,3,4,..499, + // else (DRR event happened, ex : at line 1004) + // time 0,1,2,3,4,...1000,1001.....1004, 0 + // vcount 0,1,2,3,4....1000,.............. 0 (new frame) + // vcount2 0,1,2, 3, - + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, + params->vertical_total_min - 1, max_otg_v_total); + optc35_setup_manual_trigger(optc); + + REG_WRITE(OTG_V_COUNT_STOP_CONTROL, vcount_stop); + REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, vcount_stop_timer); + } + } +} + static struct timing_generator_funcs dcn35_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -245,7 +407,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = { .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable, .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable, .enable_optc_clock = optc1_enable_optc_clock, - .set_drr = optc31_set_drr, + .set_drr = optc35_set_drr, .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, .set_vtotal_min_max = optc1_set_vtotal_min_max, .set_static_screen_control = optc1_set_static_screen_control, @@ -275,6 +437,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = { .setup_manual_trigger = optc2_setup_manual_trigger, .get_hw_timing = optc1_get_hw_timing, .init_odm = optc3_init_odm, + .set_long_vtotal = optc35_set_long_vtotal, }; void dcn35_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h index 1f422e4c46..d077e23923 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h @@ -65,10 +65,14 @@ SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK, OTG_CRC1_WINDOWB_X_END_READBACK, mask_sh),\ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_START_READBACK, mask_sh),\ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_END_READBACK, mask_sh),\ - SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh) + SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh),\ + SF(OTG0_OTG_V_COUNT_STOP_CONTROL, OTG_V_COUNT_STOP, mask_sh),\ + SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh) void dcn35_timing_generator_init(struct optc *optc1); void dcn35_timing_generator_set_fgcg(struct optc *optc1, bool enable); +void optc35_set_drr(struct timing_generator *optc, const struct drr_params *params); + #endif /* __DC_OPTC_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile index 184b1f23aa..db9048974d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/Makefile +++ 
b/drivers/gpu/drm/amd/display/dc/resource/Makefile @@ -102,10 +102,6 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN21) ############################################################################### -############################################################################### - -############################################################################### - RESOURCE_DCN30 = dcn30_resource.o AMD_DAL_RESOURCE_DCN30 = $(addprefix $(AMDDALPATH)/dc/resource/dcn30/,$(RESOURCE_DCN30)) @@ -202,6 +198,4 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN351) ############################################################################### -############################################################################### - endif diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index d1edac46c9..88afb2a30e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -864,8 +864,6 @@ static struct clock_source *find_matching_pll( default: return NULL; } - - return NULL; } static enum dc_status build_mapped_resource( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c index 20662edd0a..621825a51f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c @@ -1060,7 +1060,7 @@ static bool dce120_resource_construct( struct irq_service_init_data irq_init_data; static const struct resource_create_funcs *res_funcs; bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev); - uint32_t pipe_fuses; + uint32_t pipe_fuses = 0; ctx->dc_bios->regs = &bios_regs; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index 35a2cce0c2..56ee45e12b 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -56,7 +56,6 @@ #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_i2c.h" -/* TODO remove this include */ #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_7_1_d.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index d08d109692..563c5eec83 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -513,7 +513,7 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = true, .nv12 = true, .fp16 = true, - .p010 = true + .p010 = false }, .max_upscale_factor = { @@ -569,6 +569,7 @@ static const struct dc_debug_options debug_defaults_diags = { .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_legacy_fast_update = true, }; static void dcn10_dpp_destroy(struct dpp **dpp) @@ -1631,6 +1632,7 @@ static bool dcn10_resource_construct( /* valid pipe num */ pool->base.pipe_count = j; pool->base.timing_generator_count = j; + pool->base.mpcc_count = j; /* within dml lib, it is hard code to 4. 
If ASIC pipe is fused, * the value may be changed diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 622214b365..0a939437e1 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -62,6 +62,9 @@ #include "dcn20/dcn20_vmid.h" #include "dce/dce_panel_cntl.h" +#include "dcn20/dcn20_dwb.h" +#include "dcn20/dcn20_mmhubbub.h" + #include "navi10_ip_offset.h" #include "dcn/dcn_2_0_0_offset.h" @@ -71,9 +74,6 @@ #include "nbio/nbio_2_3_offset.h" -#include "dcn20/dcn20_dwb.h" -#include "dcn20/dcn20_mmhubbub.h" - #include "mmhub/mmhub_2_0_0_offset.h" #include "mmhub/mmhub_2_0_0_sh_mask.h" @@ -83,11 +83,10 @@ #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "vm_helper.h" -#include "link_enc_cfg.h" - -#include "amdgpu_socbb.h" +#include "link_enc_cfg.h" #include "link.h" + #define DC_LOGGER_INIT(logger) #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL @@ -1282,8 +1281,13 @@ void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx) static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) { + struct resource_pool *pool = pipe_ctx->stream->ctx->dc->res_pool; - dcn20_build_pipe_pix_clk_params(pipe_ctx); + if (pool->funcs->build_pipe_pix_clk_params) { + pool->funcs->build_pipe_pix_clk_params(pipe_ctx); + } else { + dcn20_build_pipe_pix_clk_params(pipe_ctx); + } pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index 914b234d7f..070a4efb30 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -55,7 +55,6 @@ #include "dce110/dce110_resource.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" -#include "dcn201/dcn201_hubbub.h" #include "dcn10/dcn10_resource.h" #include "cyan_skillfish_ip_offset.h" @@ -182,6 +181,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn201_soc = { .socclk_mhz = 1254.0, .dram_speed_mts = 14000.0, }, + /* state4 is not an actual state, just defines unsupported for dml*/ { .state = 4, .dscclk_mhz = 400.0, @@ -566,6 +566,8 @@ static const struct resource_caps res_cap_dnc201 = { .num_audio = 2, .num_stream_encoder = 2, .num_pll = 2, + .num_dwb = 0, + .num_dsc = 0, .num_ddc = 2, }; @@ -612,7 +614,7 @@ static const struct dc_debug_options debug_defaults_drv = { .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_tri_buf = false, + .enable_tri_buf = true, .enable_legacy_fast_update = true, .using_dml2 = false, }; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 65d337731f..8663cbc3d1 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -581,32 +581,6 @@ static const struct resource_caps res_cap_rn = { .num_dsc = 3, }; -#ifdef DIAGS_BUILD -static const struct resource_caps res_cap_rn_FPGA_4pipe = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 7, - .num_stream_encoder = 4, - .num_pll = 4, - .num_dwb = 1, - .num_ddc = 4, - .num_dsc = 0, -}; - -static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = { - .num_timing_generator = 2, - .num_opp 
= 2, - .num_video_plane = 2, - .num_audio = 7, - .num_stream_encoder = 2, - .num_pll = 4, - .num_dwb = 1, - .num_ddc = 4, - .num_dsc = 2, -}; -#endif - static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCN_UNIVERSAL, .per_pixel_alpha = true, @@ -1415,16 +1389,11 @@ static bool dcn21_resource_construct( struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; uint32_t pipe_fuses = read_pipe_fuses(ctx); - uint32_t num_pipes; + uint32_t num_pipes = 0; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap_rn; -#ifdef DIAGS_BUILD - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - //pool->base.res_cap = &res_cap_nv10_FPGA_2pipe_dsc; - pool->base.res_cap = &res_cap_rn_FPGA_4pipe; -#endif pool->base.funcs = &dcn21_res_pool_funcs; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index b427a98066..f35cc30783 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -1639,7 +1639,7 @@ noinline bool dcn30_internal_validate_bw( int split[MAX_PIPES] = { 0 }; bool merge[MAX_PIPES] = { false }; bool newly_split[MAX_PIPES] = { false }; - int pipe_cnt, i, pipe_idx, vlevel; + int pipe_cnt, i, pipe_idx, vlevel = 0; struct vba_vars_st *vba = &context->bw_ctx.dml.vba; ASSERT(pipes); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index 25cd6236b0..8bc1bcaeaa 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -1143,7 +1143,7 @@ static bool dcn303_resource_construct( int i; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; - struct ddc_service_init_data ddc_init_data; + struct ddc_service_init_data ddc_init_data = {0}; ctx->dc_bios->regs = &bios_regs; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index f38de53911..d4c3e2754f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -75,7 +75,6 @@ #include "dcn30/dcn30_dwb.h" #include "dcn30/dcn30_mmhubbub.h" -// TODO: change include headers /amd/include/asic_reg after upstream #include "yellow_carp_offset.h" #include "dcn/dcn_3_1_2_offset.h" #include "dcn/dcn_3_1_2_sh_mask.h" @@ -1647,7 +1646,7 @@ int dcn31_populate_dml_pipes_from_context( { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool upscaled = false; DC_FP_START(); @@ -1776,7 +1775,7 @@ bool dcn31_validate_bandwidth(struct dc *dc, out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); DC_FP_END(); - // Disable fast_validate to set min dcfclk in alculate_wm_and_dlg + // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg if (pipe_cnt == 0) fast_validate = false; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index 2791fc45bb..ff50f43e4c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -925,27 +925,10 @@ static const struct dc_debug_options 
debug_defaults_drv = { }, .seamless_boot_odm_combine = true, + .enable_legacy_fast_update = true, .using_dml2 = false, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1943,8 +1926,6 @@ static bool dcn314_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else - dc->debug = debug_defaults_diags; /* Disable pipe power gating */ dc->debug.disable_dpp_power_gate = true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index efa5627b0c..5fd52c5fce 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -125,7 +125,6 @@ #include "link_enc_cfg.h" #define DCN3_16_MAX_DET_SIZE 384 -#define DCN3_16_MIN_COMPBUF_SIZE_KB 128 #define DCN3_16_CRB_SEGMENT_SIZE_KB 64 enum dcn31_clk_src_array_id { @@ -1616,7 +1615,7 @@ static int dcn316_populate_dml_pipes_from_context( { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB; DC_FP_START(); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 1f5a91b764..d84c8e0e5c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1804,7 +1804,9 @@ bool dcn32_validate_bandwidth(struct dc *dc, bool out = false; if (dc->debug.using_dml2) - out = dml2_validate(dc, context, fast_validate); + out = dml2_validate(dc, context, + context->power_source == DC_POWER_SOURCE_DC ? 
context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, + fast_validate); else out = dml1_validate(dc, context, fast_validate); return out; @@ -1820,9 +1822,48 @@ int dcn32_populate_dml_pipes_from_context( struct pipe_ctx *pipe = NULL; bool subvp_in_use = false; struct dc_crtc_timing *timing; + int subvp_main_pipe_index = -1; + enum mall_stream_type mall_type; + bool single_display_subvp = false; + struct dc_stream_state *stream = NULL; + int num_subvp_main = 0; + int num_subvp_phantom = 0; + int num_subvp_none = 0; + int odm_slice_count; dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + /* For single display subvp, look for subvp main so if we have phantom + * pipe, we can set odm policy to match main pipe + */ + for (i = 0; i < context->stream_count; i++) { + stream = context->streams[i]; + mall_type = dc_state_get_stream_subvp_type(context, stream); + if (mall_type == SUBVP_MAIN) + num_subvp_main++; + else if (mall_type == SUBVP_PHANTOM) + num_subvp_phantom++; + else + num_subvp_none++; + } + if (num_subvp_main == 1 && num_subvp_phantom == 1 && num_subvp_none == 0) + single_display_subvp = true; + + if (single_display_subvp) { + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &res_ctx->pipe_ctx[i]; + if (!res_ctx->pipe_ctx[i].stream) + continue; + + mall_type = dc_state_get_pipe_subvp_type(context, pipe); + if (mall_type == SUBVP_MAIN) { + if (resource_is_pipe_type(pipe, OTG_MASTER)) + subvp_main_pipe_index = i; + } + pipe_cnt++; + } + } + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { if (!res_ctx->pipe_ctx[i].stream) @@ -1837,7 +1878,21 @@ int dcn32_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; if (dc->config.enable_windowed_mpo_odm && dc->debug.enable_single_display_2to1_odm_policy) { - switch (resource_get_odm_slice_count(pipe)) { + /* For single display subvp, if pipe is phantom pipe, + * then copy odm policy from subvp main pipe + */ + mall_type = dc_state_get_pipe_subvp_type(context, pipe); + if (single_display_subvp && (mall_type == SUBVP_PHANTOM)) { + if (subvp_main_pipe_index < 0) { + odm_slice_count = -1; + ASSERT(0); + } else { + odm_slice_count = resource_get_odm_slice_count(&res_ctx->pipe_ctx[subvp_main_pipe_index]); + } + } else { + odm_slice_count = resource_get_odm_slice_count(pipe); + } + switch (odm_slice_count) { case 2: pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; break; @@ -1850,6 +1905,7 @@ int dcn32_populate_dml_pipes_from_context( } else { pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; } + pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19; @@ -1917,6 +1973,22 @@ int dcn32_populate_dml_pipes_from_context( return pipe_cnt; } +unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes) +{ + uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways; + + /* add 2 lines for worst case alignment */ + cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2; + + total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; + lines_per_way = total_cache_lines / dc->caps.cache_num_ways; + num_ways = cache_lines_used / lines_per_way; + if (cache_lines_used % lines_per_way > 0) + num_ways++; + + return num_ways; +} + static struct dc_cap_funcs cap_funcs 
= { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap, .get_subvp_en = dcn32_subvp_in_use, @@ -1934,10 +2006,22 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context, static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { + struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; + + memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); + DC_FP_START(); + dcn32_update_bw_bounding_box_fpu(dc, bw_params); + + dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); + + dml2_opt->use_clock_dc_limits = true; + if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) + dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + DC_FP_END(); } @@ -1965,6 +2049,7 @@ static struct resource_funcs dcn32_res_pool_funcs = { .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .add_phantom_pipes = dcn32_add_phantom_pipes, .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, + .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) @@ -2053,7 +2138,8 @@ static bool dcn32_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; - dc->caps.mall_size_total = 0; + /* total size = mall per channel * num channels * 1024 * 1024 */ + dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; @@ -2367,30 +2453,10 @@ static bool dcn32_resource_construct( dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; - dc->dml2_options.callbacks.dc = dc; - dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + resource_init_common_dml2_callbacks(dc, &dc->dml2_options); dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; - - dc->dml2_options.svp_pstate.callbacks.dc = dc; - dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream; - dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; - dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = 
&dc_state_remove_phantom_stream; - dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream; - dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream; dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; - dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type; - dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type; - dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream; + dc->dml2_options.svp_pstate.callbacks.calculate_mall_ways_from_bytes = pool->base.funcs->calculate_mall_ways_from_bytes; dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us; dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us; @@ -2488,7 +2554,7 @@ struct resource_pool *dcn32_create_resource_pool( * full update which delays the flip for 1 frame. If we use the original pipe * we don't have to toggle its power. So we can flip faster. */ -static int find_optimal_free_pipe_as_secondary_dpp_pipe( +int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( const struct resource_context *cur_res_ctx, struct resource_context *new_res_ctx, const struct resource_pool *pool, @@ -2671,7 +2737,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( return dcn32_acquire_idle_pipe_for_head_pipe_in_layer( new_ctx, pool, opp_head_pipe->stream, opp_head_pipe); - free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe( + free_pipe_idx = dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( &cur_ctx->res_ctx, &new_ctx->res_ctx, pool, opp_head_pipe); if (free_pipe_idx >= 0) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index 2258c5c721..fee67fbab8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -113,10 +113,6 @@ void dcn32_calculate_wm_and_dlg( int pipe_cnt, int vlevel); -uint32_t dcn32_helper_mall_bytes_to_ways( - struct dc *dc, - uint32_t total_size_in_mall_bytes); - uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -141,6 +137,12 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context); bool dcn32_is_center_timing(struct pipe_ctx *pipe); bool dcn32_is_psr_capable(struct pipe_ctx *pipe); +int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *new_opp_head); + struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( const struct dc_state *cur_ctx, struct dc_state *new_ctx, @@ -184,6 +186,8 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context); +unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes); + /* definitions for run time init of reg offsets */ /* CLK SRC */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index e83d340ed6..9a3cc0514a 100644 --- 
a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1581,10 +1581,22 @@ static struct dc_cap_funcs cap_funcs = { static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { + struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; + + memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); + DC_FP_START(); + dcn321_update_bw_bounding_box_fpu(dc, bw_params); + + dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); + + dml2_opt->use_clock_dc_limits = true; + if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) + dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + DC_FP_END(); } @@ -1612,6 +1624,7 @@ static struct resource_funcs dcn321_res_pool_funcs = { .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .add_phantom_pipes = dcn32_add_phantom_pipes, .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, + .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) @@ -1699,7 +1712,9 @@ static bool dcn321_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; - dc->caps.mall_size_total = 0; + /* total size = mall per channel * num channels * 1024 * 1024 */ + dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576; + dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; dc->caps.cache_num_ways = 16; @@ -2000,30 +2015,10 @@ static bool dcn321_resource_construct( dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; - dc->dml2_options.callbacks.dc = dc; - dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + resource_init_common_dml2_callbacks(dc, &dc->dml2_options); dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; - - dc->dml2_options.svp_pstate.callbacks.dc = dc; - dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream; - dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; - dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane; - 
dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream; - dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream; - dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream; dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; - dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type; - dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type; - dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream; + dc->dml2_options.svp_pstate.callbacks.calculate_mall_ways_from_bytes = pool->base.funcs->calculate_mall_ways_from_bytes; dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us; dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index cf0cb5cf4b..2df8a74251 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -721,7 +721,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dpp_power_gate = true, .disable_hubp_power_gate = true, .disable_optc_power_gate = true, /*should the same as above two*/ - .disable_hpo_power_gate = true, /*dmubfw force domain25 on*/ + .disable_hpo_power_gate = false, /*dmubfw force domain25 on*/ .disable_clock_gate = false, .disable_dsc_power_gate = true, .vsr_support = true, @@ -764,12 +764,12 @@ static const struct dc_debug_options debug_defaults_drv = { }, .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ - .minimum_z8_residency_time = 2100, + .minimum_z8_residency_time = 1, /* Always allow when other conditions are met */ .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, .enable_legacy_fast_update = true, - .enable_single_display_2to1_odm_policy = false, + .enable_single_display_2to1_odm_policy = true, .disable_idle_power_optimizations = false, .dmcub_emulation = false, .disable_boot_optimizations = false, @@ -783,7 +783,7 @@ static const struct dc_debug_options debug_defaults_drv = { .psp_disabled_wa = true, .ips2_eval_delay_us = 2000, .ips2_entry_delay_us = 800, - .disable_dmub_reallow_idle = true, + .disable_dmub_reallow_idle = false, .static_screen_wait_frames = 2, }; @@ -1736,7 +1736,9 @@ static bool dcn35_validate_bandwidth(struct dc *dc, { bool out = false; - out = dml2_validate(dc, context, fast_validate); + out = dml2_validate(dc, context, + context->power_source == DC_POWER_SOURCE_DC ? 
context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, + fast_validate); if (fast_validate) return out; @@ -2140,15 +2142,9 @@ static bool dcn35_resource_construct( dc->dml2_options.minimize_dispclk_using_odm = true; dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm; - dc->dml2_options.callbacks.dc = dc; - dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + resource_init_common_dml2_callbacks(dc, &dc->dml2_options); dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; + dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h index a51c4a9eaa..f97bb4cb37 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h @@ -240,6 +240,8 @@ struct resource_pool *dcn35_create_resource_pool( SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst),\ SRI_ARR(OTG_V_TOTAL_MIN, OTG, inst),\ SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst),\ + SRI_ARR(OTG_V_COUNT_STOP_CONTROL, OTG, inst),\ + SRI_ARR(OTG_V_COUNT_STOP_CONTROL2, OTG, inst),\ SRI_ARR(OTG_TRIGA_CNTL, OTG, inst),\ SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 116b591231..ddf9560ab7 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -758,7 +758,7 @@ static const struct dc_debug_options debug_defaults_drv = { //must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions .enable_double_buffered_dsc_pg_support = true, .enable_dp_dig_pixel_rate_div_policy = 1, - .disable_z10 = true, + .disable_z10 = false, .ignore_pg = true, .psp_disabled_wa = true, .ips2_eval_delay_us = 2000, @@ -1716,19 +1716,20 @@ static bool dcn351_validate_bandwidth(struct dc *dc, { bool out = false; - out = dml2_validate(dc, context, fast_validate); + out = dml2_validate(dc, context, + context->power_source == DC_POWER_SOURCE_DC ? 
context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, + fast_validate); if (fast_validate) return out; DC_FP_START(); - dcn351_decide_zstate_support(dc, context); + dcn35_decide_zstate_support(dc, context); DC_FP_END(); return out; } - static struct resource_funcs dcn351_res_pool_funcs = { .destroy = dcn351_destroy_resource_pool, .link_enc_create = dcn35_link_encoder_create, @@ -1871,6 +1872,9 @@ static bool dcn351_resource_construct( /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; + /* Use psp mailbox to enable assr */ + dc->config.use_assr_psp_message = true; + /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { @@ -1890,6 +1894,8 @@ static bool dcn351_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; + /*HW default is to have all the FGCG enabled, SW does not need to program them*/ + dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF; // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -2120,15 +2126,9 @@ static bool dcn351_resource_construct( dc->dml2_options.minimize_dispclk_using_odm = true; dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm; - dc->dml2_options.callbacks.dc = dc; - dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + resource_init_common_dml2_callbacks(dc, &dc->dml2_options); dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; + dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 7785908a66..2fde1f043d 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -71,6 +71,8 @@ extern "C" { #endif +#define DMUB_PC_SNAPSHOT_COUNT 10 + /* Forward declarations */ struct dmub_srv; struct dmub_srv_common_regs; @@ -295,10 +297,22 @@ struct dmub_srv_hw_params { bool dpia_hpd_int_enable_supported; bool disable_clock_gate; bool disallow_dispclk_dppclk_ds; + bool ips_sequential_ono; enum dmub_memory_access_type mem_access_type; enum dmub_ips_disable_type disable_ips; }; +/** + * struct dmub_srv_debug - Debug info for dmub_srv + * @timeout_occured: Indicates a timeout occurred on any message from driver to dmub + * @timeout_cmd: first cmd sent from driver that timed out - subsequent timeouts are not stored + */ +struct dmub_srv_debug { + bool timeout_occured; + union dmub_rb_cmd timeout_cmd; + unsigned long long timestamp; +}; + /** * struct dmub_diagnostic_data - Diagnostic data retrieved from DMCUB for * debugging purposes, including logging, crash analysis, etc.
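Illustrative sketch (not part of the patch): struct dmub_srv_debug above acts as a latch, keeping only the first driver-to-DMUB command that timed out plus a timestamp, and the dmub_dcnXX_get_diagnostic_data() hunks later in this patch copy that snapshot into dmub_diagnostic_data::timeout_info. A minimal sketch of the intended bookkeeping, assuming a hypothetical helper name and a caller-supplied timestamp:

#include "dmub_srv.h" /* struct dmub_srv, struct dmub_srv_debug, union dmub_rb_cmd */

/* Hypothetical helper: latch the first driver-to-DMUB command that timed out. */
static void record_dmub_timeout(struct dmub_srv *dmub,
				const union dmub_rb_cmd *cmd,
				unsigned long long timestamp)
{
	if (dmub->debug.timeout_occured)
		return; /* subsequent timeouts are intentionally not stored */

	dmub->debug.timeout_occured = true;
	dmub->debug.timeout_cmd = *cmd;    /* keep a copy; the ring buffer slot may be reused */
	dmub->debug.timestamp = timestamp; /* time source is the caller's choice (assumption) */
}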
@@ -306,7 +320,7 @@ struct dmub_srv_hw_params { struct dmub_diagnostic_data { uint32_t dmcub_version; uint32_t scratch[17]; - uint32_t pc; + uint32_t pc[DMUB_PC_SNAPSHOT_COUNT]; uint32_t undefined_address_fault_addr; uint32_t inst_fetch_fault_addr; uint32_t data_write_fault_addr; @@ -317,6 +331,7 @@ struct dmub_diagnostic_data { uint32_t inbox0_wptr; uint32_t inbox0_size; uint32_t gpint_datain0; + struct dmub_srv_debug timeout_info; uint8_t is_dmcub_enabled : 1; uint8_t is_dmcub_soft_reset : 1; uint8_t is_dmcub_secure_reset : 1; @@ -506,6 +521,7 @@ struct dmub_srv { struct dmub_visual_confirm_color visual_confirm_color; enum dmub_srv_power_state_type power_state; + struct dmub_srv_debug debug; }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index af3fe8bb07..e85fd3ac52 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -97,6 +97,9 @@ /* Maximum number of planes on any ASIC. */ #define DMUB_MAX_PLANES 6 +/* Maximum number of phantom planes on any ASIC */ +#define DMUB_MAX_PHANTOM_PLANES ((DMUB_MAX_PLANES) / 2) + /* Trace buffer offset for entry */ #define TRACE_BUFFER_ENTRY_OFFSET 16 @@ -194,6 +197,11 @@ union abm_flags { * of user backlight level. */ unsigned int abm_gradual_bl_change : 1; + + /** + * @abm_new_frame: Indicates if a new frame update is needed for ABM to ramp up into steady state + */ + unsigned int abm_new_frame : 1; } bitfields; unsigned int u32All; @@ -461,7 +469,7 @@ struct dmub_feature_caps { * Max PSR version supported by FW. */ uint8_t psr; - uint8_t fw_assisted_mclk_switch; + uint8_t fw_assisted_mclk_switch_ver; uint8_t reserved[4]; uint8_t subvp_psr_support; uint8_t gecc_enable; @@ -619,6 +627,7 @@ enum dmub_ips_disable_type { DMUB_IPS_DISABLE_IPS2 = 3, DMUB_IPS_DISABLE_IPS2_Z10 = 4, DMUB_IPS_DISABLE_DYNAMIC = 5, + DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6, }; #define DMUB_IPS1_ALLOW_MASK 0x00000001 @@ -653,6 +662,7 @@ union dmub_fw_boot_options { uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */ uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/ uint32_t ips_disable: 3; /* options to disable ips support*/ + uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */ uint32_t reserved : 9; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ @@ -695,7 +705,8 @@ union dmub_shared_state_ips_fw_signals { struct { uint32_t ips1_commit : 1; /**< 1 if in IPS1 */ uint32_t ips2_commit : 1; /**< 1 if in IPS2 */ - uint32_t reserved_bits : 30; /**< Reversed */ + uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */ + uint32_t reserved_bits : 29; /**< Reserved */ } bits; uint32_t all; }; @@ -724,7 +735,13 @@ union dmub_shared_state_ips_driver_signals { */ struct dmub_shared_state_ips_fw { union dmub_shared_state_ips_fw_signals signals; /**< 4 bytes, IPS signal bits */ - uint32_t reserved[61]; /**< Reversed, to be updated when adding new fields. */ + uint32_t rcg_entry_count; /**< Entry counter for RCG */ + uint32_t rcg_exit_count; /**< Exit counter for RCG */ + uint32_t ips1_entry_count; /**< Entry counter for IPS1 */ + uint32_t ips1_exit_count; /**< Exit counter for IPS1 */ + uint32_t ips2_entry_count; /**< Entry counter for IPS2 */ + uint32_t ips2_exit_count; /**< Exit counter for IPS2 */ + uint32_t reserved[55]; /**< Reserved, to be updated when adding new fields.
*/ }; /* 248-bytes, fixed */ /** @@ -811,6 +828,10 @@ enum dmub_cmd_vbios_type { * Query DP alt status on a transmitter. */ DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26, + /** + * Control PHY FSM + */ + DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM = 29, /** * Controls domain power gating */ @@ -1186,6 +1207,11 @@ enum dmub_cmd_type { */ DMUB_CMD__DPIA_HPD_INT_ENABLE = 86, + /** + * Command type used for all PSP commands. + */ + DMUB_CMD__PSP = 88, + DMUB_CMD__VBIOS = 128, }; @@ -1588,7 +1614,7 @@ struct dmub_rb_cmd_idle_opt_dcn_restore { */ struct dmub_dcn_notify_idle_cntl_data { uint8_t driver_idle; - uint8_t pad[1]; + uint8_t reserved[59]; }; /** @@ -2309,6 +2335,11 @@ enum phy_link_rate { * UHBR10 - 20.0 Gbps/Lane */ PHY_RATE_2000 = 11, + + PHY_RATE_675 = 12, + /** + * Rate 12 - 6.75 Gbps/Lane + */ }; /** @@ -2327,6 +2358,7 @@ enum dmub_phy_fsm_state { DMUB_PHY_FSM_POWER_DOWN, DMUB_PHY_FSM_PLL_EN, DMUB_PHY_FSM_TX_EN, + DMUB_PHY_FSM_TX_EN_TEST_MODE, DMUB_PHY_FSM_FAST_LP, DMUB_PHY_FSM_P2_PLL_OFF_CPM, DMUB_PHY_FSM_P2_PLL_OFF_PG, @@ -2931,18 +2963,49 @@ struct dmub_rb_cmd_psr_set_power_opt { struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data; }; +/** + * Definition of Replay Residency GPINT command. + * Bit[0] - Residency mode for Revision 0 + * Bit[1] - Enable/Disable state + * Bit[2-3] - Revision number + * Bit[4-7] - Residency mode for Revision 1 + * Bit[8] - Panel instance + * Bit[9-15] - Reserved + */ + +enum pr_residency_mode { + PR_RESIDENCY_MODE_PHY = 0x0, + PR_RESIDENCY_MODE_ALPM, + PR_RESIDENCY_MODE_IPS2, + PR_RESIDENCY_MODE_FRAME_CNT, + PR_RESIDENCY_MODE_ENABLEMENT_PERIOD, +}; + #define REPLAY_RESIDENCY_MODE_SHIFT (0) #define REPLAY_RESIDENCY_ENABLE_SHIFT (1) +#define REPLAY_RESIDENCY_REVISION_SHIFT (2) +#define REPLAY_RESIDENCY_MODE2_SHIFT (4) #define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) -# define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT) -# define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) -# define REPLAY_RESIDENCY_MODE_IPS 0x10 +# define REPLAY_RESIDENCY_FIELD_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT) +# define REPLAY_RESIDENCY_FIELD_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) + +#define REPLAY_RESIDENCY_MODE2_MASK (0xF << REPLAY_RESIDENCY_MODE2_SHIFT) +# define REPLAY_RESIDENCY_FIELD_MODE2_IPS (0x1 << REPLAY_RESIDENCY_MODE2_SHIFT) +# define REPLAY_RESIDENCY_FIELD_MODE2_FRAME_CNT (0x2 << REPLAY_RESIDENCY_MODE2_SHIFT) +# define REPLAY_RESIDENCY_FIELD_MODE2_EN_PERIOD (0x3 << REPLAY_RESIDENCY_MODE2_SHIFT) #define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT) # define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT) # define REPLAY_RESIDENCY_ENABLE (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT) +#define REPLAY_RESIDENCY_REVISION_MASK (0x3 << REPLAY_RESIDENCY_REVISION_SHIFT) +# define REPLAY_RESIDENCY_REVISION_0 (0x0 << REPLAY_RESIDENCY_REVISION_SHIFT) +# define REPLAY_RESIDENCY_REVISION_1 (0x1 << REPLAY_RESIDENCY_REVISION_SHIFT) + +/** + * Definition of a replay_state. + */ enum replay_state { REPLAY_STATE_0 = 0x0, REPLAY_STATE_1 = 0x10, @@ -3004,6 +3067,11 @@ enum dmub_cmd_replay_type { * Set pseudo vtotal */ DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7, + /** + * Set adaptive sync sdp enabled + */ + DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8, + }; /** @@ -3205,6 +3273,20 @@ struct dmub_cmd_replay_set_pseudo_vtotal { */ uint8_t pad; }; +struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data { + /** + * Panel Instance. 
+ * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * enabled: set adaptive sync sdp enabled + */ + uint8_t force_disabled; + + uint8_t pad[2]; +}; /** * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command. @@ -3308,6 +3390,20 @@ struct dmub_rb_cmd_replay_set_pseudo_vtotal { struct dmub_cmd_replay_set_pseudo_vtotal data; }; +/** + * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command. + */ +struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command. + */ + struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data data; +}; + /** * Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command. */ @@ -3363,6 +3459,11 @@ union dmub_replay_cmd_set { * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data. */ struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data; + /** + * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command data. + */ + struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data; + }; /** @@ -3445,7 +3546,7 @@ enum hw_lock_client { /** * Replay is the client of HW Lock Manager. */ - HW_LOCK_CLIENT_REPLAY = 4, + HW_LOCK_CLIENT_REPLAY = 4, /** * Invalid client. */ @@ -4038,6 +4139,10 @@ enum dmub_cmd_panel_cntl_type { * Queries backlight info for the embedded panel. */ DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO = 1, + /** + * Sets the PWM Freq as per user's requirement. + */ + DMUB_CMD__PANEL_DEBUG_PWM_FREQ = 2, }; /** @@ -4139,6 +4244,34 @@ struct dmub_rb_cmd_transmitter_query_dp_alt { struct dmub_rb_cmd_transmitter_query_dp_alt_data data; /**< payload */ }; +struct phy_test_mode { + uint8_t mode; + uint8_t pat0; + uint8_t pad[2]; +}; + +/** + * Data passed in/out in a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command. + */ +struct dmub_rb_cmd_transmitter_set_phy_fsm_data { + uint8_t phy_id; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */ + uint8_t mode; /**< HDMI/DP/DP2 etc */ + uint8_t lane_num; /**< Number of lanes */ + uint32_t symclk_100Hz; /**< PLL symclock in 100hz */ + struct phy_test_mode test_mode; + enum dmub_phy_fsm_state state; + uint32_t status; + uint8_t pad; +}; + +/** + * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command. + */ +struct dmub_rb_cmd_transmitter_set_phy_fsm { + struct dmub_cmd_header header; /**< header */ + struct dmub_rb_cmd_transmitter_set_phy_fsm_data data; /**< payload */ +}; + /** * Maximum number of bytes a chunk sent to DMUB for parsing */ @@ -4260,6 +4393,65 @@ struct dmub_rb_cmd_secure_display { } roi_info; }; +/** + * Command type of a DMUB_CMD__PSP command + */ +enum dmub_cmd_psp_type { + DMUB_CMD__PSP_ASSR_ENABLE = 0 +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__PSP_ASSR_ENABLE command. + */ +struct dmub_cmd_assr_enable_data { + /** + * ASSR enable or disable. + */ + uint8_t enable; + /** + * PHY port type. + * Indicates eDP / non-eDP port type + */ + uint8_t phy_port_type; + /** + * PHY port ID. + */ + uint8_t phy_port_id; + /** + * Link encoder index. + */ + uint8_t link_enc_index; + /** + * HPO mode. + */ + uint8_t hpo_mode; + + /** + * Reserved field. + */ + uint8_t reserved[7]; +}; + +/** + * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. + */ +struct dmub_rb_cmd_assr_enable { + /** + * Command header. + */ + struct dmub_cmd_header header; + + /** + * Assr data.
+ */ + struct dmub_cmd_assr_enable_data assr_data; + + /** + * Reserved field. + */ + uint32_t reserved[3]; +}; + /** * union dmub_rb_cmd - DMUB inbox command. */ @@ -4450,6 +4642,10 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command. */ struct dmub_rb_cmd_transmitter_query_dp_alt query_dp_alt; + /** + * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command. + */ + struct dmub_rb_cmd_transmitter_set_phy_fsm set_phy_fsm; /** * Definition of a DMUB_CMD__DPIA_DIG1_CONTROL command. */ @@ -4518,6 +4714,15 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command. */ struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal; + /** + * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command. + */ + struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp replay_disabled_adaptive_sync_sdp; + /** + * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. + */ + struct dmub_rb_cmd_assr_enable assr_enable; + }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index cae96fba63..e500ca9ae0 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -472,4 +472,5 @@ void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); diag_data->is_cw6_enabled = is_cw6_enabled; + diag_data->timeout_info = dmub->debug; } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 2bcf5fb87d..662c34e949 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -466,6 +466,7 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); diag_data->is_cw6_enabled = is_cw6_enabled; + diag_data->timeout_info = dmub->debug; } bool dmub_dcn31_should_detect(struct dmub_srv *dmub) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index 0d521eeda0..e1da270502 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -478,6 +478,8 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti diag_data->is_cw6_enabled = is_cw6_enabled; diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); + + diag_data->timeout_info = dmub->debug; } void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub) { diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 53f359f3fa..70e63aeb8f 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -420,6 +420,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.disable_clk_ds = params->disallow_dispclk_dppclk_ds; boot_options.bits.disable_clk_gate = params->disable_clock_gate; boot_options.bits.ips_disable = params->disable_ips; + boot_options.bits.ips_sequential_ono = params->ips_sequential_ono; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } @@ -516,6 +517,7 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti diag_data->is_cw6_enabled = is_cw6_enabled; diag_data->gpint_datain0 = 
REG_READ(DMCUB_GPINT_DATAIN0); + diag_data->timeout_info = dmub->debug; } void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub) { diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h index 1c6f24cb1d..447768dec8 100644 --- a/drivers/gpu/drm/amd/display/include/dal_types.h +++ b/drivers/gpu/drm/amd/display/include/dal_types.h @@ -27,7 +27,6 @@ #define __DAL_TYPES_H__ #include "signal_types.h" -#include "dc_types.h" struct dal_logger; struct dc_bios; diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h index c6bbd262f1..54e33062b3 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_id.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h @@ -226,8 +226,8 @@ enum dp_alt_mode { struct graphics_object_id { uint32_t id:8; - uint32_t enum_id:4; - uint32_t type:4; + enum object_enum_id enum_id :4; + enum object_type type :4; uint32_t reserved:16; /* for padding. total size should be u32 */ }; diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 92dbff22a7..1867aac57c 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -73,7 +73,6 @@ struct link_training_settings { enum dc_pre_emphasis *pre_emphasis; enum dc_post_cursor2 *post_cursor2; bool should_set_fec_ready; - /* TODO - factor lane_settings out because it changes during LT */ union dc_dp_ffe_preset *ffe_preset; uint16_t cr_pattern_time; diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index f39e2785e6..8347995173 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -64,6 +64,7 @@ #define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_AUTO_DPM_TEST(...) pr_debug("[AutoDPMTest]: "__VA_ARGS__) +#define DC_LOG_IPS(...) 
pr_debug("[IPS]: "__VA_ARGS__) struct dc_log_buffer_ctx { char *buf; diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index 1b14b17a79..a10d6b988a 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -118,6 +118,19 @@ static inline bool dc_is_dvi_signal(enum signal_type signal) } } +static inline bool dc_is_tmds_signal(enum signal_type signal) +{ + switch (signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + return true; + break; + default: + return false; + } +} + static inline bool dc_is_dvi_single_link_signal(enum signal_type signal) { return (signal == SIGNAL_TYPE_DVI_SINGLE_LINK); diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 8b5c278576..3699e63380 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1059,7 +1059,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, struct fixed31_32 min_display; struct fixed31_32 max_content; struct fixed31_32 clip = dc_fixpt_one; - struct fixed31_32 output; + struct fixed31_32 output = dc_fixpt_zero; bool use_eetf = false; bool is_clipped = false; struct fixed31_32 sdr_white_level; diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 3955b7e4b2..d09627c15b 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -158,13 +158,13 @@ static unsigned int calc_v_total_from_duration( if (duration_in_us > vrr->max_duration_in_us) duration_in_us = vrr->max_duration_in_us; - if (dc_is_hdmi_signal(stream->signal)) { + if (dc_is_hdmi_signal(stream->signal)) { // change for HDMI to comply with spec uint32_t h_total_up_scaled; h_total_up_scaled = stream->timing.h_total * 10000; v_total = div_u64((unsigned long long)duration_in_us * stream->timing.pix_clk_100hz + (h_total_up_scaled - 1), - h_total_up_scaled); + h_total_up_scaled); //ceiling for MMax and MMin for MVRR } else { v_total = div64_u64(div64_u64(((unsigned long long)( duration_in_us) * (stream->timing.pix_clk_100hz / 10)), @@ -1057,7 +1057,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->fixed_refresh_in_uhz = 0; refresh_range = div_u64(in_out_vrr->max_refresh_in_uhz + 500000, 1000000) - -+ div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000); + div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000); in_out_vrr->supported = true; } @@ -1126,6 +1126,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->adjust.v_total_min = stream->timing.v_total; in_out_vrr->adjust.v_total_max = stream->timing.v_total; } + + in_out_vrr->adjust.allow_otg_v_count_halt = (in_config->state == VRR_STATE_ACTIVE_FIXED) ? 
true : false; } void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index 733f22bed0..c996365e84 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -151,7 +151,7 @@ out: static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) { - enum mod_hdcp_status status; + enum mod_hdcp_status status = MOD_HDCP_STATUS_FAILURE; uint8_t size; uint16_t max_wait = 20; // units of ms uint16_t num_polls = 5; diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 84f9b412a4..a344e2e49b 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -536,8 +536,6 @@ void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream, mod_build_adaptive_sync_infopacket_v2(stream, param, info_packet); break; case FREESYNC_TYPE_PCON_IN_WHITELIST: - mod_build_adaptive_sync_infopacket_v1(info_packet); - break; case ADAPTIVE_SYNC_TYPE_EDP: mod_build_adaptive_sync_infopacket_v1(info_packet); break; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index b0a6256e89..7536c173a5 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -24,6 +24,7 @@ #define __AMD_SHARED_H__ #include +#include #define AMD_MAX_USEC_TIMEOUT 1000000 /* 1000 ms */ @@ -321,6 +322,8 @@ struct amd_ip_funcs { int (*set_powergating_state)(void *handle, enum amd_powergating_state state); void (*get_clockgating_state)(void *handle, u64 *flags); + void (*dump_ip_state)(void *handle); + void (*print_ip_state)(void *handle, struct drm_printer *p); }; diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h index f2f8f9b39c..fc72c22670 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h @@ -311,6 +311,10 @@ #define mmPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2 #define mmPHYFSYMCLK_CLOCK_CNTL 0x0057 #define mmPHYFSYMCLK_CLOCK_CNTL_BASE_IDX 2 +#define regHDMICHARCLK0_CLOCK_CNTL 0x004a +#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2 +#define mmHDMICHARCLK0_CLOCK_CNTL 0x004a +#define mmHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2 // addressBlock: dce_dc_dccg_dccg_dfs_dispdec @@ -4513,6 +4517,10 @@ #define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0e18 #define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_INDEX 0x0e19 +#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_DATA 0x0e1a +#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -5201,6 +5209,10 @@ #define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f83 #define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f84 +#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_DATA 0x0f85 +#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -5888,6 +5900,10 @@ #define mmCM2_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM2_CM_3DLUT_OUT_OFFSET_B 0x10ee #define 
mmCM2_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM2_CM_TEST_DEBUG_INDEX 0x10ef +#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM2_CM_TEST_DEBUG_DATA 0x10f0 +#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -6576,6 +6592,10 @@ #define mmCM3_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM3_CM_3DLUT_OUT_OFFSET_B 0x1259 #define mmCM3_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM3_CM_TEST_DEBUG_INDEX 0x125a +#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM3_CM_TEST_DEBUG_DATA 0x125b +#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -7264,6 +7284,10 @@ #define mmCM4_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM4_CM_3DLUT_OUT_OFFSET_B 0x13c4 #define mmCM4_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM4_CM_TEST_DEBUG_INDEX 0x13c5 +#define mmCM4_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM4_CM_TEST_DEBUG_DATA 0x13c6 +#define mmCM4_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp4_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -7952,6 +7976,10 @@ #define mmCM5_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM5_CM_3DLUT_OUT_OFFSET_B 0x152f #define mmCM5_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM5_CM_TEST_DEBUG_INDEX 0x1530 +#define mmCM5_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM5_CM_TEST_DEBUG_DATA 0x1531 +#define mmCM5_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp5_dispdec_dpp_dcperfmon_dc_perfmon_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h index e0a4473516..daf71e82f0 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h @@ -1189,6 +1189,11 @@ #define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL__SHIFT 0x4 #define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_EN_MASK 0x00000001L #define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL_MASK 0x00000010L +//HDMICHARCLK0_CLOCK_CNTL +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0 +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4 +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L // addressBlock: dce_dc_dccg_dccg_dfs_dispdec @@ -16739,6 +16744,15 @@ #define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10 #define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL #define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L +//CM0_CM_TEST_DEBUG_INDEX +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L +//CM0_CM_TEST_DEBUG_DATA +#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL + // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec //DC_PERFMON12_PERFCOUNTER_CNTL diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h index b45a35aae2..bf84f97d91 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h @@ -4466,6 +4466,10 @@ #define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 
#define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0e18 #define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_INDEX 0x0e19 +#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_DATA 0x0e1a +#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -5154,6 +5158,10 @@ #define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f83 #define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f84 +#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_DATA 0x0f85 +#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -5841,6 +5849,10 @@ #define mmCM2_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM2_CM_3DLUT_OUT_OFFSET_B 0x10ee #define mmCM2_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM2_CM_TEST_DEBUG_INDEX 0x10ef +#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM2_CM_TEST_DEBUG_DATA 0x10f0 +#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -6529,6 +6541,10 @@ #define mmCM3_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM3_CM_3DLUT_OUT_OFFSET_B 0x1259 #define mmCM3_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM3_CM_TEST_DEBUG_INDEX 0x125a +#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM3_CM_TEST_DEBUG_DATA 0x125b +#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -7217,6 +7233,10 @@ #define mmCM4_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM4_CM_3DLUT_OUT_OFFSET_B 0x13c4 #define mmCM4_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM4_CM_TEST_DEBUG_INDEX 0x13c5 +#define mmCM4_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM4_CM_TEST_DEBUG_DATA 0x13c6 +#define mmCM4_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp4_dispdec_dpp_dcperfmon_dc_perfmon_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h index 3dae29f958..56cdb21987 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h @@ -15676,6 +15676,14 @@ #define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10 #define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL #define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L +//CM0_CM_TEST_DEBUG_INDEX +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L +//CM0_CM_TEST_DEBUG_DATA +#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h index daa8130636..8b0d2638a6 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h @@ -3110,6 +3110,10 @@ #define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0e18 #define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_INDEX 0x0e19 +#define 
mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_DATA 0x0e1a +#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -3798,6 +3802,10 @@ #define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f83 #define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f84 +#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_DATA 0x0f85 +#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -5687,6 +5695,16 @@ #define mmDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define mmDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035 #define mmDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define mmDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a +#define mmDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define mmDSCC0_DSCC_TEST_DEBUG_DATA0 0x303b +#define mmDSCC0_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 +#define mmDSCC0_DSCC_TEST_DEBUG_DATA1 0x303c +#define mmDSCC0_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2 +#define mmDSCC0_DSCC_TEST_DEBUG_DATA2 0x303d +#define mmDSCC0_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2 +#define mmDSCC0_DSCC_TEST_DEBUG_DATA3 0x303e +#define mmDSCC0_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2 // addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec @@ -5817,6 +5835,16 @@ #define mmDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define mmDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091 #define mmDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define mmDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096 +#define mmDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define mmDSCC1_DSCC_TEST_DEBUG_DATA0 0x3097 +#define mmDSCC1_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 +#define mmDSCC1_DSCC_TEST_DEBUG_DATA1 0x3098 +#define mmDSCC1_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2 +#define mmDSCC1_DSCC_TEST_DEBUG_DATA2 0x3099 +#define mmDSCC1_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2 +#define mmDSCC1_DSCC_TEST_DEBUG_DATA3 0x309a +#define mmDSCC1_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2 // addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h index 5c469cf635..53f1705f8d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h @@ -10701,6 +10701,13 @@ #define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L #define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L #define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L + +//CM0_CM_TEST_DEBUG_INDEX +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L + //CM0_CM_SHAPER_CONTROL #define CM0_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0 #define CM0_CM_SHAPER_CONTROL__CM_SHAPER_MODE_CURRENT__SHIFT 0x2 @@ -22258,7 +22265,9 @@ #define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L //DSC_TOP0_DSC_DEBUG_CONTROL #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4 #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L +#define 
DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L // addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec @@ -22631,6 +22640,15 @@ //DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0 #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL +//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L // addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h index f268d33c47..7fd906f108 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h @@ -424,6 +424,8 @@ #define regDTBCLK_DTO2_MODULO_BASE_IDX 2 #define regDTBCLK_DTO3_MODULO 0x0022 #define regDTBCLK_DTO3_MODULO_BASE_IDX 2 +#define regHDMICHARCLK0_CLOCK_CNTL 0x004a +#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2 #define regPHYASYMCLK_CLOCK_CNTL 0x0052 #define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX 2 #define regPHYBSYMCLK_CLOCK_CNTL 0x0053 @@ -434,6 +436,8 @@ #define regPHYDSYMCLK_CLOCK_CNTL_BASE_IDX 2 #define regPHYESYMCLK_CLOCK_CNTL 0x0056 #define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2 +#define regHDMISTREAMCLK_CNTL 0x0059 +#define regHDMISTREAMCLK_CNTL_BASE_IDX 2 #define regDCCG_GATE_DISABLE_CNTL3 0x005a #define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2 #define regHDMISTREAMCLK0_DTO_PARAM 0x005b diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h index cf3398f156..07fbfafe60 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h @@ -1372,6 +1372,11 @@ //DTBCLK_DTO3_MODULO #define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT 0x0 #define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK 0xFFFFFFFFL +//HDMICHARCLK0_CLOCK_CNTL +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0 +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4 +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L //PHYASYMCLK_CLOCK_CNTL #define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_EN__SHIFT 0x0 #define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_SRC_SEL__SHIFT 0x4 @@ -1397,6 +1402,13 @@ #define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL__SHIFT 0x4 #define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_EN_MASK 0x00000001L #define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL_MASK 0x00000030L +//HDMISTREAMCLK_CNTL +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0 +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN__SHIFT 0x3 +#define 
HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS__SHIFT 0x4 +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK 0x00000007L +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN_MASK 0x00000008L +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS_MASK 0x00000010L //DCCG_GATE_DISABLE_CNTL3 #define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT 0x0 #define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT 0x1 @@ -46978,6 +46990,13 @@ #define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L #define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L #define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L + + +//DSC_TOP0_DSC_DEBUG_CONTROL +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L //DSC_TOP0_DSC_DEBUG_CONTROL #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h index 50c34d88c1..16a69d17bb 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h @@ -213,6 +213,8 @@ #define regDTBCLK_DTO2_MODULO_BASE_IDX 2 #define regDTBCLK_DTO3_MODULO 0x0022 #define regDTBCLK_DTO3_MODULO_BASE_IDX 2 +#define regHDMICHARCLK0_CLOCK_CNTL 0x004a +#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2 #define regPHYASYMCLK_CLOCK_CNTL 0x0052 #define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX 2 #define regPHYBSYMCLK_CLOCK_CNTL 0x0053 @@ -233,6 +235,8 @@ #define regDCCG_AUDIO_DTBCLK_DTO_MODULO_BASE_IDX 2 #define regDTBCLK_DTO_DBUF_EN 0x0063 #define regDTBCLK_DTO_DBUF_EN_BASE_IDX 2 +#define regHDMISTREAMCLK_CNTL 0x0059 +#define regHDMISTREAMCLK_CNTL_BASE_IDX 2 // addressBlock: dce_dc_dccg_dccg_dcperfmon0_dc_perfmon_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h index 295e0dac9f..6473362e39 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h @@ -886,6 +886,11 @@ //DTBCLK_DTO3_MODULO #define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT 0x0 #define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK 0xFFFFFFFFL +//HDMICHARCLK0_CLOCK_CNTL +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0 +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4 +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L //PHYASYMCLK_CLOCK_CNTL #define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_EN__SHIFT 0x0 #define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_SRC_SEL__SHIFT 0x4 @@ -911,6 +916,11 @@ #define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL__SHIFT 0x4 #define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_EN_MASK 0x00000001L #define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL_MASK 0x00000030L +//HDMISTREAMCLK_CNTL +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0 +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS__SHIFT 0x10 +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK 0x00000003L +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS_MASK 0x00010000L 
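The HDMICHARCLK0_CLOCK_CNTL and HDMISTREAMCLK_CNTL additions above follow the usual <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK pairing used throughout these sh_mask headers. A minimal illustrative sketch of how such a pair is consumed to read and update one field of a 32-bit register value; the helper names below are hypothetical (the driver itself goes through its REG_GET_FIELD/REG_SET_FIELD-style macros):

#include <stdint.h>

#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT	0x0
#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK		0x00000007L

/* Extract one field from a 32-bit register value. */
static inline uint32_t reg_field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Replace one field in a 32-bit register value, leaving the other bits untouched. */
static inline uint32_t reg_field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				     uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/*
 * Example use (source-select value 3 is only a placeholder encoding):
 *   cntl = reg_field_set(cntl,
 *                        HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK,
 *                        HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT, 3);
 */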
//DCCG_GATE_DISABLE_CNTL3 #define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT 0x0 #define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT 0x1 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h index 14c29ce4c7..78cb61d580 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h @@ -1719,6 +1719,10 @@ #define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2 #define regFMON_CTRL 0x0541 #define regFMON_CTRL_BASE_IDX 2 +#define regDCHUBBUB_TEST_DEBUG_INDEX 0x0542 +#define regDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regDCHUBBUB_TEST_DEBUG_DATA 0x0543 +#define regDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dcn_dc_dchubbubl_hubbub_sdpif_dispdec @@ -3574,6 +3578,10 @@ #define regCM0_CM_DEALPHA_BASE_IDX 2 #define regCM0_CM_COEF_FORMAT 0x0d8c #define regCM0_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d +#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e +#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dcn_dc_dpp0_dispdec_dpp_top_dispdec @@ -3960,6 +3968,10 @@ #define regCM1_CM_DEALPHA_BASE_IDX 2 #define regCM1_CM_COEF_FORMAT 0x0ef7 #define regCM1_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8 +#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9 +#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dcn_dc_dpp1_dispdec_dpp_top_dispdec @@ -4346,6 +4358,10 @@ #define regCM2_CM_DEALPHA_BASE_IDX 2 #define regCM2_CM_COEF_FORMAT 0x1062 #define regCM2_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM2_CM_TEST_DEBUG_INDEX 0x1063 +#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM2_CM_TEST_DEBUG_DATA 0x1064 +#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dcn_dc_dpp2_dispdec_dpp_top_dispdec @@ -4732,6 +4748,10 @@ #define regCM3_CM_DEALPHA_BASE_IDX 2 #define regCM3_CM_COEF_FORMAT 0x11cd #define regCM3_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce +#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM3_CM_TEST_DEBUG_DATA 0x11cf +#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dcn_dc_dpp3_dispdec_dpp_top_dispdec @@ -11780,6 +11800,16 @@ #define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035 #define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a +#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_DATA0 0x303b +#define regDSCC0_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_DATA1 0x303c +#define regDSCC0_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_DATA2 0x303d +#define regDSCC0_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_DATA3 0x303e +#define regDSCC0_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2 // addressBlock: dcn_dc_dsc0_dispdec_dsccif_dispdec @@ -11888,6 +11918,16 @@ #define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091 #define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096 +#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_DATA0 0x3097 +#define 
regDSCC1_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_DATA1 0x3098 +#define regDSCC1_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_DATA2 0x3099 +#define regDSCC1_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_DATA3 0x309a +#define regDSCC1_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2 // addressBlock: dcn_dc_dsc1_dispdec_dsccif_dispdec @@ -11996,6 +12036,16 @@ #define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed #define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2 +#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC2_DSCC_TEST_DEBUG_DATA0 0x30f3 +#define regDSCC2_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 +#define regDSCC2_DSCC_TEST_DEBUG_DATA1 0x30f4 +#define regDSCC2_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2 +#define regDSCC2_DSCC_TEST_DEBUG_DATA2 0x30f5 +#define regDSCC2_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2 +#define regDSCC2_DSCC_TEST_DEBUG_DATA3 0x30f6 +#define regDSCC2_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2 // addressBlock: dcn_dc_dsc2_dispdec_dsccif_dispdec @@ -12104,6 +12154,16 @@ #define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149 #define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e +#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_DATA0 0x314f +#define regDSCC3_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_DATA1 0x3150 +#define regDSCC3_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_DATA2 0x3151 +#define regDSCC3_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_DATA3 0x3152 +#define regDSCC3_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2 // addressBlock: dcn_dc_dsc3_dispdec_dsccif_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h index 0691e328d0..1093105ca3 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h @@ -11544,6 +11544,11 @@ #define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L #define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L #define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L +//CM0_CM_TEST_DEBUG_INDEX +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L // addressBlock: dcn_dc_dpp0_dispdec_dpp_top_dispdec @@ -42267,6 +42272,18 @@ //DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0 #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL +//DSCC0_DSCC_TEST_DEBUG_INDEX2 +#define DSCC0_DSCC_TEST_DEBUG_INDEX2__DSCC_TEST_DEBUG_INDEX2__SHIFT 0x0 +#define DSCC0_DSCC_TEST_DEBUG_INDEX2__DSCC_TEST_DEBUG_INDEX2_MASK 0x000000FFL +//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0 +#define 
DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L // addressBlock: dcn_dc_dsc0_dispdec_dsccif_dispdec @@ -42300,6 +42317,16 @@ #define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L //DSC_TOP0_DSC_DEBUG_CONTROL #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L + + +//DSC_TOP0_DSC_DEBUG_CONTROL +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4 +//DSC_TOP0_DSC_DEBUG_CONTROL +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h index 3bd8792fd7..a04b8c32c5 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h @@ -1719,6 +1719,10 @@ #define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2 #define regFMON_CTRL 0x0541 #define regFMON_CTRL_BASE_IDX 2 +#define regDCHUBBUB_TEST_DEBUG_INDEX 0x0542 +#define regDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regDCHUBBUB_TEST_DEBUG_DATA 0x0543 +#define regDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dchubbubl_hubbub_sdpif_dispdec @@ -3573,6 +3577,10 @@ #define regCM0_CM_DEALPHA_BASE_IDX 2 #define regCM0_CM_COEF_FORMAT 0x0d8c #define regCM0_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d +#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e +#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec @@ -3959,6 +3967,10 @@ #define regCM1_CM_DEALPHA_BASE_IDX 2 #define regCM1_CM_COEF_FORMAT 0x0ef7 #define regCM1_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8 +#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9 +#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec @@ -4345,6 +4357,10 @@ #define regCM2_CM_DEALPHA_BASE_IDX 2 #define regCM2_CM_COEF_FORMAT 0x1062 #define regCM2_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM2_CM_TEST_DEBUG_INDEX 0x1063 +#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM2_CM_TEST_DEBUG_DATA 0x1064 +#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec @@ -4731,6 +4747,10 @@ #define regCM3_CM_DEALPHA_BASE_IDX 2 #define regCM3_CM_COEF_FORMAT 0x11cd #define regCM3_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce +#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM3_CM_TEST_DEBUG_DATA 0x11cf +#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec @@ -11789,6 +11809,10 @@ #define 
regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035 #define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a +#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_DATA0 0x303b +#define regDSCC0_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 // addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec @@ -11897,6 +11921,10 @@ #define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091 #define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096 +#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_DATA0 0x3097 +#define regDSCC1_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 // addressBlock: dce_dc_dsc1_dispdec_dsccif_dispdec @@ -12005,7 +12033,10 @@ #define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed #define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 - +#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2 +#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC2_DSCC_TEST_DEBUG_DATA0 0x30f3 +#define regDSCC2_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 // addressBlock: dce_dc_dsc2_dispdec_dsccif_dispdec // base address: 0x2e0 @@ -12113,6 +12144,10 @@ #define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149 #define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e +#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_DATA0 0x314f +#define regDSCC3_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 // addressBlock: dce_dc_dsc3_dispdec_dsccif_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h index e82dffc2b9..ce773fca62 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h @@ -11547,6 +11547,11 @@ #define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L #define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L #define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L +//CM0_CM_TEST_DEBUG_INDEX +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L // addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec @@ -42315,6 +42320,15 @@ //DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0 #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL +//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 
0x18 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L // addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec @@ -42348,7 +42362,9 @@ #define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L //DSC_TOP0_DSC_DEBUG_CONTROL #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4 #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L // addressBlock: dce_dc_dsc1_dispdec_dscc_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h index 0bb47e06ee..081e726afb 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h @@ -24,6 +24,8 @@ #define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 #define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA 0x292d #define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG 0x292e +#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 // addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec @@ -50,6 +52,8 @@ #define mmRDPCSTX0_RDPCSTX_CNTL2_BASE_IDX 2 #define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x293c #define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d +#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define mmRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940 #define mmRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define mmRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941 @@ -120,6 +124,8 @@ #define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 #define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA 0x2a05 #define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG 0x2a06 +#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 // addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec @@ -146,6 +152,8 @@ #define mmRDPCSTX1_RDPCSTX_CNTL2_BASE_IDX 2 #define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2a14 #define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG 0x2a15 +#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define mmRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18 #define mmRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define mmRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19 @@ -216,6 +224,8 @@ #define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 #define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA 0x2add #define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG 0x2ade +#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 // addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec @@ -242,6 +252,8 @@ #define mmRDPCSTX2_RDPCSTX_CNTL2_BASE_IDX 2 #define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2aec #define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG 0x2aed +#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define mmRDPCSTX2_RDPCSTX_PHY_CNTL0 0x2af0 #define mmRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define mmRDPCSTX2_RDPCSTX_PHY_CNTL1 0x2af1 @@ -312,6 +324,8 @@ #define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 #define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA 0x2bb5 #define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG 0x2bb6 +#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 // addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec @@ -338,6 +352,8 @@ #define mmRDPCSTX3_RDPCSTX_CNTL2_BASE_IDX 2 #define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2bc4 #define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG 0x2bc5 +#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define mmRDPCSTX3_RDPCSTX_PHY_CNTL0 0x2bc8 #define mmRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define mmRDPCSTX3_RDPCSTX_PHY_CNTL1 0x2bc9 @@ -408,6 +424,8 @@ #define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 #define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA 0x2c8d #define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG 0x2c8e +#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 // addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec @@ -434,6 +452,8 @@ #define mmRDPCSTX4_RDPCSTX_CNTL2_BASE_IDX 2 #define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2c9c #define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG 0x2c9d +#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define mmRDPCSTX4_RDPCSTX_PHY_CNTL0 0x2ca0 #define mmRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define mmRDPCSTX4_RDPCSTX_PHY_CNTL1 0x2ca1 @@ -504,6 +524,8 @@ #define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 #define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA 0x2d65 #define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG 0x2d66 +#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 // addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec @@ -530,6 +552,8 @@ #define mmRDPCSTX5_RDPCSTX_CNTL2_BASE_IDX 2 #define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2d74 #define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG 0x2d75 +#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define mmRDPCSTX5_RDPCSTX_PHY_CNTL0 0x2d78 #define mmRDPCSTX5_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define mmRDPCSTX5_RDPCSTX_PHY_CNTL1 0x2d79 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h index 23fa1121a9..1f846fa6c1 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h @@ -70,7 +70,9 @@ //DPCSTX0_DPCSTX_PLL_UPDATE_DATA #define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0 #define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL - +//DPCSTX0_DPCSTX_DEBUG_CONFIG +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L // addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec //RDPCSTX0_RDPCSTX_CNTL diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h index 55743d06f7..e55ff0e8d7 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h @@ -70,7 +70,9 @@ //DPCSTX0_DPCSTX_PLL_UPDATE_DATA #define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0 #define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL - +//DPCSTX0_DPCSTX_DEBUG_CONFIG +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L // addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec 
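The offset headers in this series pair each register with a _BASE_IDX value rather than an absolute MMIO address. A minimal sketch of how such a pair is typically resolved against a per-IP segment base, assuming an amdgpu-style reg_offset table (the segment-base values below are hypothetical placeholders; the driver fills in the real ones from IP discovery via its SOC15_REG_OFFSET-style lookup):

#include <stdint.h>

#define regRDPCSTX0_RDPCSTX_DEBUG_CONFIG		0x293d
#define regRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX	2

/* Hypothetical segment bases for one DPCS instance (placeholders only). */
static const uint32_t dpcs_seg_base[3] = { 0x00000000, 0x00000040, 0x000000c0 };

/* dword register index = segment base selected by BASE_IDX + register offset */
static inline uint32_t dpcs_reg_index(uint32_t base_idx, uint32_t offset)
{
	return dpcs_seg_base[base_idx] + offset;
}

/*
 * Example use:
 *   idx = dpcs_reg_index(regRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX,
 *                        regRDPCSTX0_RDPCSTX_DEBUG_CONFIG);
 */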
//RDPCSTX0_RDPCSTX_CNTL diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h index 01a56556cd..5b4fdeda10 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h @@ -155,6 +155,8 @@ #define regRDPCSTX0_RDPCSTX_CNTL2_BASE_IDX 2 #define regRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x293c #define regRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define regRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d +#define regRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define regRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940 #define regRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define regRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941 @@ -239,6 +241,8 @@ #define regRDPCSTX1_RDPCSTX_CNTL2_BASE_IDX 2 #define regRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2a14 #define regRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define regRDPCSTX1_RDPCSTX_DEBUG_CONFIG 0x2a15 +#define regRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define regRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18 #define regRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define regRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19 @@ -323,6 +327,8 @@ #define regRDPCSTX2_RDPCSTX_CNTL2_BASE_IDX 2 #define regRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2aec #define regRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define regRDPCSTX2_RDPCSTX_DEBUG_CONFIG 0x2aed +#define regRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define regRDPCSTX2_RDPCSTX_PHY_CNTL0 0x2af0 #define regRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define regRDPCSTX2_RDPCSTX_PHY_CNTL1 0x2af1 @@ -407,6 +413,8 @@ #define regRDPCSTX3_RDPCSTX_CNTL2_BASE_IDX 2 #define regRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2bc4 #define regRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define regRDPCSTX3_RDPCSTX_DEBUG_CONFIG 0x2bc5 +#define regRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define regRDPCSTX3_RDPCSTX_PHY_CNTL0 0x2bc8 #define regRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define regRDPCSTX3_RDPCSTX_PHY_CNTL1 0x2bc9 @@ -491,6 +499,8 @@ #define regRDPCSTX4_RDPCSTX_CNTL2_BASE_IDX 2 #define regRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2c9c #define regRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define regRDPCSTX4_RDPCSTX_DEBUG_CONFIG 0x2c9d +#define regRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define regRDPCSTX4_RDPCSTX_PHY_CNTL0 0x2ca0 #define regRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define regRDPCSTX4_RDPCSTX_PHY_CNTL1 0x2ca1 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h index 4908044f74..4c8e7fdb69 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h @@ -4830,6 +4830,8 @@ #define mmCP_ECC_FIRSTOCCURRENCE_RING2_BASE_IDX 0 #define mmGB_EDC_MODE 0x1e1e #define mmGB_EDC_MODE_BASE_IDX 0 +#define mmCP_DEBUG 0x1e1f +#define mmCP_DEBUG_BASE_IDX 0 #define mmCP_FETCHER_SOURCE 0x1e22 #define mmCP_FETCHER_SOURCE_BASE_IDX 0 #define mmCP_PQ_WPTR_POLL_CNTL 0x1e23 @@ -7778,6 +7780,8 @@ #define mmCP_MES_DOORBELL_CONTROL5_BASE_IDX 1 #define mmCP_MES_DOORBELL_CONTROL6 0x2841 #define mmCP_MES_DOORBELL_CONTROL6_BASE_IDX 1 +#define mmCP_MES_DEBUG_INTERRUPT_INSTR_PNTR 0x2842 +#define mmCP_MES_DEBUG_INTERRUPT_INSTR_PNTR_BASE_IDX 1 #define mmCP_MES_GP0_LO 0x2843 #define mmCP_MES_GP0_LO_BASE_IDX 1 #define mmCP_MES_GP0_HI 0x2844 @@ -9332,10 +9336,16 @@ #define mmRLC_LB_CNTR_INIT_1_BASE_IDX 1 #define mmRLC_LB_CNTR_1 0x4c1c #define 
mmRLC_LB_CNTR_1_BASE_IDX 1 +#define mmRLC_GPM_DEBUG_INST_ADDR 0x4c1d +#define mmRLC_GPM_DEBUG_INST_ADDR_BASE_IDX 1 #define mmRLC_JUMP_TABLE_RESTORE 0x4c1e #define mmRLC_JUMP_TABLE_RESTORE_BASE_IDX 1 #define mmRLC_PG_DELAY_2 0x4c1f #define mmRLC_PG_DELAY_2_BASE_IDX 1 +#define mmRLC_GPM_DEBUG_INST_A 0x4c22 +#define mmRLC_GPM_DEBUG_INST_A_BASE_IDX 1 +#define mmRLC_GPM_DEBUG_INST_B 0x4c23 +#define mmRLC_GPM_DEBUG_INST_B_BASE_IDX 1 #define mmRLC_GPU_CLOCK_COUNT_LSB 0x4c24 #define mmRLC_GPU_CLOCK_COUNT_LSB_BASE_IDX 1 #define mmRLC_GPU_CLOCK_COUNT_MSB 0x4c25 @@ -9720,6 +9730,8 @@ #define mmRLC_SPM_THREAD_TRACE_CTRL_BASE_IDX 1 #define mmRLC_LB_CNTR_2 0x4de7 #define mmRLC_LB_CNTR_2_BASE_IDX 1 +#define mmRLC_LX6_CORE_PDEBUG_INST 0x4deb +#define mmRLC_LX6_CORE_PDEBUG_INST_BASE_IDX 1 #define mmRLC_CPAXI_DOORBELL_MON_CTRL 0x4df1 #define mmRLC_CPAXI_DOORBELL_MON_CTRL_BASE_IDX 1 #define mmRLC_CPAXI_DOORBELL_MON_STAT 0x4df2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h index efc16ddf27..2dfa0e5b1a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h @@ -6822,6 +6822,8 @@ #define VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14 #define VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18 #define VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19 +#define VM_L2_PROTECTION_FAULT_STATUS__UCE__SHIFT 0x1d +#define VM_L2_PROTECTION_FAULT_STATUS__FED__SHIFT 0x1e #define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L #define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL #define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L @@ -6832,6 +6834,8 @@ #define VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L #define VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L #define VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L +#define VM_L2_PROTECTION_FAULT_STATUS__UCE_MASK 0x20000000L +#define VM_L2_PROTECTION_FAULT_STATUS__FED_MASK 0x40000000L //VM_L2_PROTECTION_FAULT_ADDR_LO32 #define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0 #define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h index 8b931bbabe..969e006b85 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h @@ -237,6 +237,10 @@ #define regSEM_REGISTER_LAST_PART2_BASE_IDX 0 #define regIH_CLIENT_CFG 0x0184 #define regIH_CLIENT_CFG_BASE_IDX 0 +#define regIH_RING1_CLIENT_CFG_INDEX 0x0185 +#define regIH_RING1_CLIENT_CFG_INDEX_BASE_IDX 0 +#define regIH_RING1_CLIENT_CFG_DATA 0x0186 +#define regIH_RING1_CLIENT_CFG_DATA_BASE_IDX 0 #define regIH_CLIENT_CFG_INDEX 0x0188 #define regIH_CLIENT_CFG_INDEX_BASE_IDX 0 #define regIH_CLIENT_CFG_DATA 0x0189 diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h index f262f44fa6..a672a91e58 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h @@ -888,6 +888,16 @@ //IH_CLIENT_CFG #define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0 #define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000003FL +//IH_RING1_CLIENT_CFG_INDEX +#define IH_RING1_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0 +#define 
IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK 0x00000007L +//IH_RING1_CLIENT_CFG_DATA +#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT 0x0 +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT 0x8 +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10 +#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK 0x000000FFL +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK 0x0000FF00L +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L //IH_CLIENT_CFG_INDEX #define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0 #define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h new file mode 100644 index 0000000000..da7e31fedd --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h @@ -0,0 +1,511 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _smuio_14_0_2_OFFSET_HEADER +#define _smuio_14_0_2_OFFSET_HEADER + + + +// addressBlock: smuio_smuio_tsc_SmuSmuioDec +// base address: 0x5a8a0 +#define regPWROK_REFCLK_GAP_CYCLES 0x0028 +#define regPWROK_REFCLK_GAP_CYCLES_BASE_IDX 1 +#define regGOLDEN_TSC_INCREMENT_UPPER 0x002b +#define regGOLDEN_TSC_INCREMENT_UPPER_BASE_IDX 1 +#define regGOLDEN_TSC_INCREMENT_LOWER 0x002c +#define regGOLDEN_TSC_INCREMENT_LOWER_BASE_IDX 1 +#define regGOLDEN_TSC_COUNT_UPPER 0x002d +#define regGOLDEN_TSC_COUNT_UPPER_BASE_IDX 1 +#define regGOLDEN_TSC_COUNT_LOWER 0x002e +#define regGOLDEN_TSC_COUNT_LOWER_BASE_IDX 1 +#define regSOC_GOLDEN_TSC_SHADOW_UPPER 0x002f +#define regSOC_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1 +#define regSOC_GOLDEN_TSC_SHADOW_LOWER 0x0030 +#define regSOC_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1 +#define regSOC_GAP_PWROK 0x0031 +#define regSOC_GAP_PWROK_BASE_IDX 1 + + +// addressBlock: smuio_smuio_swtimer_SmuSmuioDec +// base address: 0x5aca8 +#define regPWR_VIRT_RESET_REQ 0x012a +#define regPWR_VIRT_RESET_REQ_BASE_IDX 1 +#define regPWR_DISP_TIMER_CONTROL 0x012b +#define regPWR_DISP_TIMER_CONTROL_BASE_IDX 1 +#define regPWR_DISP_TIMER_DEBUG 0x012c +#define regPWR_DISP_TIMER_DEBUG_BASE_IDX 1 +#define regPWR_DISP_TIMER2_CONTROL 0x012d +#define regPWR_DISP_TIMER2_CONTROL_BASE_IDX 1 +#define regPWR_DISP_TIMER2_DEBUG 0x012e +#define regPWR_DISP_TIMER2_DEBUG_BASE_IDX 1 +#define regPWR_DISP_TIMER_GLOBAL_CONTROL 0x012f +#define regPWR_DISP_TIMER_GLOBAL_CONTROL_BASE_IDX 1 +#define regPWR_IH_CONTROL 0x0130 +#define regPWR_IH_CONTROL_BASE_IDX 1 + + +// addressBlock: smuio_smuio_misc_SmuSmuioDec +// base address: 0x5a000 +#define regSMUIO_MCM_CONFIG 0x0023 +#define regSMUIO_MCM_CONFIG_BASE_IDX 0 +#define regIP_DISCOVERY_VERSION 0x0000 +#define regIP_DISCOVERY_VERSION_BASE_IDX 1 +#define regSCRATCH_REGISTER0 0x01bd +#define regSCRATCH_REGISTER0_BASE_IDX 1 +#define regSCRATCH_REGISTER1 0x01be +#define regSCRATCH_REGISTER1_BASE_IDX 1 +#define regSCRATCH_REGISTER2 0x01bf +#define regSCRATCH_REGISTER2_BASE_IDX 1 +#define regSCRATCH_REGISTER3 0x01c0 +#define regSCRATCH_REGISTER3_BASE_IDX 1 +#define regSCRATCH_REGISTER4 0x01c1 +#define regSCRATCH_REGISTER4_BASE_IDX 1 +#define regSCRATCH_REGISTER5 0x01c2 +#define regSCRATCH_REGISTER5_BASE_IDX 1 +#define regSCRATCH_REGISTER6 0x01c3 +#define regSCRATCH_REGISTER6_BASE_IDX 1 +#define regSCRATCH_REGISTER7 0x01c4 +#define regSCRATCH_REGISTER7_BASE_IDX 1 + + +// addressBlock: smuio_smuio_i2c_SmuSmuioDec +// base address: 0x5a100 +#define regCKSVII2C_IC_CON 0x0040 +#define regCKSVII2C_IC_CON_BASE_IDX 0 +#define regCKSVII2C_IC_TAR 0x0041 +#define regCKSVII2C_IC_TAR_BASE_IDX 0 +#define regCKSVII2C_IC_SAR 0x0042 +#define regCKSVII2C_IC_SAR_BASE_IDX 0 +#define regCKSVII2C_IC_HS_MADDR 0x0043 +#define regCKSVII2C_IC_HS_MADDR_BASE_IDX 0 +#define regCKSVII2C_IC_DATA_CMD 0x0044 +#define regCKSVII2C_IC_DATA_CMD_BASE_IDX 0 +#define regCKSVII2C_IC_SS_SCL_HCNT 0x0045 +#define regCKSVII2C_IC_SS_SCL_HCNT_BASE_IDX 0 +#define regCKSVII2C_IC_SS_SCL_LCNT 0x0046 +#define regCKSVII2C_IC_SS_SCL_LCNT_BASE_IDX 0 +#define regCKSVII2C_IC_FS_SCL_HCNT 0x0047 +#define regCKSVII2C_IC_FS_SCL_HCNT_BASE_IDX 0 +#define regCKSVII2C_IC_FS_SCL_LCNT 0x0048 +#define regCKSVII2C_IC_FS_SCL_LCNT_BASE_IDX 0 +#define regCKSVII2C_IC_HS_SCL_HCNT 0x0049 +#define regCKSVII2C_IC_HS_SCL_HCNT_BASE_IDX 0 +#define regCKSVII2C_IC_HS_SCL_LCNT 0x004a +#define regCKSVII2C_IC_HS_SCL_LCNT_BASE_IDX 0 +#define regCKSVII2C_IC_INTR_STAT 0x004b +#define regCKSVII2C_IC_INTR_STAT_BASE_IDX 0 +#define regCKSVII2C_IC_INTR_MASK 
0x004c +#define regCKSVII2C_IC_INTR_MASK_BASE_IDX 0 +#define regCKSVII2C_IC_RAW_INTR_STAT 0x004d +#define regCKSVII2C_IC_RAW_INTR_STAT_BASE_IDX 0 +#define regCKSVII2C_IC_RX_TL 0x004e +#define regCKSVII2C_IC_RX_TL_BASE_IDX 0 +#define regCKSVII2C_IC_TX_TL 0x004f +#define regCKSVII2C_IC_TX_TL_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_INTR 0x0050 +#define regCKSVII2C_IC_CLR_INTR_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_RX_UNDER 0x0051 +#define regCKSVII2C_IC_CLR_RX_UNDER_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_RX_OVER 0x0052 +#define regCKSVII2C_IC_CLR_RX_OVER_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_TX_OVER 0x0053 +#define regCKSVII2C_IC_CLR_TX_OVER_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_RD_REQ 0x0054 +#define regCKSVII2C_IC_CLR_RD_REQ_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_TX_ABRT 0x0055 +#define regCKSVII2C_IC_CLR_TX_ABRT_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_RX_DONE 0x0056 +#define regCKSVII2C_IC_CLR_RX_DONE_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_ACTIVITY 0x0057 +#define regCKSVII2C_IC_CLR_ACTIVITY_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_STOP_DET 0x0058 +#define regCKSVII2C_IC_CLR_STOP_DET_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_START_DET 0x0059 +#define regCKSVII2C_IC_CLR_START_DET_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_GEN_CALL 0x005a +#define regCKSVII2C_IC_CLR_GEN_CALL_BASE_IDX 0 +#define regCKSVII2C_IC_ENABLE 0x005b +#define regCKSVII2C_IC_ENABLE_BASE_IDX 0 +#define regCKSVII2C_IC_STATUS 0x005c +#define regCKSVII2C_IC_STATUS_BASE_IDX 0 +#define regCKSVII2C_IC_TXFLR 0x005d +#define regCKSVII2C_IC_TXFLR_BASE_IDX 0 +#define regCKSVII2C_IC_RXFLR 0x005e +#define regCKSVII2C_IC_RXFLR_BASE_IDX 0 +#define regCKSVII2C_IC_SDA_HOLD 0x005f +#define regCKSVII2C_IC_SDA_HOLD_BASE_IDX 0 +#define regCKSVII2C_IC_TX_ABRT_SOURCE 0x0060 +#define regCKSVII2C_IC_TX_ABRT_SOURCE_BASE_IDX 0 +#define regCKSVII2C_IC_SLV_DATA_NACK_ONLY 0x0061 +#define regCKSVII2C_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0 +#define regCKSVII2C_IC_DMA_CR 0x0062 +#define regCKSVII2C_IC_DMA_CR_BASE_IDX 0 +#define regCKSVII2C_IC_DMA_TDLR 0x0063 +#define regCKSVII2C_IC_DMA_TDLR_BASE_IDX 0 +#define regCKSVII2C_IC_DMA_RDLR 0x0064 +#define regCKSVII2C_IC_DMA_RDLR_BASE_IDX 0 +#define regCKSVII2C_IC_SDA_SETUP 0x0065 +#define regCKSVII2C_IC_SDA_SETUP_BASE_IDX 0 +#define regCKSVII2C_IC_ACK_GENERAL_CALL 0x0066 +#define regCKSVII2C_IC_ACK_GENERAL_CALL_BASE_IDX 0 +#define regCKSVII2C_IC_ENABLE_STATUS 0x0067 +#define regCKSVII2C_IC_ENABLE_STATUS_BASE_IDX 0 +#define regCKSVII2C_IC_FS_SPKLEN 0x0068 +#define regCKSVII2C_IC_FS_SPKLEN_BASE_IDX 0 +#define regCKSVII2C_IC_HS_SPKLEN 0x0069 +#define regCKSVII2C_IC_HS_SPKLEN_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_RESTART_DET 0x006a +#define regCKSVII2C_IC_CLR_RESTART_DET_BASE_IDX 0 +#define regCKSVII2C_IC_COMP_PARAM_1 0x006d +#define regCKSVII2C_IC_COMP_PARAM_1_BASE_IDX 0 +#define regCKSVII2C_IC_COMP_VERSION 0x006e +#define regCKSVII2C_IC_COMP_VERSION_BASE_IDX 0 +#define regCKSVII2C_IC_COMP_TYPE 0x006f +#define regCKSVII2C_IC_COMP_TYPE_BASE_IDX 0 +#define regCKSVII2C1_IC_CON 0x0080 +#define regCKSVII2C1_IC_CON_BASE_IDX 0 +#define regCKSVII2C1_IC_TAR 0x0081 +#define regCKSVII2C1_IC_TAR_BASE_IDX 0 +#define regCKSVII2C1_IC_SAR 0x0082 +#define regCKSVII2C1_IC_SAR_BASE_IDX 0 +#define regCKSVII2C1_IC_HS_MADDR 0x0083 +#define regCKSVII2C1_IC_HS_MADDR_BASE_IDX 0 +#define regCKSVII2C1_IC_DATA_CMD 0x0084 +#define regCKSVII2C1_IC_DATA_CMD_BASE_IDX 0 +#define regCKSVII2C1_IC_SS_SCL_HCNT 0x0085 +#define regCKSVII2C1_IC_SS_SCL_HCNT_BASE_IDX 0 +#define regCKSVII2C1_IC_SS_SCL_LCNT 0x0086 +#define regCKSVII2C1_IC_SS_SCL_LCNT_BASE_IDX 0 
+#define regCKSVII2C1_IC_FS_SCL_HCNT 0x0087 +#define regCKSVII2C1_IC_FS_SCL_HCNT_BASE_IDX 0 +#define regCKSVII2C1_IC_FS_SCL_LCNT 0x0088 +#define regCKSVII2C1_IC_FS_SCL_LCNT_BASE_IDX 0 +#define regCKSVII2C1_IC_HS_SCL_HCNT 0x0089 +#define regCKSVII2C1_IC_HS_SCL_HCNT_BASE_IDX 0 +#define regCKSVII2C1_IC_HS_SCL_LCNT 0x008a +#define regCKSVII2C1_IC_HS_SCL_LCNT_BASE_IDX 0 +#define regCKSVII2C1_IC_INTR_STAT 0x008b +#define regCKSVII2C1_IC_INTR_STAT_BASE_IDX 0 +#define regCKSVII2C1_IC_INTR_MASK 0x008c +#define regCKSVII2C1_IC_INTR_MASK_BASE_IDX 0 +#define regCKSVII2C1_IC_RAW_INTR_STAT 0x008d +#define regCKSVII2C1_IC_RAW_INTR_STAT_BASE_IDX 0 +#define regCKSVII2C1_IC_RX_TL 0x008e +#define regCKSVII2C1_IC_RX_TL_BASE_IDX 0 +#define regCKSVII2C1_IC_TX_TL 0x008f +#define regCKSVII2C1_IC_TX_TL_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_INTR 0x0090 +#define regCKSVII2C1_IC_CLR_INTR_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_RX_UNDER 0x0091 +#define regCKSVII2C1_IC_CLR_RX_UNDER_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_RX_OVER 0x0092 +#define regCKSVII2C1_IC_CLR_RX_OVER_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_TX_OVER 0x0093 +#define regCKSVII2C1_IC_CLR_TX_OVER_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_RD_REQ 0x0094 +#define regCKSVII2C1_IC_CLR_RD_REQ_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_TX_ABRT 0x0095 +#define regCKSVII2C1_IC_CLR_TX_ABRT_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_RX_DONE 0x0096 +#define regCKSVII2C1_IC_CLR_RX_DONE_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_ACTIVITY 0x0097 +#define regCKSVII2C1_IC_CLR_ACTIVITY_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_STOP_DET 0x0098 +#define regCKSVII2C1_IC_CLR_STOP_DET_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_START_DET 0x0099 +#define regCKSVII2C1_IC_CLR_START_DET_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_GEN_CALL 0x009a +#define regCKSVII2C1_IC_CLR_GEN_CALL_BASE_IDX 0 +#define regCKSVII2C1_IC_ENABLE 0x009b +#define regCKSVII2C1_IC_ENABLE_BASE_IDX 0 +#define regCKSVII2C1_IC_STATUS 0x009c +#define regCKSVII2C1_IC_STATUS_BASE_IDX 0 +#define regCKSVII2C1_IC_TXFLR 0x009d +#define regCKSVII2C1_IC_TXFLR_BASE_IDX 0 +#define regCKSVII2C1_IC_RXFLR 0x009e +#define regCKSVII2C1_IC_RXFLR_BASE_IDX 0 +#define regCKSVII2C1_IC_SDA_HOLD 0x009f +#define regCKSVII2C1_IC_SDA_HOLD_BASE_IDX 0 +#define regCKSVII2C1_IC_TX_ABRT_SOURCE 0x00a0 +#define regCKSVII2C1_IC_TX_ABRT_SOURCE_BASE_IDX 0 +#define regCKSVII2C1_IC_SLV_DATA_NACK_ONLY 0x00a1 +#define regCKSVII2C1_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0 +#define regCKSVII2C1_IC_DMA_CR 0x00a2 +#define regCKSVII2C1_IC_DMA_CR_BASE_IDX 0 +#define regCKSVII2C1_IC_DMA_TDLR 0x00a3 +#define regCKSVII2C1_IC_DMA_TDLR_BASE_IDX 0 +#define regCKSVII2C1_IC_DMA_RDLR 0x00a4 +#define regCKSVII2C1_IC_DMA_RDLR_BASE_IDX 0 +#define regCKSVII2C1_IC_SDA_SETUP 0x00a5 +#define regCKSVII2C1_IC_SDA_SETUP_BASE_IDX 0 +#define regCKSVII2C1_IC_ACK_GENERAL_CALL 0x00a6 +#define regCKSVII2C1_IC_ACK_GENERAL_CALL_BASE_IDX 0 +#define regCKSVII2C1_IC_ENABLE_STATUS 0x00a7 +#define regCKSVII2C1_IC_ENABLE_STATUS_BASE_IDX 0 +#define regCKSVII2C1_IC_FS_SPKLEN 0x00a8 +#define regCKSVII2C1_IC_FS_SPKLEN_BASE_IDX 0 +#define regCKSVII2C1_IC_HS_SPKLEN 0x00a9 +#define regCKSVII2C1_IC_HS_SPKLEN_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_RESTART_DET 0x00aa +#define regCKSVII2C1_IC_CLR_RESTART_DET_BASE_IDX 0 +#define regCKSVII2C1_IC_COMP_PARAM_1 0x00ad +#define regCKSVII2C1_IC_COMP_PARAM_1_BASE_IDX 0 +#define regCKSVII2C1_IC_COMP_VERSION 0x00ae +#define regCKSVII2C1_IC_COMP_VERSION_BASE_IDX 0 +#define regCKSVII2C1_IC_COMP_TYPE 0x00af +#define regCKSVII2C1_IC_COMP_TYPE_BASE_IDX 0 +#define regSMUIO_PWRMGT 0x018c 
+#define regSMUIO_PWRMGT_BASE_IDX 0 + + +// addressBlock: smuio_smuio_rom_SmuSmuioDec +// base address: 0x5a380 +#define regROM_CNTL 0x00e0 +#define regROM_CNTL_BASE_IDX 0 +#define regPAGE_MIRROR_CNTL 0x00e1 +#define regPAGE_MIRROR_CNTL_BASE_IDX 0 +#define regROM_STATUS 0x00e2 +#define regROM_STATUS_BASE_IDX 0 +#define regCGTT_ROM_CLK_CTRL0 0x00e3 +#define regCGTT_ROM_CLK_CTRL0_BASE_IDX 0 +#define regROM_INDEX 0x00e4 +#define regROM_INDEX_BASE_IDX 0 +#define regROM_DATA 0x00e5 +#define regROM_DATA_BASE_IDX 0 +#define regROM_START 0x00e6 +#define regROM_START_BASE_IDX 0 +#define regROM_SW_CNTL 0x00e8 +#define regROM_SW_CNTL_BASE_IDX 0 +#define regROM_SW_STATUS 0x00e9 +#define regROM_SW_STATUS_BASE_IDX 0 +#define regROM_SW_COMMAND 0x00ea +#define regROM_SW_COMMAND_BASE_IDX 0 +#define regROM_SW_DATA_1 0x00ec +#define regROM_SW_DATA_1_BASE_IDX 0 +#define regROM_SW_DATA_2 0x00ed +#define regROM_SW_DATA_2_BASE_IDX 0 +#define regROM_SW_DATA_3 0x00ee +#define regROM_SW_DATA_3_BASE_IDX 0 +#define regROM_SW_DATA_4 0x00ef +#define regROM_SW_DATA_4_BASE_IDX 0 +#define regROM_SW_DATA_5 0x00f0 +#define regROM_SW_DATA_5_BASE_IDX 0 +#define regROM_SW_DATA_6 0x00f1 +#define regROM_SW_DATA_6_BASE_IDX 0 +#define regROM_SW_DATA_7 0x00f2 +#define regROM_SW_DATA_7_BASE_IDX 0 +#define regROM_SW_DATA_8 0x00f3 +#define regROM_SW_DATA_8_BASE_IDX 0 +#define regROM_SW_DATA_9 0x00f4 +#define regROM_SW_DATA_9_BASE_IDX 0 +#define regROM_SW_DATA_10 0x00f5 +#define regROM_SW_DATA_10_BASE_IDX 0 +#define regROM_SW_DATA_11 0x00f6 +#define regROM_SW_DATA_11_BASE_IDX 0 +#define regROM_SW_DATA_12 0x00f7 +#define regROM_SW_DATA_12_BASE_IDX 0 +#define regROM_SW_DATA_13 0x00f8 +#define regROM_SW_DATA_13_BASE_IDX 0 +#define regROM_SW_DATA_14 0x00f9 +#define regROM_SW_DATA_14_BASE_IDX 0 +#define regROM_SW_DATA_15 0x00fa +#define regROM_SW_DATA_15_BASE_IDX 0 +#define regROM_SW_DATA_16 0x00fb +#define regROM_SW_DATA_16_BASE_IDX 0 +#define regROM_SW_DATA_17 0x00fc +#define regROM_SW_DATA_17_BASE_IDX 0 +#define regROM_SW_DATA_18 0x00fd +#define regROM_SW_DATA_18_BASE_IDX 0 +#define regROM_SW_DATA_19 0x00fe +#define regROM_SW_DATA_19_BASE_IDX 0 +#define regROM_SW_DATA_20 0x00ff +#define regROM_SW_DATA_20_BASE_IDX 0 +#define regROM_SW_DATA_21 0x0100 +#define regROM_SW_DATA_21_BASE_IDX 0 +#define regROM_SW_DATA_22 0x0101 +#define regROM_SW_DATA_22_BASE_IDX 0 +#define regROM_SW_DATA_23 0x0102 +#define regROM_SW_DATA_23_BASE_IDX 0 +#define regROM_SW_DATA_24 0x0103 +#define regROM_SW_DATA_24_BASE_IDX 0 +#define regROM_SW_DATA_25 0x0104 +#define regROM_SW_DATA_25_BASE_IDX 0 +#define regROM_SW_DATA_26 0x0105 +#define regROM_SW_DATA_26_BASE_IDX 0 +#define regROM_SW_DATA_27 0x0106 +#define regROM_SW_DATA_27_BASE_IDX 0 +#define regROM_SW_DATA_28 0x0107 +#define regROM_SW_DATA_28_BASE_IDX 0 +#define regROM_SW_DATA_29 0x0108 +#define regROM_SW_DATA_29_BASE_IDX 0 +#define regROM_SW_DATA_30 0x0109 +#define regROM_SW_DATA_30_BASE_IDX 0 +#define regROM_SW_DATA_31 0x010a +#define regROM_SW_DATA_31_BASE_IDX 0 +#define regROM_SW_DATA_32 0x010b +#define regROM_SW_DATA_32_BASE_IDX 0 +#define regROM_SW_DATA_33 0x010c +#define regROM_SW_DATA_33_BASE_IDX 0 +#define regROM_SW_DATA_34 0x010d +#define regROM_SW_DATA_34_BASE_IDX 0 +#define regROM_SW_DATA_35 0x010e +#define regROM_SW_DATA_35_BASE_IDX 0 +#define regROM_SW_DATA_36 0x010f +#define regROM_SW_DATA_36_BASE_IDX 0 +#define regROM_SW_DATA_37 0x0110 +#define regROM_SW_DATA_37_BASE_IDX 0 +#define regROM_SW_DATA_38 0x0111 +#define regROM_SW_DATA_38_BASE_IDX 0 +#define regROM_SW_DATA_39 0x0112 +#define 
regROM_SW_DATA_39_BASE_IDX 0 +#define regROM_SW_DATA_40 0x0113 +#define regROM_SW_DATA_40_BASE_IDX 0 +#define regROM_SW_DATA_41 0x0114 +#define regROM_SW_DATA_41_BASE_IDX 0 +#define regROM_SW_DATA_42 0x0115 +#define regROM_SW_DATA_42_BASE_IDX 0 +#define regROM_SW_DATA_43 0x0116 +#define regROM_SW_DATA_43_BASE_IDX 0 +#define regROM_SW_DATA_44 0x0117 +#define regROM_SW_DATA_44_BASE_IDX 0 +#define regROM_SW_DATA_45 0x0118 +#define regROM_SW_DATA_45_BASE_IDX 0 +#define regROM_SW_DATA_46 0x0119 +#define regROM_SW_DATA_46_BASE_IDX 0 +#define regROM_SW_DATA_47 0x011a +#define regROM_SW_DATA_47_BASE_IDX 0 +#define regROM_SW_DATA_48 0x011b +#define regROM_SW_DATA_48_BASE_IDX 0 +#define regROM_SW_DATA_49 0x011c +#define regROM_SW_DATA_49_BASE_IDX 0 +#define regROM_SW_DATA_50 0x011d +#define regROM_SW_DATA_50_BASE_IDX 0 +#define regROM_SW_DATA_51 0x011e +#define regROM_SW_DATA_51_BASE_IDX 0 +#define regROM_SW_DATA_52 0x011f +#define regROM_SW_DATA_52_BASE_IDX 0 +#define regROM_SW_DATA_53 0x0120 +#define regROM_SW_DATA_53_BASE_IDX 0 +#define regROM_SW_DATA_54 0x0121 +#define regROM_SW_DATA_54_BASE_IDX 0 +#define regROM_SW_DATA_55 0x0122 +#define regROM_SW_DATA_55_BASE_IDX 0 +#define regROM_SW_DATA_56 0x0123 +#define regROM_SW_DATA_56_BASE_IDX 0 +#define regROM_SW_DATA_57 0x0124 +#define regROM_SW_DATA_57_BASE_IDX 0 +#define regROM_SW_DATA_58 0x0125 +#define regROM_SW_DATA_58_BASE_IDX 0 +#define regROM_SW_DATA_59 0x0126 +#define regROM_SW_DATA_59_BASE_IDX 0 +#define regROM_SW_DATA_60 0x0127 +#define regROM_SW_DATA_60_BASE_IDX 0 +#define regROM_SW_DATA_61 0x0128 +#define regROM_SW_DATA_61_BASE_IDX 0 +#define regROM_SW_DATA_62 0x0129 +#define regROM_SW_DATA_62_BASE_IDX 0 +#define regROM_SW_DATA_63 0x012a +#define regROM_SW_DATA_63_BASE_IDX 0 +#define regROM_SW_DATA_64 0x012b +#define regROM_SW_DATA_64_BASE_IDX 0 + + +// addressBlock: smuio_smuio_gpio_SmuSmuioDec +// base address: 0x5a500 +#define regSMU_GPIOPAD_SW_INT_STAT 0x0140 +#define regSMU_GPIOPAD_SW_INT_STAT_BASE_IDX 0 +#define regSMU_GPIOPAD_MASK 0x0141 +#define regSMU_GPIOPAD_MASK_BASE_IDX 0 +#define regSMU_GPIOPAD_A 0x0142 +#define regSMU_GPIOPAD_A_BASE_IDX 0 +#define regSMU_GPIOPAD_TXIMPSEL 0x0143 +#define regSMU_GPIOPAD_TXIMPSEL_BASE_IDX 0 +#define regSMU_GPIOPAD_EN 0x0144 +#define regSMU_GPIOPAD_EN_BASE_IDX 0 +#define regSMU_GPIOPAD_Y 0x0145 +#define regSMU_GPIOPAD_Y_BASE_IDX 0 +#define regSMU_GPIOPAD_RXEN 0x0146 +#define regSMU_GPIOPAD_RXEN_BASE_IDX 0 +#define regSMU_GPIOPAD_RCVR_SEL0 0x0147 +#define regSMU_GPIOPAD_RCVR_SEL0_BASE_IDX 0 +#define regSMU_GPIOPAD_RCVR_SEL1 0x0148 +#define regSMU_GPIOPAD_RCVR_SEL1_BASE_IDX 0 +#define regSMU_GPIOPAD_PU_EN 0x0149 +#define regSMU_GPIOPAD_PU_EN_BASE_IDX 0 +#define regSMU_GPIOPAD_PD_EN 0x014a +#define regSMU_GPIOPAD_PD_EN_BASE_IDX 0 +#define regSMU_GPIOPAD_PINSTRAPS 0x014b +#define regSMU_GPIOPAD_PINSTRAPS_BASE_IDX 0 +#define regDFT_PINSTRAPS 0x014c +#define regDFT_PINSTRAPS_BASE_IDX 0 +#define regSMU_GPIOPAD_INT_STAT_EN 0x014d +#define regSMU_GPIOPAD_INT_STAT_EN_BASE_IDX 0 +#define regSMU_GPIOPAD_INT_STAT 0x014e +#define regSMU_GPIOPAD_INT_STAT_BASE_IDX 0 +#define regSMU_GPIOPAD_INT_STAT_AK 0x014f +#define regSMU_GPIOPAD_INT_STAT_AK_BASE_IDX 0 +#define regSMU_GPIOPAD_INT_EN 0x0150 +#define regSMU_GPIOPAD_INT_EN_BASE_IDX 0 +#define regSMU_GPIOPAD_INT_TYPE 0x0151 +#define regSMU_GPIOPAD_INT_TYPE_BASE_IDX 0 +#define regSMU_GPIOPAD_INT_POLARITY 0x0152 +#define regSMU_GPIOPAD_INT_POLARITY_BASE_IDX 0 +#define regSMUIO_PCC_GPIO_SELECT 0x0155 +#define regSMUIO_PCC_GPIO_SELECT_BASE_IDX 0 +#define 
regSMU_GPIOPAD_S0 0x0156 +#define regSMU_GPIOPAD_S0_BASE_IDX 0 +#define regSMU_GPIOPAD_S1 0x0157 +#define regSMU_GPIOPAD_S1_BASE_IDX 0 +#define regSMU_GPIOPAD_SCHMEN 0x0158 +#define regSMU_GPIOPAD_SCHMEN_BASE_IDX 0 +#define regSMU_GPIOPAD_SCL_EN 0x0159 +#define regSMU_GPIOPAD_SCL_EN_BASE_IDX 0 +#define regSMU_GPIOPAD_SDA_EN 0x015a +#define regSMU_GPIOPAD_SDA_EN_BASE_IDX 0 +#define regSMUIO_GPIO_INT0_SELECT 0x015b +#define regSMUIO_GPIO_INT0_SELECT_BASE_IDX 0 +#define regSMUIO_GPIO_INT1_SELECT 0x015c +#define regSMUIO_GPIO_INT1_SELECT_BASE_IDX 0 +#define regSMUIO_GPIO_INT2_SELECT 0x015d +#define regSMUIO_GPIO_INT2_SELECT_BASE_IDX 0 +#define regSMUIO_GPIO_INT3_SELECT 0x015e +#define regSMUIO_GPIO_INT3_SELECT_BASE_IDX 0 +#define regSMU_GPIOPAD_MP_INT0_STAT 0x015f +#define regSMU_GPIOPAD_MP_INT0_STAT_BASE_IDX 0 +#define regSMU_GPIOPAD_MP_INT1_STAT 0x0160 +#define regSMU_GPIOPAD_MP_INT1_STAT_BASE_IDX 0 +#define regSMU_GPIOPAD_MP_INT2_STAT 0x0161 +#define regSMU_GPIOPAD_MP_INT2_STAT_BASE_IDX 0 +#define regSMU_GPIOPAD_MP_INT3_STAT 0x0162 +#define regSMU_GPIOPAD_MP_INT3_STAT_BASE_IDX 0 +#define regSMIO_INDEX 0x0163 +#define regSMIO_INDEX_BASE_IDX 0 +#define regS0_VID_SMIO_CNTL 0x0164 +#define regS0_VID_SMIO_CNTL_BASE_IDX 0 +#define regS1_VID_SMIO_CNTL 0x0165 +#define regS1_VID_SMIO_CNTL_BASE_IDX 0 +#define regOPEN_DRAIN_SELECT 0x0166 +#define regOPEN_DRAIN_SELECT_BASE_IDX 0 +#define regSMIO_ENABLE 0x0167 +#define regSMIO_ENABLE_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h new file mode 100644 index 0000000000..6204505e55 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h @@ -0,0 +1,1106 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _smuio_14_0_2_SH_MASK_HEADER +#define _smuio_14_0_2_SH_MASK_HEADER + + +// addressBlock: smuio_smuio_tsc_SmuSmuioDec +//PWROK_REFCLK_GAP_CYCLES +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT 0x0 +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8 +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK 0x000000FFL +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK 0x0000FF00L +//GOLDEN_TSC_INCREMENT_UPPER +#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0 +#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK 0x00FFFFFFL +//GOLDEN_TSC_INCREMENT_LOWER +#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0 +#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK 0xFFFFFFFFL +//GOLDEN_TSC_COUNT_UPPER +#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT 0x0 +#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL +//GOLDEN_TSC_COUNT_LOWER +#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT 0x0 +#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK 0xFFFFFFFFL +//SOC_GOLDEN_TSC_SHADOW_UPPER +#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0 +#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK 0x00FFFFFFL +//SOC_GOLDEN_TSC_SHADOW_LOWER +#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0 +#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK 0xFFFFFFFFL +//SOC_GAP_PWROK +#define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT 0x0 +#define SOC_GAP_PWROK__soc_gap_pwrok_MASK 0x00000001L + + +// addressBlock: smuio_smuio_swtimer_SmuSmuioDec +//PWR_VIRT_RESET_REQ +#define PWR_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0 +#define PWR_VIRT_RESET_REQ__PF_FLR__SHIFT 0x1f +#define PWR_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL +#define PWR_VIRT_RESET_REQ__PF_FLR_MASK 0x80000000L +//PWR_DISP_TIMER_CONTROL +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L +//PWR_DISP_TIMER_DEBUG +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x00000004L +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L +//PWR_DISP_TIMER2_CONTROL +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L +//PWR_DISP_TIMER2_DEBUG +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x00000004L +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L +//PWR_DISP_TIMER_GLOBAL_CONTROL +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0 +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT 0xa +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK 0x000003FFL +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK 0x00000400L +//PWR_IH_CONTROL +#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0 +#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT 0x5 +#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT 0x6 +#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT 0x1f +#define PWR_IH_CONTROL__MAX_CREDIT_MASK 0x0000001FL +#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK 0x00000020L +#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK 0x00000040L +#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK 0x80000000L + + +// addressBlock: smuio_smuio_misc_SmuSmuioDec +//SMUIO_MCM_CONFIG +#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT 0x0 +#define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT 0x2 +#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x8 +#define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT 0xc +#define SMUIO_MCM_CONFIG__DIE_CONFIG__SHIFT 0xd +#define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT 0x10 +#define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT 0x11 +#define SMUIO_MCM_CONFIG__DIE_ID_MASK 0x00000003L +#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000001CL +#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000300L +#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x00001000L +#define SMUIO_MCM_CONFIG__CONSOLE_K_MASK 0x00010000L +#define SMUIO_MCM_CONFIG__CONSOLE_A_MASK 0x00020000L +//IP_DISCOVERY_VERSION +#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT 0x0 +#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER0 +#define SCRATCH_REGISTER0__ScratchPad0__SHIFT 0x0 +#define SCRATCH_REGISTER0__ScratchPad0_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER1 +#define SCRATCH_REGISTER1__ScratchPad1__SHIFT 0x0 +#define SCRATCH_REGISTER1__ScratchPad1_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER2 +#define SCRATCH_REGISTER2__ScratchPad2__SHIFT 0x0 +#define SCRATCH_REGISTER2__ScratchPad2_MASK 0xFFFFFFFFL 
+//SCRATCH_REGISTER3 +#define SCRATCH_REGISTER3__ScratchPad3__SHIFT 0x0 +#define SCRATCH_REGISTER3__ScratchPad3_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER4 +#define SCRATCH_REGISTER4__ScratchPad4__SHIFT 0x0 +#define SCRATCH_REGISTER4__ScratchPad4_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER5 +#define SCRATCH_REGISTER5__ScratchPad5__SHIFT 0x0 +#define SCRATCH_REGISTER5__ScratchPad5_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER6 +#define SCRATCH_REGISTER6__ScratchPad6__SHIFT 0x0 +#define SCRATCH_REGISTER6__ScratchPad6_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER7 +#define SCRATCH_REGISTER7__ScratchPad7__SHIFT 0x0 +#define SCRATCH_REGISTER7__ScratchPad7_MASK 0xFFFFFFFFL + + +// addressBlock: smuio_smuio_i2c_SmuSmuioDec +//CKSVII2C_IC_CON +#define CKSVII2C_IC_CON__IC_MASTER_MODE__SHIFT 0x0 +#define CKSVII2C_IC_CON__IC_MAX_SPEED_MODE__SHIFT 0x1 +#define CKSVII2C_IC_CON__IC_10BITADDR_SLAVE__SHIFT 0x3 +#define CKSVII2C_IC_CON__IC_10BITADDR_MASTER__SHIFT 0x4 +#define CKSVII2C_IC_CON__IC_RESTART_EN__SHIFT 0x5 +#define CKSVII2C_IC_CON__IC_SLAVE_DISABLE__SHIFT 0x6 +#define CKSVII2C_IC_CON__STOP_DET_IFADDRESSED__SHIFT 0x7 +#define CKSVII2C_IC_CON__TX_EMPTY_CTRL__SHIFT 0x8 +#define CKSVII2C_IC_CON__RX_FIFO_FULL_HLD_CTRL__SHIFT 0x9 +#define CKSVII2C_IC_CON__BUS_CLEAR_FEATURE_CTRL__SHIFT 0xb +#define CKSVII2C_IC_CON__IC_MASTER_MODE_MASK 0x00000001L +#define CKSVII2C_IC_CON__IC_MAX_SPEED_MODE_MASK 0x00000006L +#define CKSVII2C_IC_CON__IC_10BITADDR_SLAVE_MASK 0x00000008L +#define CKSVII2C_IC_CON__IC_10BITADDR_MASTER_MASK 0x00000010L +#define CKSVII2C_IC_CON__IC_RESTART_EN_MASK 0x00000020L +#define CKSVII2C_IC_CON__IC_SLAVE_DISABLE_MASK 0x00000040L +#define CKSVII2C_IC_CON__STOP_DET_IFADDRESSED_MASK 0x00000080L +#define CKSVII2C_IC_CON__TX_EMPTY_CTRL_MASK 0x00000100L +#define CKSVII2C_IC_CON__RX_FIFO_FULL_HLD_CTRL_MASK 0x00000200L +//CKSVII2C_IC_TAR +#define CKSVII2C_IC_TAR__IC_TAR__SHIFT 0x0 +#define CKSVII2C_IC_TAR__GC_OR_START__SHIFT 0xa +#define CKSVII2C_IC_TAR__SPECIAL__SHIFT 0xb +#define CKSVII2C_IC_TAR__IC_10BITADDR_MASTER__SHIFT 0xc +#define CKSVII2C_IC_TAR__IC_TAR_MASK 0x000003FFL +#define CKSVII2C_IC_TAR__GC_OR_START_MASK 0x00000400L +#define CKSVII2C_IC_TAR__SPECIAL_MASK 0x00000800L +#define CKSVII2C_IC_TAR__IC_10BITADDR_MASTER_MASK 0x00001000L +//CKSVII2C_IC_SAR +#define CKSVII2C_IC_SAR__IC_SAR__SHIFT 0x0 +#define CKSVII2C_IC_SAR__IC_SAR_MASK 0x000003FFL +//CKSVII2C_IC_HS_MADDR +#define CKSVII2C_IC_HS_MADDR__IC_HS_MADDR__SHIFT 0x0 +#define CKSVII2C_IC_HS_MADDR__IC_HS_MADDR_MASK 0x00000007L +//CKSVII2C_IC_DATA_CMD +#define CKSVII2C_IC_DATA_CMD__DAT__SHIFT 0x0 +#define CKSVII2C_IC_DATA_CMD__CMD__SHIFT 0x8 +#define CKSVII2C_IC_DATA_CMD__STOP__SHIFT 0x9 +#define CKSVII2C_IC_DATA_CMD__RESTART__SHIFT 0xa +#define CKSVII2C_IC_DATA_CMD__FIRST_DATA_BYTE__SHIFT 0xb +#define CKSVII2C_IC_DATA_CMD__DAT_MASK 0x000000FFL +#define CKSVII2C_IC_DATA_CMD__CMD_MASK 0x00000100L +#define CKSVII2C_IC_DATA_CMD__STOP_MASK 0x00000200L +#define CKSVII2C_IC_DATA_CMD__RESTART_MASK 0x00000400L +//CKSVII2C_IC_SS_SCL_HCNT +#define CKSVII2C_IC_SS_SCL_HCNT__IC_SS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C_IC_SS_SCL_HCNT__IC_SS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C_IC_SS_SCL_LCNT +#define CKSVII2C_IC_SS_SCL_LCNT__IC_SS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C_IC_SS_SCL_LCNT__IC_SS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C_IC_FS_SCL_HCNT +#define CKSVII2C_IC_FS_SCL_HCNT__IC_FS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C_IC_FS_SCL_HCNT__IC_FS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C_IC_FS_SCL_LCNT +#define CKSVII2C_IC_FS_SCL_LCNT__IC_FS_SCL_LCNT__SHIFT 0x0 +#define 
CKSVII2C_IC_FS_SCL_LCNT__IC_FS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C_IC_HS_SCL_HCNT +#define CKSVII2C_IC_HS_SCL_HCNT__IC_HS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C_IC_HS_SCL_HCNT__IC_HS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C_IC_HS_SCL_LCNT +#define CKSVII2C_IC_HS_SCL_LCNT__IC_HS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C_IC_HS_SCL_LCNT__IC_HS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C_IC_INTR_STAT +#define CKSVII2C_IC_INTR_STAT__R_RX_UNDER__SHIFT 0x0 +#define CKSVII2C_IC_INTR_STAT__R_RX_OVER__SHIFT 0x1 +#define CKSVII2C_IC_INTR_STAT__R_RX_FULL__SHIFT 0x2 +#define CKSVII2C_IC_INTR_STAT__R_TX_OVER__SHIFT 0x3 +#define CKSVII2C_IC_INTR_STAT__R_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C_IC_INTR_STAT__R_RD_REQ__SHIFT 0x5 +#define CKSVII2C_IC_INTR_STAT__R_TX_ABRT__SHIFT 0x6 +#define CKSVII2C_IC_INTR_STAT__R_RX_DONE__SHIFT 0x7 +#define CKSVII2C_IC_INTR_STAT__R_ACTIVITY__SHIFT 0x8 +#define CKSVII2C_IC_INTR_STAT__R_STOP_DET__SHIFT 0x9 +#define CKSVII2C_IC_INTR_STAT__R_START_DET__SHIFT 0xa +#define CKSVII2C_IC_INTR_STAT__R_GEN_CALL__SHIFT 0xb +#define CKSVII2C_IC_INTR_STAT__R_RESTART_DET__SHIFT 0xc +#define CKSVII2C_IC_INTR_STAT__R_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C_IC_INTR_STAT__R_SCL_STUCK_AT_LOW__SHIFT 0xe +#define CKSVII2C_IC_INTR_STAT__R_RX_UNDER_MASK 0x00000001L +#define CKSVII2C_IC_INTR_STAT__R_RX_OVER_MASK 0x00000002L +#define CKSVII2C_IC_INTR_STAT__R_RX_FULL_MASK 0x00000004L +#define CKSVII2C_IC_INTR_STAT__R_TX_OVER_MASK 0x00000008L +#define CKSVII2C_IC_INTR_STAT__R_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C_IC_INTR_STAT__R_RD_REQ_MASK 0x00000020L +#define CKSVII2C_IC_INTR_STAT__R_TX_ABRT_MASK 0x00000040L +#define CKSVII2C_IC_INTR_STAT__R_RX_DONE_MASK 0x00000080L +#define CKSVII2C_IC_INTR_STAT__R_ACTIVITY_MASK 0x00000100L +#define CKSVII2C_IC_INTR_STAT__R_STOP_DET_MASK 0x00000200L +#define CKSVII2C_IC_INTR_STAT__R_START_DET_MASK 0x00000400L +#define CKSVII2C_IC_INTR_STAT__R_GEN_CALL_MASK 0x00000800L +#define CKSVII2C_IC_INTR_STAT__R_RESTART_DET_MASK 0x00001000L +#define CKSVII2C_IC_INTR_STAT__R_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C_IC_INTR_MASK +#define CKSVII2C_IC_INTR_MASK__M_RX_UNDER__SHIFT 0x0 +#define CKSVII2C_IC_INTR_MASK__M_RX_OVER__SHIFT 0x1 +#define CKSVII2C_IC_INTR_MASK__M_RX_FULL__SHIFT 0x2 +#define CKSVII2C_IC_INTR_MASK__M_TX_OVER__SHIFT 0x3 +#define CKSVII2C_IC_INTR_MASK__M_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C_IC_INTR_MASK__M_RD_REQ__SHIFT 0x5 +#define CKSVII2C_IC_INTR_MASK__M_TX_ABRT__SHIFT 0x6 +#define CKSVII2C_IC_INTR_MASK__M_RX_DONE__SHIFT 0x7 +#define CKSVII2C_IC_INTR_MASK__M_ACTIVITY__SHIFT 0x8 +#define CKSVII2C_IC_INTR_MASK__M_STOP_DET__SHIFT 0x9 +#define CKSVII2C_IC_INTR_MASK__M_START_DET__SHIFT 0xa +#define CKSVII2C_IC_INTR_MASK__M_GEN_CALL__SHIFT 0xb +#define CKSVII2C_IC_INTR_MASK__M_RESTART_DET__SHIFT 0xc +#define CKSVII2C_IC_INTR_MASK__M_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C_IC_INTR_MASK__M_SCL_STUCK_AT_LOW__SHIFT 0xe +#define CKSVII2C_IC_INTR_MASK__M_RX_UNDER_MASK 0x00000001L +#define CKSVII2C_IC_INTR_MASK__M_RX_OVER_MASK 0x00000002L +#define CKSVII2C_IC_INTR_MASK__M_RX_FULL_MASK 0x00000004L +#define CKSVII2C_IC_INTR_MASK__M_TX_OVER_MASK 0x00000008L +#define CKSVII2C_IC_INTR_MASK__M_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C_IC_INTR_MASK__M_RD_REQ_MASK 0x00000020L +#define CKSVII2C_IC_INTR_MASK__M_TX_ABRT_MASK 0x00000040L +#define CKSVII2C_IC_INTR_MASK__M_RX_DONE_MASK 0x00000080L +#define CKSVII2C_IC_INTR_MASK__M_ACTIVITY_MASK 0x00000100L +#define CKSVII2C_IC_INTR_MASK__M_STOP_DET_MASK 0x00000200L +#define CKSVII2C_IC_INTR_MASK__M_START_DET_MASK 0x00000400L +#define 
CKSVII2C_IC_INTR_MASK__M_GEN_CALL_MASK 0x00000800L +#define CKSVII2C_IC_INTR_MASK__M_RESTART_DET_MASK 0x00001000L +#define CKSVII2C_IC_INTR_MASK__M_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C_IC_RAW_INTR_STAT +//CKSVII2C_IC_RX_TL +#define CKSVII2C_IC_RX_TL__RX_TL__SHIFT 0x0 +//CKSVII2C_IC_TX_TL +#define CKSVII2C_IC_TX_TL__TX_TL__SHIFT 0x0 +//CKSVII2C_IC_CLR_INTR +//CKSVII2C_IC_CLR_RX_UNDER +//CKSVII2C_IC_CLR_RX_OVER +//CKSVII2C_IC_CLR_TX_OVER +//CKSVII2C_IC_CLR_RD_REQ +//CKSVII2C_IC_CLR_TX_ABRT +//CKSVII2C_IC_CLR_RX_DONE +//CKSVII2C_IC_CLR_ACTIVITY +//CKSVII2C_IC_CLR_STOP_DET +//CKSVII2C_IC_CLR_START_DET +//CKSVII2C_IC_CLR_GEN_CALL +//CKSVII2C_IC_ENABLE +#define CKSVII2C_IC_ENABLE__ENABLE__SHIFT 0x0 +#define CKSVII2C_IC_ENABLE__ABORT__SHIFT 0x1 +#define CKSVII2C_IC_ENABLE__TX_CMD_BLOCK__SHIFT 0x2 +#define CKSVII2C_IC_ENABLE__SDA_STUCK_RECOVERY_ENABLE__SHIFT 0x3 +#define CKSVII2C_IC_ENABLE__ENABLE_MASK 0x00000001L +#define CKSVII2C_IC_ENABLE__ABORT_MASK 0x00000002L +//CKSVII2C_IC_STATUS +#define CKSVII2C_IC_STATUS__ACTIVITY__SHIFT 0x0 +#define CKSVII2C_IC_STATUS__TFNF__SHIFT 0x1 +#define CKSVII2C_IC_STATUS__TFE__SHIFT 0x2 +#define CKSVII2C_IC_STATUS__RFNE__SHIFT 0x3 +#define CKSVII2C_IC_STATUS__RFF__SHIFT 0x4 +#define CKSVII2C_IC_STATUS__MST_ACTIVITY__SHIFT 0x5 +#define CKSVII2C_IC_STATUS__SLV_ACTIVITY__SHIFT 0x6 +#define CKSVII2C_IC_STATUS__MST_HOLD_TX_FIFO_EMPTY__SHIFT 0x7 +#define CKSVII2C_IC_STATUS__MST_HOLD_RX_FIFO_FULL__SHIFT 0x8 +#define CKSVII2C_IC_STATUS__SLV_HOLD_TX_FIFO_EMPTY__SHIFT 0x9 +#define CKSVII2C_IC_STATUS__SLV_HOLD_RX_FIFO_FULL__SHIFT 0xa +#define CKSVII2C_IC_STATUS__SDA_STUCK_NOT_RECOVERED__SHIFT 0xb +#define CKSVII2C_IC_STATUS__ACTIVITY_MASK 0x00000001L +#define CKSVII2C_IC_STATUS__TFNF_MASK 0x00000002L +#define CKSVII2C_IC_STATUS__TFE_MASK 0x00000004L +#define CKSVII2C_IC_STATUS__RFNE_MASK 0x00000008L +#define CKSVII2C_IC_STATUS__RFF_MASK 0x00000010L +#define CKSVII2C_IC_STATUS__MST_ACTIVITY_MASK 0x00000020L +#define CKSVII2C_IC_STATUS__SLV_ACTIVITY_MASK 0x00000040L +//CKSVII2C_IC_TXFLR +#define CKSVII2C_IC_TXFLR__TXFLR__SHIFT 0x0 +//CKSVII2C_IC_RXFLR +#define CKSVII2C_IC_RXFLR__RXFLR__SHIFT 0x0 +//CKSVII2C_IC_SDA_HOLD +#define CKSVII2C_IC_SDA_HOLD__IC_SDA_TX_HOLD__SHIFT 0x0 +#define CKSVII2C_IC_SDA_HOLD__IC_SDA_RX_HOLD__SHIFT 0x10 +//CKSVII2C_IC_TX_ABRT_SOURCE +//CKSVII2C_IC_SLV_DATA_NACK_ONLY +//CKSVII2C_IC_DMA_CR +//CKSVII2C_IC_DMA_TDLR +//CKSVII2C_IC_DMA_RDLR +//CKSVII2C_IC_SDA_SETUP +#define CKSVII2C_IC_SDA_SETUP__SDA_SETUP__SHIFT 0x0 +#define CKSVII2C_IC_SDA_SETUP__SDA_SETUP_MASK 0x000000FFL +//CKSVII2C_IC_ACK_GENERAL_CALL +#define CKSVII2C_IC_ACK_GENERAL_CALL__ACK_GENERAL_CALL__SHIFT 0x0 +#define CKSVII2C_IC_ACK_GENERAL_CALL__ACK_GENERAL_CALL_MASK 0x00000001L +//CKSVII2C_IC_ENABLE_STATUS +#define CKSVII2C_IC_ENABLE_STATUS__IC_EN__SHIFT 0x0 +#define CKSVII2C_IC_ENABLE_STATUS__SLV_DISABLED_WHILE_BUSY__SHIFT 0x1 +#define CKSVII2C_IC_ENABLE_STATUS__SLV_RX_DATA_LOST__SHIFT 0x2 +#define CKSVII2C_IC_ENABLE_STATUS__IC_EN_MASK 0x00000001L +//CKSVII2C_IC_FS_SPKLEN +#define CKSVII2C_IC_FS_SPKLEN__FS_SPKLEN__SHIFT 0x0 +#define CKSVII2C_IC_FS_SPKLEN__FS_SPKLEN_MASK 0x000000FFL +//CKSVII2C_IC_HS_SPKLEN +#define CKSVII2C_IC_HS_SPKLEN__HS_SPKLEN__SHIFT 0x0 +#define CKSVII2C_IC_HS_SPKLEN__HS_SPKLEN_MASK 0x000000FFL +//CKSVII2C_IC_CLR_RESTART_DET +//CKSVII2C_IC_COMP_PARAM_1 +#define CKSVII2C_IC_COMP_PARAM_1__APB_DATA_WIDTH__SHIFT 0x0 +#define CKSVII2C_IC_COMP_PARAM_1__MAX_SPEED_MODE__SHIFT 0x2 +#define CKSVII2C_IC_COMP_PARAM_1__HC_COUNT_VALUES__SHIFT 0x4 +#define 
CKSVII2C_IC_COMP_PARAM_1__INTR_IO__SHIFT 0x5 +#define CKSVII2C_IC_COMP_PARAM_1__HAS_DMA__SHIFT 0x6 +#define CKSVII2C_IC_COMP_PARAM_1__ADD_ENCODED_PARAMS__SHIFT 0x7 +#define CKSVII2C_IC_COMP_PARAM_1__RX_BUFFER_DEPTH__SHIFT 0x8 +#define CKSVII2C_IC_COMP_PARAM_1__TX_BUFFER_DEPTH__SHIFT 0x10 +//CKSVII2C_IC_COMP_VERSION +#define CKSVII2C_IC_COMP_VERSION__COMP_VERSION__SHIFT 0x0 +#define CKSVII2C_IC_COMP_VERSION__COMP_VERSION_MASK 0xFFFFFFFFL +//CKSVII2C_IC_COMP_TYPE +#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE__SHIFT 0x0 +#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE_MASK 0xFFFFFFFFL +//CKSVII2C1_IC_CON +#define CKSVII2C1_IC_CON__IC1_MASTER_MODE__SHIFT 0x0 +#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE__SHIFT 0x1 +#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE__SHIFT 0x3 +#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER__SHIFT 0x4 +#define CKSVII2C1_IC_CON__IC1_RESTART_EN__SHIFT 0x5 +#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE__SHIFT 0x6 +#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED__SHIFT 0x7 +#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL__SHIFT 0x8 +#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL__SHIFT 0x9 +#define CKSVII2C1_IC_CON__BUS_CLEAR_FEATURE_CTRL1__SHIFT 0xb +#define CKSVII2C1_IC_CON__IC1_MASTER_MODE_MASK 0x00000001L +#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE_MASK 0x00000006L +#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE_MASK 0x00000008L +#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER_MASK 0x00000010L +#define CKSVII2C1_IC_CON__IC1_RESTART_EN_MASK 0x00000020L +#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE_MASK 0x00000040L +#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED_MASK 0x00000080L +#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL_MASK 0x00000100L +#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL_MASK 0x00000200L +//CKSVII2C1_IC_TAR +#define CKSVII2C1_IC_TAR__IC1_TAR__SHIFT 0x0 +#define CKSVII2C1_IC_TAR__GC1_OR_START__SHIFT 0xa +#define CKSVII2C1_IC_TAR__SPECIAL1__SHIFT 0xb +#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER__SHIFT 0xc +#define CKSVII2C1_IC_TAR__IC1_TAR_MASK 0x000003FFL +#define CKSVII2C1_IC_TAR__GC1_OR_START_MASK 0x00000400L +#define CKSVII2C1_IC_TAR__SPECIAL1_MASK 0x00000800L +#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER_MASK 0x00001000L +//CKSVII2C1_IC_SAR +#define CKSVII2C1_IC_SAR__IC1_SAR__SHIFT 0x0 +#define CKSVII2C1_IC_SAR__IC1_SAR_MASK 0x000003FFL +//CKSVII2C1_IC_HS_MADDR +#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR__SHIFT 0x0 +#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR_MASK 0x00000007L +//CKSVII2C1_IC_DATA_CMD +#define CKSVII2C1_IC_DATA_CMD__DAT1__SHIFT 0x0 +#define CKSVII2C1_IC_DATA_CMD__CMD1__SHIFT 0x8 +#define CKSVII2C1_IC_DATA_CMD__STOP1__SHIFT 0x9 +#define CKSVII2C1_IC_DATA_CMD__RESTART1__SHIFT 0xa +#define CKSVII2C1_IC_DATA_CMD__FIRST1_DATA_BYTE__SHIFT 0xb +#define CKSVII2C1_IC_DATA_CMD__DAT1_MASK 0x000000FFL +#define CKSVII2C1_IC_DATA_CMD__CMD1_MASK 0x00000100L +#define CKSVII2C1_IC_DATA_CMD__STOP1_MASK 0x00000200L +#define CKSVII2C1_IC_DATA_CMD__RESTART1_MASK 0x00000400L +//CKSVII2C1_IC_SS_SCL_HCNT +#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_SS_SCL_LCNT +#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_FS_SCL_HCNT +#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_FS_SCL_LCNT +#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT__SHIFT 0x0 +#define 
CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_HS_SCL_HCNT +#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_HS_SCL_LCNT +#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_INTR_STAT +#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER__SHIFT 0x0 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER__SHIFT 0x1 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL__SHIFT 0x2 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER__SHIFT 0x3 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ__SHIFT 0x5 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT 0x6 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE__SHIFT 0x7 +#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY__SHIFT 0x8 +#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET__SHIFT 0x9 +#define CKSVII2C1_IC_INTR_STAT__R1_START_DET__SHIFT 0xa +#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL__SHIFT 0xb +#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET__SHIFT 0xc +#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C1_IC_INTR_STAT__R1_SCL_STUCK_AT_LOW__SHIFT 0xe +#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER_MASK 0x00000001L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER_MASK 0x00000002L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL_MASK 0x00000004L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER_MASK 0x00000008L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ_MASK 0x00000020L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK 0x00000040L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE_MASK 0x00000080L +#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY_MASK 0x00000100L +#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200L +#define CKSVII2C1_IC_INTR_STAT__R1_START_DET_MASK 0x00000400L +#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL_MASK 0x00000800L +#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET_MASK 0x00001000L +#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C1_IC_INTR_MASK +#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER__SHIFT 0x0 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER__SHIFT 0x1 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL__SHIFT 0x2 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER__SHIFT 0x3 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ__SHIFT 0x5 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT__SHIFT 0x6 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE__SHIFT 0x7 +#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY__SHIFT 0x8 +#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET__SHIFT 0x9 +#define CKSVII2C1_IC_INTR_MASK__M1_START_DET__SHIFT 0xa +#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL__SHIFT 0xb +#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET__SHIFT 0xc +#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C1_IC_INTR_MASK__M1_SCL_STUCK_AT_LOW__SHIFT 0xe +#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER_MASK 0x00000001L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER_MASK 0x00000002L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL_MASK 0x00000004L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER_MASK 0x00000008L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ_MASK 0x00000020L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT_MASK 0x00000040L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE_MASK 0x00000080L +#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY_MASK 0x00000100L +#define 
CKSVII2C1_IC_INTR_MASK__M1_STOP_DET_MASK 0x00000200L +#define CKSVII2C1_IC_INTR_MASK__M1_START_DET_MASK 0x00000400L +#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL_MASK 0x00000800L +#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET_MASK 0x00001000L +#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C1_IC_RAW_INTR_STAT +//CKSVII2C1_IC_RX_TL +#define CKSVII2C1_IC_RX_TL__RX1_TL__SHIFT 0x0 +//CKSVII2C1_IC_TX_TL +#define CKSVII2C1_IC_TX_TL__TX1_TL__SHIFT 0x0 +//CKSVII2C1_IC_CLR_INTR +//CKSVII2C1_IC_CLR_RX_UNDER +//CKSVII2C1_IC_CLR_RX_OVER +//CKSVII2C1_IC_CLR_TX_OVER +//CKSVII2C1_IC_CLR_RD_REQ +//CKSVII2C1_IC_CLR_TX_ABRT +//CKSVII2C1_IC_CLR_RX_DONE +//CKSVII2C1_IC_CLR_ACTIVITY +//CKSVII2C1_IC_CLR_STOP_DET +//CKSVII2C1_IC_CLR_START_DET +//CKSVII2C1_IC_CLR_GEN_CALL +//CKSVII2C1_IC_ENABLE +#define CKSVII2C1_IC_ENABLE__ENABLE1__SHIFT 0x0 +#define CKSVII2C1_IC_ENABLE__ABORT1__SHIFT 0x1 +#define CKSVII2C1_IC_ENABLE__TX1_CMD_BLOCK__SHIFT 0x2 +#define CKSVII2C1_IC_ENABLE__SDA1_STUCK_RECOVERY_ENABLE__SHIFT 0x3 +#define CKSVII2C1_IC_ENABLE__ENABLE1_MASK 0x00000001L +#define CKSVII2C1_IC_ENABLE__ABORT1_MASK 0x00000002L +//CKSVII2C1_IC_STATUS +#define CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT 0x0 +#define CKSVII2C1_IC_STATUS__TFNF1__SHIFT 0x1 +#define CKSVII2C1_IC_STATUS__TFE1__SHIFT 0x2 +#define CKSVII2C1_IC_STATUS__RFNE1__SHIFT 0x3 +#define CKSVII2C1_IC_STATUS__RFF1__SHIFT 0x4 +#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY__SHIFT 0x5 +#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY__SHIFT 0x6 +#define CKSVII2C1_IC_STATUS__MST1_HOLD_TX_FIFO_EMPTY__SHIFT 0x7 +#define CKSVII2C1_IC_STATUS__MST1_HOLD_RX_FIFO_FULL__SHIFT 0x8 +#define CKSVII2C1_IC_STATUS__SLV1_HOLD_TX_FIFO_EMPTY__SHIFT 0x9 +#define CKSVII2C1_IC_STATUS__SLV1_HOLD_RX_FIFO_FULL__SHIFT 0xa +#define CKSVII2C1_IC_STATUS__SDA1_STUCK_NOT_RECOVERED__SHIFT 0xb +#define CKSVII2C1_IC_STATUS__ACTIVITY1_MASK 0x00000001L +#define CKSVII2C1_IC_STATUS__TFNF1_MASK 0x00000002L +#define CKSVII2C1_IC_STATUS__TFE1_MASK 0x00000004L +#define CKSVII2C1_IC_STATUS__RFNE1_MASK 0x00000008L +#define CKSVII2C1_IC_STATUS__RFF1_MASK 0x00000010L +#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY_MASK 0x00000020L +#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY_MASK 0x00000040L +//CKSVII2C1_IC_TXFLR +#define CKSVII2C1_IC_TXFLR__TXFLR1__SHIFT 0x0 +//CKSVII2C1_IC_RXFLR +#define CKSVII2C1_IC_RXFLR__RXFLR1__SHIFT 0x0 +//CKSVII2C1_IC_SDA_HOLD +#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_TX_HOLD__SHIFT 0x0 +#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_RX_HOLD__SHIFT 0x10 +//CKSVII2C1_IC_TX_ABRT_SOURCE +//CKSVII2C1_IC_SLV_DATA_NACK_ONLY +//CKSVII2C1_IC_DMA_CR +//CKSVII2C1_IC_DMA_TDLR +//CKSVII2C1_IC_DMA_RDLR +//CKSVII2C1_IC_SDA_SETUP +#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP__SHIFT 0x0 +#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP_MASK 0x000000FFL +//CKSVII2C1_IC_ACK_GENERAL_CALL +#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL__SHIFT 0x0 +#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL_MASK 0x00000001L +//CKSVII2C1_IC_ENABLE_STATUS +#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN__SHIFT 0x0 +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_DISABLED_WHILE_BUSY__SHIFT 0x1 +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_DATA_LOST__SHIFT 0x2 +#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN_MASK 0x00000001L +//CKSVII2C1_IC_FS_SPKLEN +#define CKSVII2C1_IC_FS_SPKLEN__FS1_SPKLEN__SHIFT 0x0 +//CKSVII2C1_IC_HS_SPKLEN +#define CKSVII2C1_IC_HS_SPKLEN__HS1_SPKLEN__SHIFT 0x0 +//CKSVII2C1_IC_CLR_RESTART_DET +//CKSVII2C1_IC_COMP_PARAM_1 +#define CKSVII2C1_IC_COMP_PARAM_1__APB1_DATA_WIDTH__SHIFT 0x0 +#define 
CKSVII2C1_IC_COMP_PARAM_1__MAX1_SPEED_MODE__SHIFT 0x2 +#define CKSVII2C1_IC_COMP_PARAM_1__HC1_COUNT_VALUES__SHIFT 0x4 +#define CKSVII2C1_IC_COMP_PARAM_1__INTR1_IO__SHIFT 0x5 +#define CKSVII2C1_IC_COMP_PARAM_1__HAS1_DMA__SHIFT 0x6 +#define CKSVII2C1_IC_COMP_PARAM_1__ADD1_ENCODED_PARAMS__SHIFT 0x7 +#define CKSVII2C1_IC_COMP_PARAM_1__RX1_BUFFER_DEPTH__SHIFT 0x8 +#define CKSVII2C1_IC_COMP_PARAM_1__TX1_BUFFER_DEPTH__SHIFT 0x10 +//CKSVII2C1_IC_COMP_VERSION +#define CKSVII2C1_IC_COMP_VERSION__COMP1_VERSION__SHIFT 0x0 +//CKSVII2C1_IC_COMP_TYPE +#define CKSVII2C1_IC_COMP_TYPE__COMP1_TYPE__SHIFT 0x0 +//SMUIO_PWRMGT +#define SMUIO_PWRMGT__i2c_clk_gate_en__SHIFT 0x0 +#define SMUIO_PWRMGT__i2c1_clk_gate_en__SHIFT 0x4 +#define SMUIO_PWRMGT__i2c_clk_gate_en_MASK 0x00000001L +#define SMUIO_PWRMGT__i2c1_clk_gate_en_MASK 0x00000010L + + +// addressBlock: smuio_smuio_rom_SmuSmuioDec +//ROM_CNTL +#define ROM_CNTL__CLOCK_GATING_EN__SHIFT 0x0 +#define ROM_CNTL__READ_MODE__SHIFT 0x1 +#define ROM_CNTL__READ_MODE_OVERRIDE__SHIFT 0x3 +#define ROM_CNTL__SPI_TIMING_RELAX_SCK__SHIFT 0x4 +#define ROM_CNTL__SPI_TIMING_RELAX_SCK_OVERRIDE__SHIFT 0x5 +#define ROM_CNTL__FOUR_BYTE_ADDRESS_MODE__SHIFT 0x6 +#define ROM_CNTL__DUMMY_CYCLE_NUM__SHIFT 0x8 +#define ROM_CNTL__SPI_TIMING_RELAX__SHIFT 0x13 +#define ROM_CNTL__SPI_TIMING_RELAX_OVERRIDE__SHIFT 0x14 +#define ROM_CNTL__SPI_FAST_MODE__SHIFT 0x15 +#define ROM_CNTL__SPI_FAST_MODE_OVERRIDE__SHIFT 0x16 +#define ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT 0x17 +#define ROM_CNTL__SCK_PRESCALE_REFCLK_OVERRIDE__SHIFT 0x1c +#define ROM_CNTL__ROM_INDEX_ADDRESS_AUTO_INCREASE__SHIFT 0x1d +#define ROM_CNTL__PAD_SAMPLE_MODE__SHIFT 0x1e +#define ROM_CNTL__PAD_SAMPLE_MODE_OVERRIDE__SHIFT 0x1f +#define ROM_CNTL__CLOCK_GATING_EN_MASK 0x00000001L +#define ROM_CNTL__SPI_TIMING_RELAX_MASK 0x00080000L +#define ROM_CNTL__SPI_TIMING_RELAX_OVERRIDE_MASK 0x00100000L +#define ROM_CNTL__SPI_FAST_MODE_MASK 0x00200000L +#define ROM_CNTL__SPI_FAST_MODE_OVERRIDE_MASK 0x00400000L +#define ROM_CNTL__SCK_PRESCALE_REFCLK_MASK 0x0F800000L +#define ROM_CNTL__SCK_PRESCALE_REFCLK_OVERRIDE_MASK 0x10000000L +//PAGE_MIRROR_CNTL +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR__SHIFT 0x0 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE__SHIFT 0x19 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE__SHIFT 0x1a +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE__SHIFT 0x1c +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR_MASK 0x01FFFFFFL +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE_MASK 0x02000000L +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE_MASK 0x0C000000L +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE_MASK 0x10000000L +//ROM_STATUS +#define ROM_STATUS__ROM_BUSY__SHIFT 0x0 +#define ROM_STATUS__ROM_BUSY_MASK 0x00000001L +//CGTT_ROM_CLK_CTRL0 +#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0 +#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4 +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f +#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL +#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L +//ROM_INDEX +#define ROM_INDEX__ROM_INDEX__SHIFT 0x0 +#define ROM_INDEX__ROM_INDEX_MASK 0x01FFFFFFL +//ROM_DATA +#define ROM_DATA__ROM_DATA__SHIFT 0x0 +#define ROM_DATA__ROM_DATA_MASK 0xFFFFFFFFL +//ROM_START +#define ROM_START__ROM_START__SHIFT 0x0 +#define ROM_START__ROM_START_MASK 0x01FFFFFFL +//ROM_SW_CNTL +#define 
ROM_SW_CNTL__DATA_SIZE__SHIFT 0x0 +#define ROM_SW_CNTL__COMMAND_SIZE__SHIFT 0x10 +#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE__SHIFT 0x13 +#define ROM_SW_CNTL__DATA_SIZE_MASK 0x0000FFFFL +#define ROM_SW_CNTL__COMMAND_SIZE_MASK 0x00070000L +#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE_MASK 0x00080000L +//ROM_SW_STATUS +#define ROM_SW_STATUS__ROM_SW_DONE__SHIFT 0x0 +#define ROM_SW_STATUS__ROM_SW_DONE_MASK 0x00000001L +//ROM_SW_COMMAND +#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION__SHIFT 0x0 +#define ROM_SW_COMMAND__ROM_SW_ADDRESS__SHIFT 0x8 +#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION_MASK 0x000000FFL +#define ROM_SW_COMMAND__ROM_SW_ADDRESS_MASK 0xFFFFFF00L +//ROM_SW_DATA_1 +#define ROM_SW_DATA_1__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_1__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_2 +#define ROM_SW_DATA_2__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_2__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_3 +#define ROM_SW_DATA_3__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_3__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_4 +#define ROM_SW_DATA_4__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_4__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_5 +#define ROM_SW_DATA_5__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_5__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_6 +#define ROM_SW_DATA_6__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_6__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_7 +#define ROM_SW_DATA_7__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_7__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_8 +#define ROM_SW_DATA_8__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_8__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_9 +#define ROM_SW_DATA_9__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_9__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_10 +#define ROM_SW_DATA_10__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_10__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_11 +#define ROM_SW_DATA_11__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_11__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_12 +#define ROM_SW_DATA_12__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_12__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_13 +#define ROM_SW_DATA_13__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_13__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_14 +#define ROM_SW_DATA_14__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_14__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_15 +#define ROM_SW_DATA_15__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_15__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_16 +#define ROM_SW_DATA_16__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_16__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_17 +#define ROM_SW_DATA_17__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_17__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_18 +#define ROM_SW_DATA_18__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_18__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_19 +#define ROM_SW_DATA_19__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_19__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_20 +#define ROM_SW_DATA_20__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_20__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_21 +#define ROM_SW_DATA_21__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_21__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_22 +#define ROM_SW_DATA_22__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_22__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_23 +#define ROM_SW_DATA_23__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_23__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_24 +#define ROM_SW_DATA_24__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_24__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_25 +#define 
ROM_SW_DATA_25__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_25__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_26 +#define ROM_SW_DATA_26__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_26__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_27 +#define ROM_SW_DATA_27__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_27__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_28 +#define ROM_SW_DATA_28__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_28__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_29 +#define ROM_SW_DATA_29__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_29__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_30 +#define ROM_SW_DATA_30__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_30__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_31 +#define ROM_SW_DATA_31__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_31__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_32 +#define ROM_SW_DATA_32__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_32__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_33 +#define ROM_SW_DATA_33__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_33__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_34 +#define ROM_SW_DATA_34__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_34__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_35 +#define ROM_SW_DATA_35__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_35__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_36 +#define ROM_SW_DATA_36__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_36__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_37 +#define ROM_SW_DATA_37__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_37__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_38 +#define ROM_SW_DATA_38__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_38__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_39 +#define ROM_SW_DATA_39__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_39__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_40 +#define ROM_SW_DATA_40__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_40__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_41 +#define ROM_SW_DATA_41__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_41__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_42 +#define ROM_SW_DATA_42__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_42__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_43 +#define ROM_SW_DATA_43__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_43__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_44 +#define ROM_SW_DATA_44__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_44__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_45 +#define ROM_SW_DATA_45__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_45__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_46 +#define ROM_SW_DATA_46__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_46__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_47 +#define ROM_SW_DATA_47__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_47__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_48 +#define ROM_SW_DATA_48__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_48__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_49 +#define ROM_SW_DATA_49__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_49__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_50 +#define ROM_SW_DATA_50__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_50__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_51 +#define ROM_SW_DATA_51__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_51__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_52 +#define ROM_SW_DATA_52__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_52__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_53 +#define ROM_SW_DATA_53__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_53__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_54 +#define ROM_SW_DATA_54__ROM_SW_DATA__SHIFT 0x0 +#define 
ROM_SW_DATA_54__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_55 +#define ROM_SW_DATA_55__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_55__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_56 +#define ROM_SW_DATA_56__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_56__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_57 +#define ROM_SW_DATA_57__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_57__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_58 +#define ROM_SW_DATA_58__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_58__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_59 +#define ROM_SW_DATA_59__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_59__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_60 +#define ROM_SW_DATA_60__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_60__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_61 +#define ROM_SW_DATA_61__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_61__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_62 +#define ROM_SW_DATA_62__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_62__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_63 +#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_63__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_64 +#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xFFFFFFFFL + + +// addressBlock: smuio_smuio_gpio_SmuSmuioDec +//SMU_GPIOPAD_SW_INT_STAT +#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x00000001L +//SMU_GPIOPAD_MASK +#define SMU_GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0 +#define SMU_GPIOPAD_MASK__GPIO_MASK_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_A +#define SMU_GPIOPAD_A__GPIO_A__SHIFT 0x0 +#define SMU_GPIOPAD_A__GPIO_A_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_TXIMPSEL +#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL__SHIFT 0x0 +#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_EN +#define SMU_GPIOPAD_EN__GPIO_EN__SHIFT 0x0 +#define SMU_GPIOPAD_EN__GPIO_EN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_Y +#define SMU_GPIOPAD_Y__GPIO_Y__SHIFT 0x0 +#define SMU_GPIOPAD_Y__GPIO_Y_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_RXEN +#define SMU_GPIOPAD_RXEN__GPIO_RXEN__SHIFT 0x0 +#define SMU_GPIOPAD_RXEN__GPIO_RXEN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_RCVR_SEL0 +#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0__SHIFT 0x0 +#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_RCVR_SEL1 +#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1__SHIFT 0x0 +#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_PU_EN +#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0 +#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_PD_EN +#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0 +#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_PINSTRAPS +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 
0xd +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x00000001L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x00000002L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x00000004L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x00000008L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x00000010L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x00000020L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x00000040L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x00000080L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x00000100L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x00000200L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x00000400L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x00000800L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x00001000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x00002000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x00004000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x00008000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x00010000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x00020000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x00040000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x00080000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x00100000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x00200000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x00400000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x00800000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x01000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x02000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x04000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x08000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000L +//DFT_PINSTRAPS +#define DFT_PINSTRAPS__DFT_PINSTRAPS__SHIFT 0x0 +#define DFT_PINSTRAPS__DFT_PINSTRAPS_MASK 0x000000FFL +//SMU_GPIOPAD_INT_STAT_EN +#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0 +#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f +#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000L +//SMU_GPIOPAD_INT_STAT +#define 
SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f +#define SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000L +//SMU_GPIOPAD_INT_STAT_AK +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c +#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x00000001L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x00000002L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x00000004L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x00000008L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x00000010L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x00000020L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x00000040L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x00000080L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x00000100L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x00000200L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x00000400L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x00000800L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x00001000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x00002000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x00004000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x00008000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x00010000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x00020000L +#define 
SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x00040000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x00080000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x00100000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x00200000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x00400000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x00800000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x01000000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x02000000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x04000000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x08000000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000L +#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000L +//SMU_GPIOPAD_INT_EN +#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0 +#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f +#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000L +//SMU_GPIOPAD_INT_TYPE +#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0 +#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f +#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000L +//SMU_GPIOPAD_INT_POLARITY +#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0 +#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f +#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000L +//SMUIO_PCC_GPIO_SELECT +#define SMUIO_PCC_GPIO_SELECT__GPIO__SHIFT 0x0 +#define SMUIO_PCC_GPIO_SELECT__GPIO_MASK 0xFFFFFFFFL +//SMU_GPIOPAD_S0 +#define SMU_GPIOPAD_S0__GPIO_S0__SHIFT 0x0 +#define SMU_GPIOPAD_S0__GPIO_S0_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_S1 +#define SMU_GPIOPAD_S1__GPIO_S1__SHIFT 0x0 +#define SMU_GPIOPAD_S1__GPIO_S1_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_SCHMEN +#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN__SHIFT 0x0 +#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_SCL_EN +#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN__SHIFT 0x0 +#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_SDA_EN +#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN__SHIFT 0x0 +#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN_MASK 0x7FFFFFFFL +//SMUIO_GPIO_INT0_SELECT +#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT__SHIFT 0x0 +#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT_MASK 0xFFFFFFFFL +//SMUIO_GPIO_INT1_SELECT +#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT__SHIFT 0x0 +#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT_MASK 0xFFFFFFFFL +//SMUIO_GPIO_INT2_SELECT +#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT__SHIFT 0x0 +#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT_MASK 0xFFFFFFFFL +//SMUIO_GPIO_INT3_SELECT +#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT__SHIFT 0x0 +#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT_MASK 0xFFFFFFFFL +//SMU_GPIOPAD_MP_INT0_STAT +#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT_MASK 0x1FFFFFFFL +//SMU_GPIOPAD_MP_INT1_STAT +#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT_MASK 0x1FFFFFFFL +//SMU_GPIOPAD_MP_INT2_STAT +#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT_MASK 0x1FFFFFFFL 
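/*
 * Illustrative sketch (editor's example, not part of this patch): register
 * headers like the SMU_GPIOPAD_* block above always come as __SHIFT/_MASK
 * pairs, which are consumed through small field-access helpers.  amdgpu has
 * its own REG_GET_FIELD/REG_SET_FIELD macros; the FIELD_GET/FIELD_SET
 * helpers and the main() driver below are hypothetical stand-ins that only
 * reuse the SMU_GPIOPAD_INT_EN definitions from this header.
 */
#include <stdint.h>
#include <stdio.h>

#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT          0x0
#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT  0x1f
#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN_MASK            0x1FFFFFFFL
#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK    0x80000000L

/* Extract a field: mask first, then shift down. */
#define FIELD_GET(reg, field) \
	(((reg) & field##_MASK) >> field##__SHIFT)

/* Insert a field: clear the old bits, then OR in the shifted new value. */
#define FIELD_SET(reg, field, val) \
	(((reg) & ~field##_MASK) | \
	 (((uint32_t)(val) << field##__SHIFT) & field##_MASK))

int main(void)
{
	uint32_t reg = 0;

	reg = FIELD_SET(reg, SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN, 1);
	reg = FIELD_SET(reg, SMU_GPIOPAD_INT_EN__GPIO_INT_EN, 0x5);

	printf("reg=0x%08x gpio_int_en=0x%lx\n", (unsigned int)reg,
	       (unsigned long)FIELD_GET(reg, SMU_GPIOPAD_INT_EN__GPIO_INT_EN));
	return 0;
}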
+//SMU_GPIOPAD_MP_INT3_STAT +#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT_MASK 0x1FFFFFFFL +//SMIO_INDEX +#define SMIO_INDEX__SW_SMIO_INDEX__SHIFT 0x0 +#define SMIO_INDEX__SW_SMIO_INDEX_MASK 0x00000001L +//S0_VID_SMIO_CNTL +#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES__SHIFT 0x0 +#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES_MASK 0xFFFFFFFFL +//S1_VID_SMIO_CNTL +#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES__SHIFT 0x0 +#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES_MASK 0xFFFFFFFFL +//OPEN_DRAIN_SELECT +#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT__SHIFT 0x0 +#define OPEN_DRAIN_SELECT__RESERVED__SHIFT 0x1f +#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT_MASK 0x7FFFFFFFL +#define OPEN_DRAIN_SELECT__RESERVED_MASK 0x80000000L +//SMIO_ENABLE +#define SMIO_ENABLE__SMIO_ENABLE__SHIFT 0x0 +#define SMIO_ENABLE__SMIO_ENABLE_MASK 0xFFFFFFFFL + +#endif diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 32054ecf0b..805c9d37a2 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -150,6 +150,7 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_VCN_POWER_STATE, AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK, AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK, + AMDGPU_PP_SENSOR_VCN_LOAD, }; enum amd_pp_task { @@ -420,7 +421,7 @@ struct amd_pm_funcs { int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock); int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock); int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock); - bool (*get_asic_baco_capability)(void *handle); + int (*get_asic_baco_capability)(void *handle); int (*get_asic_baco_state)(void *handle, int *state); int (*set_asic_baco_state)(void *handle, int state); int (*get_ppfeature_status)(void *handle, char *buf); diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index ec5b9ab67c..b72d5d3622 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -61,6 +61,7 @@ enum MES_SCH_API_OPCODE { MES_SCH_API_MISC = 14, MES_SCH_API_UPDATE_ROOT_PAGE_TABLE = 15, MES_SCH_API_AMD_LOG = 16, + MES_SCH_API_SET_HW_RSRC_1 = 19, MES_SCH_API_MAX = 0xFF }; @@ -238,6 +239,26 @@ union MESAPI_SET_HW_RESOURCES { uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; }; +union MESAPI_SET_HW_RESOURCES_1 { + struct { + union MES_API_HEADER header; + struct MES_API_STATUS api_status; + uint64_t timestamp; + union { + struct { + uint32_t enable_mes_info_ctx : 1; + uint32_t reserved : 31; + }; + uint32_t uint32_all; + }; + uint64_t mes_info_ctx_mc_addr; + uint32_t mes_info_ctx_size; + uint32_t mes_kiq_unmap_timeout; // unit is 100ms + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + union MESAPI__ADD_QUEUE { struct { union MES_API_HEADER header; @@ -278,10 +299,21 @@ union MESAPI__ADD_QUEUE { uint32_t skip_process_ctx_clear : 1; uint32_t map_legacy_kq : 1; uint32_t exclusively_scheduled : 1; - uint32_t reserved : 17; + uint32_t is_long_running : 1; + uint32_t is_dwm_queue : 1; + uint32_t is_video_blit_queue : 1; + uint32_t reserved : 14; }; - struct MES_API_STATUS api_status; - uint64_t tma_addr; + struct MES_API_STATUS api_status; + uint64_t tma_addr; + uint32_t sch_id; + uint64_t timestamp; + uint32_t process_context_array_index; + uint32_t gang_context_array_index; + uint32_t pipe_id; + uint32_t queue_id; + uint32_t alignment_mode_setting; + uint64_t unmap_flag_addr; }; uint32_t 
max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index f84bfed506..eee919577b 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -199,14 +199,14 @@ int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en) return ret; } -bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) +int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; void *pp_handle = adev->powerplay.pp_handle; - bool ret; + int ret; if (!pp_funcs || !pp_funcs->get_asic_baco_capability) - return false; + return 0; /* Don't use baco for reset in S3. * This is a workaround for some platforms * where entering BACO during suspend @@ -217,7 +217,7 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) * devices. Needs more investigation. */ if (adev->in_s3) - return false; + return 0; mutex_lock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index bbd0169010..c11952a438 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -38,6 +38,8 @@ #define MAX_NUM_OF_FEATURES_PER_SUBSET 8 #define MAX_NUM_OF_SUBSETS 8 +#define DEVICE_ATTR_IS(_name) (attr_id == device_attr_id__##_name) + struct od_attribute { struct kobj_attribute attribute; struct list_head entry; @@ -1581,6 +1583,30 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev, return sysfs_emit(buf, "%d\n", value); } +/** + * DOC: vcn_busy_percent + * + * The amdgpu driver provides a sysfs API for reading how busy the VCN + * is as a percentage. The file vcn_busy_percent is used for this. + * The SMU firmware computes a percentage of load based on the + * aggregate activity level in the IP cores. 
+ */ +static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + unsigned int value; + int r; + + r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value); + if (r) + return r; + + return sysfs_emit(buf, "%d\n", value); +} + /** * DOC: pcie_bw * @@ -2091,6 +2117,99 @@ static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_ return 0; } +static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, + uint32_t mask, enum amdgpu_device_attr_states *states) +{ + struct device_attribute *dev_attr = &attr->dev_attr; + enum amdgpu_device_attr_id attr_id = attr->attr_id; + uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0); + uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); + + *states = ATTR_STATE_SUPPORTED; + + if (!(attr->flags & mask)) { + *states = ATTR_STATE_UNSUPPORTED; + return 0; + } + + if (DEVICE_ATTR_IS(pp_dpm_socclk)) { + if (gc_ver < IP_VERSION(9, 0, 0)) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) { + if (mp1_ver < IP_VERSION(10, 0, 0)) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) { + if (!(gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 3) || + gc_ver == IP_VERSION(10, 3, 6) || + gc_ver == IP_VERSION(10, 3, 7) || + gc_ver == IP_VERSION(10, 3, 0) || + gc_ver == IP_VERSION(10, 1, 2) || + gc_ver == IP_VERSION(11, 0, 0) || + gc_ver == IP_VERSION(11, 0, 1) || + gc_ver == IP_VERSION(11, 0, 4) || + gc_ver == IP_VERSION(11, 5, 0) || + gc_ver == IP_VERSION(11, 0, 2) || + gc_ver == IP_VERSION(11, 0, 3) || + gc_ver == IP_VERSION(9, 4, 3))) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) { + if (!((gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 0) || + gc_ver == IP_VERSION(11, 0, 2) || + gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { + if (!(gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 3) || + gc_ver == IP_VERSION(10, 3, 6) || + gc_ver == IP_VERSION(10, 3, 7) || + gc_ver == IP_VERSION(10, 3, 0) || + gc_ver == IP_VERSION(10, 1, 2) || + gc_ver == IP_VERSION(11, 0, 0) || + gc_ver == IP_VERSION(11, 0, 1) || + gc_ver == IP_VERSION(11, 0, 4) || + gc_ver == IP_VERSION(11, 5, 0) || + gc_ver == IP_VERSION(11, 0, 2) || + gc_ver == IP_VERSION(11, 0, 3) || + gc_ver == IP_VERSION(9, 4, 3))) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) { + if (!((gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 0) || + gc_ver == IP_VERSION(11, 0, 2) || + gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) { + if (gc_ver == IP_VERSION(9, 4, 2) || + gc_ver == IP_VERSION(9, 4, 3)) + *states = ATTR_STATE_UNSUPPORTED; + } + + switch (gc_ver) { + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 4, 2): + /* the Mi series card does not support standalone mclk/socclk/fclk level setting */ + if (DEVICE_ATTR_IS(pp_dpm_mclk) || + DEVICE_ATTR_IS(pp_dpm_socclk) || + DEVICE_ATTR_IS(pp_dpm_fclk)) { + dev_attr->attr.mode &= ~S_IWUGO; + dev_attr->store = NULL; + } + break; + default: + break; + } + + /* setting should not be allowed from VF if not in one VF mode */ + if (amdgpu_sriov_vf(adev) && 
amdgpu_sriov_is_pp_one_vf(adev)) { + dev_attr->attr.mode &= ~S_IWUGO; + dev_attr->store = NULL; + } + + return 0; +} + /* Following items will be read out to indicate current plpd policy: * - -1: none * - 0: disallow @@ -2162,17 +2281,26 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, .attr_update = pp_dpm_dcefclk_attr_update), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), @@ -2180,6 +2308,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { .attr_update = pp_od_clk_voltage_attr_update), AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), @@ -2201,28 +2330,28 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ uint32_t mask, enum amdgpu_device_attr_states *states) { struct device_attribute *dev_attr = &attr->dev_attr; - uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0); + enum amdgpu_device_attr_id attr_id = attr->attr_id; uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); - const char *attr_name = dev_attr->attr.name; if (!(attr->flags & mask)) { *states = 
ATTR_STATE_UNSUPPORTED; return 0; } -#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name)) - - if (DEVICE_ATTR_IS(pp_dpm_socclk)) { - if (gc_ver < IP_VERSION(9, 0, 0)) - *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) { - if (mp1_ver < IP_VERSION(10, 0, 0)) - *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(mem_busy_percent)) { + if (DEVICE_ATTR_IS(mem_busy_percent)) { if ((adev->flags & AMD_IS_APU && gc_ver != IP_VERSION(9, 4, 3)) || gc_ver == IP_VERSION(9, 0, 1)) *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(vcn_busy_percent)) { + if (!(gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 3) || + gc_ver == IP_VERSION(10, 3, 6) || + gc_ver == IP_VERSION(10, 3, 7) || + gc_ver == IP_VERSION(11, 0, 1) || + gc_ver == IP_VERSION(11, 0, 4) || + gc_ver == IP_VERSION(11, 5, 0))) + *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pcie_bw)) { /* PCIe Perf counters won't work on APU nodes */ if (adev->flags & AMD_IS_APU || @@ -2253,36 +2382,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } else if (DEVICE_ATTR_IS(gpu_metrics)) { if (gc_ver < IP_VERSION(9, 1, 0)) *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) { - if (!(gc_ver == IP_VERSION(10, 3, 1) || - gc_ver == IP_VERSION(10, 3, 0) || - gc_ver == IP_VERSION(10, 1, 2) || - gc_ver == IP_VERSION(11, 0, 0) || - gc_ver == IP_VERSION(11, 0, 2) || - gc_ver == IP_VERSION(11, 0, 3) || - gc_ver == IP_VERSION(9, 4, 3))) - *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) { - if (!((gc_ver == IP_VERSION(10, 3, 1) || - gc_ver == IP_VERSION(10, 3, 0) || - gc_ver == IP_VERSION(11, 0, 2) || - gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) - *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { - if (!(gc_ver == IP_VERSION(10, 3, 1) || - gc_ver == IP_VERSION(10, 3, 0) || - gc_ver == IP_VERSION(10, 1, 2) || - gc_ver == IP_VERSION(11, 0, 0) || - gc_ver == IP_VERSION(11, 0, 2) || - gc_ver == IP_VERSION(11, 0, 3) || - gc_ver == IP_VERSION(9, 4, 3))) - *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) { - if (!((gc_ver == IP_VERSION(10, 3, 1) || - gc_ver == IP_VERSION(10, 3, 0) || - gc_ver == IP_VERSION(11, 0, 2) || - gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) - *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) { if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP) *states = ATTR_STATE_UNSUPPORTED; @@ -2304,23 +2403,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) == -EOPNOTSUPP) *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) { - if (gc_ver == IP_VERSION(9, 4, 2) || - gc_ver == IP_VERSION(9, 4, 3)) - *states = ATTR_STATE_UNSUPPORTED; } switch (gc_ver) { - case IP_VERSION(9, 4, 1): - case IP_VERSION(9, 4, 2): - /* the Mi series card does not support standalone mclk/socclk/fclk level setting */ - if (DEVICE_ATTR_IS(pp_dpm_mclk) || - DEVICE_ATTR_IS(pp_dpm_socclk) || - DEVICE_ATTR_IS(pp_dpm_fclk)) { - dev_attr->attr.mode &= ~S_IWUGO; - dev_attr->store = NULL; - } - break; case IP_VERSION(10, 3, 0): if (DEVICE_ATTR_IS(power_dpm_force_performance_level) && amdgpu_sriov_vf(adev)) { @@ -2332,14 +2417,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ break; } - /* setting should not be allowed from VF if not in one VF 
mode */ - if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { - dev_attr->attr.mode &= ~S_IWUGO; - dev_attr->store = NULL; - } - -#undef DEVICE_ATTR_IS - return 0; } @@ -4329,6 +4406,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) ret = amdgpu_od_set_init(adev); if (ret) goto err_out1; + } else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) { + dev_info(adev->dev, "overdrive feature is not supported\n"); } adev->pm.sysfs_initialized = true; @@ -4436,6 +4515,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a /* MEM Load */ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size)) seq_printf(m, "MEM Load: %u %%\n", value); + /* VCN Load */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size)) + seq_printf(m, "VCN Load: %u %%\n", value); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 621200e082..501f8c726e 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -50,8 +50,12 @@ enum amdgpu_runpm_mode { AMDGPU_RUNPM_PX, AMDGPU_RUNPM_BOCO, AMDGPU_RUNPM_BACO, + AMDGPU_RUNPM_BAMACO, }; +#define BACO_SUPPORT (1<<0) +#define MACO_SUPPORT (1<<1) + struct amdgpu_ps { u32 caps; /* vbios flags */ u32 class; /* vbios flags */ @@ -407,7 +411,7 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev); int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev); int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev); -bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev); +int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev); bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev); int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h index eec816f0cb..448ba3a145 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h @@ -43,8 +43,48 @@ enum amdgpu_device_attr_states { ATTR_STATE_SUPPORTED, }; +enum amdgpu_device_attr_id { + device_attr_id__unknown = -1, + device_attr_id__power_dpm_state = 0, + device_attr_id__power_dpm_force_performance_level, + device_attr_id__pp_num_states, + device_attr_id__pp_cur_state, + device_attr_id__pp_force_state, + device_attr_id__pp_table, + device_attr_id__pp_dpm_sclk, + device_attr_id__pp_dpm_mclk, + device_attr_id__pp_dpm_socclk, + device_attr_id__pp_dpm_fclk, + device_attr_id__pp_dpm_vclk, + device_attr_id__pp_dpm_vclk1, + device_attr_id__pp_dpm_dclk, + device_attr_id__pp_dpm_dclk1, + device_attr_id__pp_dpm_dcefclk, + device_attr_id__pp_dpm_pcie, + device_attr_id__pp_sclk_od, + device_attr_id__pp_mclk_od, + device_attr_id__pp_power_profile_mode, + device_attr_id__pp_od_clk_voltage, + device_attr_id__gpu_busy_percent, + device_attr_id__mem_busy_percent, + device_attr_id__vcn_busy_percent, + device_attr_id__pcie_bw, + device_attr_id__pp_features, + device_attr_id__unique_id, + device_attr_id__thermal_throttling_logging, + device_attr_id__apu_thermal_cap, + device_attr_id__gpu_metrics, + device_attr_id__smartshift_apu_power, + device_attr_id__smartshift_dgpu_power, + device_attr_id__smartshift_bias, + device_attr_id__xgmi_plpd_policy, + device_attr_id__pm_metrics, + device_attr_id__count, +}; + struct amdgpu_device_attr { struct device_attribute dev_attr; + enum amdgpu_device_attr_id attr_id; enum amdgpu_device_attr_flags flags; int (*attr_update)(struct amdgpu_device *adev, 
struct amdgpu_device_attr *attr, uint32_t mask, enum amdgpu_device_attr_states *states); @@ -61,6 +101,7 @@ struct amdgpu_device_attr_entry { #define __AMDGPU_DEVICE_ATTR(_name, _mode, _show, _store, _flags, ...) \ { .dev_attr = __ATTR(_name, _mode, _show, _store), \ + .attr_id = device_attr_id__##_name, \ .flags = _flags, \ ##__VA_ARGS__, } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index c8586cb7d0..e8b6989a40 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -3318,6 +3318,8 @@ static const struct amd_ip_funcs kv_dpm_ip_funcs = { .soft_reset = kv_dpm_soft_reset, .set_clockgating_state = kv_dpm_set_clockgating_state, .set_powergating_state = kv_dpm_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version kv_smu_ip_block = { diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index eb4da3666e..f245fc0bc6 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -8060,6 +8060,8 @@ static const struct amd_ip_funcs si_dpm_ip_funcs = { .soft_reset = si_dpm_soft_reset, .set_clockgating_state = si_dpm_set_clockgating_state, .set_powergating_state = si_dpm_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version si_smu_ip_block = diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index aed0e2cefb..5fb21a0508 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -302,6 +302,8 @@ static const struct amd_ip_funcs pp_ip_funcs = { .soft_reset = pp_sw_reset, .set_clockgating_state = pp_set_clockgating_state, .set_powergating_state = pp_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version pp_smu_ip_block = @@ -1371,7 +1373,7 @@ static int pp_set_active_display_count(void *handle, uint32_t count) return phm_set_active_display_count(hwmgr, count); } -static bool pp_get_asic_baco_capability(void *handle) +static int pp_get_asic_baco_capability(void *handle) { struct pp_hwmgr *hwmgr = handle; @@ -1379,10 +1381,10 @@ static bool pp_get_asic_baco_capability(void *handle) return false; if (!(hwmgr->not_vf && amdgpu_dpm) || - !hwmgr->hwmgr_func->get_asic_baco_capability) + !hwmgr->hwmgr_func->get_bamaco_support) return false; - return hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr); + return hwmgr->hwmgr_func->get_bamaco_support(hwmgr); } static int pp_get_asic_baco_state(void *handle, int *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c index e8a9471c18..ad60918aaa 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c @@ -33,7 +33,7 @@ #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" -bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr) +int smu7_get_bamaco_support(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg; @@ -44,9 +44,9 @@ bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr) reg = RREG32(mmCC_BIF_BX_FUSESTRAP0); if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK) - return true; + return BACO_SUPPORT; - return false; + return 0; } int smu7_baco_get_state(struct pp_hwmgr 
*hwmgr, enum BACO_STATE *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h index 73a773f4ce..750082ea74 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr); +extern int smu7_get_bamaco_support(struct pp_hwmgr *hwmgr); extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index aa91730e4e..1fcd445100 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -5791,7 +5791,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .get_power_profile_mode = smu7_get_power_profile_mode, .set_power_profile_mode = smu7_set_power_profile_mode, .get_performance_level = smu7_get_performance_level, - .get_asic_baco_capability = smu7_baco_get_capability, + .get_bamaco_support = smu7_get_bamaco_support, .get_asic_baco_state = smu7_baco_get_state, .set_asic_baco_state = smu7_baco_set_state, .power_off_asic = smu7_power_off_asic, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c index c66ef97415..c1ce1d7cae 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c @@ -28,13 +28,13 @@ #include "vega10_inc.h" #include "smu9_baco.h" -bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr) +int smu9_get_bamaco_support(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg, data; if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) - return false; + return 0; WREG32(0x12074, 0xFFF0003B); data = RREG32(0x12075); @@ -43,10 +43,10 @@ bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr) reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0); if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) - return true; + return BACO_SUPPORT; } - return false; + return 0; } int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h index 9ff7c2ea1b..2c10048208 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr); +extern int smu9_get_bamaco_support(struct pp_hwmgr *hwmgr); extern int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); #endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 6d6bc6a380..9f5bd998c6 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -5756,7 +5756,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .set_power_limit = vega10_set_power_limit, .odn_edit_dpm_table = vega10_odn_edit_dpm_table, .get_performance_level = vega10_get_performance_level, - .get_asic_baco_capability = smu9_baco_get_capability, + .get_bamaco_support = smu9_get_bamaco_support, 
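/*
 * Illustrative sketch (editor's example, not part of this patch): the
 * bool-returning get_asic_baco_capability() hooks are being replaced by
 * int-returning get_bamaco_support() hooks, so one query can report BACO
 * and MACO support independently via the BACO_SUPPORT/MACO_SUPPORT flags
 * added to amdgpu_dpm.h earlier in this patch.  The caller below is
 * hypothetical and only shows how such a bitmask return might be decoded.
 */
#include <stdio.h>

#define BACO_SUPPORT (1 << 0)
#define MACO_SUPPORT (1 << 1)

/* Stand-in for a hwmgr/ppt ->get_bamaco_support() callback. */
static int example_get_bamaco_support(void)
{
	return BACO_SUPPORT | MACO_SUPPORT;	/* pretend both are available */
}

int main(void)
{
	int caps = example_get_bamaco_support();

	if (!caps)
		printf("neither BACO nor MACO is supported\n");
	if (caps & BACO_SUPPORT)
		printf("BACO supported\n");
	if (caps & MACO_SUPPORT)
		printf("MACO supported, so a BAMACO sequence is possible\n");
	return 0;
}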
.get_asic_baco_state = smu9_baco_get_state, .set_asic_baco_state = vega10_baco_set_state, .enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index 460067933d..c223e3a6bf 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -2966,7 +2966,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .start_thermal_controller = vega12_start_thermal_controller, .powergate_gfx = vega12_gfx_off_control, .get_performance_level = vega12_get_performance_level, - .get_asic_baco_capability = smu9_baco_get_capability, + .get_bamaco_support = smu9_get_bamaco_support, .get_asic_baco_state = smu9_baco_get_state, .set_asic_baco_state = vega12_baco_set_state, .get_ppfeature_status = vega12_get_ppfeature_status, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c index dad4c80aee..424e4ec9e3 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c @@ -36,22 +36,22 @@ static const struct soc15_baco_cmd_entry clean_baco_tbl[] = { {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0}, }; -bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr) +int vega20_get_bamaco_support(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg; if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) - return false; + return 0; if (((RREG32(0x17569) & 0x20000000) >> 29) == 0x1) { reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0); if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) - return true; + return BACO_SUPPORT; } - return false; + return 0; } int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h index bdad9c9156..0f2dd8c008 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr); +extern int vega20_get_bamaco_support(struct pp_hwmgr *hwmgr); extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); extern int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 3b33af30eb..f9efb0bad8 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -4422,7 +4422,7 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { .notify_cac_buffer_info = vega20_notify_cac_buffer_info, .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost, /* BACO related */ - .get_asic_baco_capability = vega20_baco_get_capability, + .get_bamaco_support = vega20_get_bamaco_support, .get_asic_baco_state = vega20_baco_get_state, .set_asic_baco_state = vega20_baco_set_state, .set_mp1_state = vega20_set_mp1_state, diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 6f536159df..69928a4a07 100644 --- 
a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -351,7 +351,7 @@ struct pp_hwmgr_func { int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); - bool (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr); + int (*get_bamaco_support)(struct pp_hwmgr *hwmgr); int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state); int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 5a22470182..e1796ecf9c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -45,6 +45,7 @@ #include "smu_v13_0_6_ppt.h" #include "smu_v13_0_7_ppt.h" #include "smu_v14_0_0_ppt.h" +#include "smu_v14_0_2_ppt.h" #include "amd_pcie.h" /* @@ -727,6 +728,10 @@ static int smu_set_funcs(struct amdgpu_device *adev) case IP_VERSION(14, 0, 1): smu_v14_0_0_set_ppt_funcs(smu); break; + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): + smu_v14_0_2_set_ppt_funcs(smu); + break; default: return -EINVAL; } @@ -749,6 +754,7 @@ static int smu_early_init(void *handle) smu->is_apu = false; smu->smu_baco.state = SMU_BACO_STATE_NONE; smu->smu_baco.platform_support = false; + smu->smu_baco.maco_support = false; smu->user_dpm_profile.fan_mode = -1; mutex_init(&smu->message_lock); @@ -3236,17 +3242,17 @@ static int smu_set_xgmi_pstate(void *handle, return ret; } -static bool smu_get_baco_capability(void *handle) +static int smu_get_baco_capability(void *handle) { struct smu_context *smu = handle; if (!smu->pm_enabled) return false; - if (!smu->ppt_funcs || !smu->ppt_funcs->baco_is_support) + if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support) return false; - return smu->ppt_funcs->baco_is_support(smu); + return smu->ppt_funcs->get_bamaco_support(smu); } static int smu_baco_set_state(void *handle, int state) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 8667e8c9d7..64ccdb5f14 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -459,7 +459,7 @@ struct smu_umd_pstate_table { struct cmn2asic_msg_mapping { int valid_mapping; int map_to; - int valid_in_vf; + uint32_t flags; }; struct cmn2asic_mapping { @@ -539,6 +539,7 @@ struct smu_context { uint32_t smc_driver_if_version; uint32_t smc_fw_if_version; uint32_t smc_fw_version; + uint32_t smc_fw_caps; bool uploading_custom_pp_table; bool dc_controlled_by_gpio; @@ -1174,9 +1175,11 @@ struct pptable_funcs { int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); /** - * @baco_is_support: Check if GPU supports BACO (Bus Active, Chip Off). + * @get_bamaco_support: Check if GPU supports BACO/MACO + * BACO: Bus Active, Chip Off + * MACO: Memory Active, Chip Off */ - bool (*baco_is_support)(struct smu_context *smu); + int (*get_bamaco_support)(struct smu_context *smu); /** * @baco_get_state: Get the current BACO state. 
@@ -1488,8 +1491,8 @@ enum smu_baco_seq { BACO_SEQ_COUNT, }; -#define MSG_MAP(msg, index, valid_in_vf) \ - [SMU_MSG_##msg] = {1, (index), (valid_in_vf)} +#define MSG_MAP(msg, index, flags) \ + [SMU_MSG_##msg] = {1, (index), (flags)} #define CLK_MAP(clk, index) \ [SMU_##clk] = {1, (index)} diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h new file mode 100644 index 0000000000..97a29b80fb --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h @@ -0,0 +1,1836 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU14_DRIVER_IF_V14_0_H +#define SMU14_DRIVER_IF_V14_0_H + +//Increment this version if SkuTable_t or BoardTable_t change +#define PPTABLE_VERSION 0x18 + +#define NUM_GFXCLK_DPM_LEVELS 16 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_MP0CLK_DPM_LEVELS 2 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DISPCLK_DPM_LEVELS 8 +#define NUM_DPPCLK_DPM_LEVELS 8 +#define NUM_DPREFCLK_DPM_LEVELS 8 +#define NUM_DCFCLK_DPM_LEVELS 8 +#define NUM_DTBCLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 6 +#define NUM_LINK_LEVELS 3 +#define NUM_FCLK_DPM_LEVELS 8 +#define NUM_OD_FAN_MAX_POINTS 6 + +// Feature Control Defines +#define FEATURE_FW_DATA_READ_BIT 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT 2 +#define FEATURE_DPM_UCLK_BIT 3 +#define FEATURE_DPM_FCLK_BIT 4 +#define FEATURE_DPM_SOCCLK_BIT 5 +#define FEATURE_DPM_LINK_BIT 6 +#define FEATURE_DPM_DCN_BIT 7 +#define FEATURE_VMEMP_SCALING_BIT 8 +#define FEATURE_VDDIO_MEM_SCALING_BIT 9 +#define FEATURE_DS_GFXCLK_BIT 10 +#define FEATURE_DS_SOCCLK_BIT 11 +#define FEATURE_DS_FCLK_BIT 12 +#define FEATURE_DS_LCLK_BIT 13 +#define FEATURE_DS_DCFCLK_BIT 14 +#define FEATURE_DS_UCLK_BIT 15 +#define FEATURE_GFX_ULV_BIT 16 +#define FEATURE_FW_DSTATE_BIT 17 +#define FEATURE_GFXOFF_BIT 18 +#define FEATURE_BACO_BIT 19 +#define FEATURE_MM_DPM_BIT 20 +#define FEATURE_SOC_MPCLK_DS_BIT 21 +#define FEATURE_BACO_MPCLK_DS_BIT 22 +#define FEATURE_THROTTLERS_BIT 23 +#define FEATURE_SMARTSHIFT_BIT 24 +#define FEATURE_GTHR_BIT 25 +#define FEATURE_ACDC_BIT 26 +#define FEATURE_VR0HOT_BIT 27 +#define FEATURE_FW_CTF_BIT 28 +#define FEATURE_FAN_CONTROL_BIT 29 +#define FEATURE_GFX_DCS_BIT 30 +#define FEATURE_GFX_READ_MARGIN_BIT 31 +#define FEATURE_LED_DISPLAY_BIT 32 +#define 
FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT 33 +#define FEATURE_OUT_OF_BAND_MONITOR_BIT 34 +#define FEATURE_OPTIMIZED_VMIN_BIT 35 +#define FEATURE_GFX_IMU_BIT 36 +#define FEATURE_BOOT_TIME_CAL_BIT 37 +#define FEATURE_GFX_PCC_DFLL_BIT 38 +#define FEATURE_SOC_CG_BIT 39 +#define FEATURE_DF_CSTATE_BIT 40 +#define FEATURE_GFX_EDC_BIT 41 +#define FEATURE_BOOT_POWER_OPT_BIT 42 +#define FEATURE_CLOCK_POWER_DOWN_BYPASS_BIT 43 +#define FEATURE_DS_VCN_BIT 44 +#define FEATURE_BACO_CG_BIT 45 +#define FEATURE_MEM_TEMP_READ_BIT 46 +#define FEATURE_ATHUB_MMHUB_PG_BIT 47 +#define FEATURE_SOC_PCC_BIT 48 +#define FEATURE_EDC_PWRBRK_BIT 49 +#define FEATURE_SOC_EDC_XVMIN_BIT 50 +#define FEATURE_GFX_PSM_DIDT_BIT 51 +#define FEATURE_APT_ALL_ENABLE_BIT 52 +#define FEATURE_APT_SQ_THROTTLE_BIT 53 +#define FEATURE_APT_PF_DCS_BIT 54 +#define FEATURE_GFX_EDC_XVMIN_BIT 55 +#define FEATURE_GFX_DIDT_XVMIN_BIT 56 +#define FEATURE_FAN_ABNORMAL_BIT 57 +#define FEATURE_CLOCK_STRETCH_COMPENSATOR 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 +#define NUM_FEATURES 64 + +#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL +#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \ + (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \ + (1 << FEATURE_DPM_UCLK_BIT) | \ + (1 << FEATURE_DPM_FCLK_BIT) | \ + (1 << FEATURE_DPM_SOCCLK_BIT) | \ + (1 << FEATURE_DPM_LINK_BIT) | \ + (1 << FEATURE_DPM_DCN_BIT) | \ + (1 << FEATURE_DS_GFXCLK_BIT) | \ + (1 << FEATURE_DS_SOCCLK_BIT) | \ + (1 << FEATURE_DS_FCLK_BIT) | \ + (1 << FEATURE_DS_LCLK_BIT) | \ + (1 << FEATURE_DS_DCFCLK_BIT) | \ + (1 << FEATURE_DS_UCLK_BIT) | \ + (1ULL << FEATURE_DS_VCN_BIT) + + +//For use with feature control messages +typedef enum { + FEATURE_PWR_ALL, + FEATURE_PWR_S5, + FEATURE_PWR_BACO, + FEATURE_PWR_SOC, + FEATURE_PWR_GFX, + FEATURE_PWR_DOMAIN_COUNT, +} FEATURE_PWR_DOMAIN_e; + +//For use with feature control + BTC save restore +typedef enum { + FEATURE_BTC_NOP, + FEATURE_BTC_SAVE, + FEATURE_BTC_RESTORE, + FEATURE_BTC_COUNT, +} FEATURE_BTC_e; + +// Debug Overrides Bitmask +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001 +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002 +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004 +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008 +#define DEBUG_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00000010 +#define DEBUG_OVERRIDE_DISABLE_VCN_PG 0x00000020 +#define DEBUG_OVERRIDE_DISABLE_FMAX_VMAX 0x00000040 +#define DEBUG_OVERRIDE_DISABLE_IMU_FW_CHECKS 0x00000080 +#define DEBUG_OVERRIDE_DISABLE_D0i2_REENTRY_HSR_TIMER_CHECK 0x00000100 +#define DEBUG_OVERRIDE_DISABLE_DFLL 0x00000200 +#define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE 0x00000400 +#define DEBUG_OVERRIDE_DFLL_MASTER_MODE 0x00000800 +#define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE 0x00001000 +#define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000 +#define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000 +#define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000 + +// VR Mapping Bit Defines +#define VR_MAPPING_VR_SELECT_MASK 0x01 +#define VR_MAPPING_VR_SELECT_SHIFT 0x00 + +#define VR_MAPPING_PLANE_SELECT_MASK 0x02 +#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 + +// PSI Bit Defines +#define PSI_SEL_VR0_PLANE0_PSI0 0x01 +#define PSI_SEL_VR0_PLANE0_PSI1 0x02 +#define PSI_SEL_VR0_PLANE1_PSI0 0x04 +#define PSI_SEL_VR0_PLANE1_PSI1 0x08 +#define PSI_SEL_VR1_PLANE0_PSI0 0x10 +#define PSI_SEL_VR1_PLANE0_PSI1 0x20 
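/*
 * Illustrative sketch (editor's example, not part of this patch): the
 * FEATURE_*_BIT values above index a 64-bit feature-control word
 * (NUM_FEATURES is 64), so masks built from them need 64-bit shifts (1ULL)
 * once a bit number can exceed 31, as ALLOWED_FEATURE_CTRL_SCPM does for
 * FEATURE_DS_VCN_BIT.  The feature_enabled() helper below is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define FEATURE_DPM_GFXCLK_BIT  1
#define FEATURE_DS_VCN_BIT      44
#define NUM_FEATURES            64

/* Test one feature bit in a 64-bit enabled-features mask. */
static int feature_enabled(uint64_t feature_mask, unsigned int bit)
{
	return bit < NUM_FEATURES && (feature_mask & (1ULL << bit)) != 0;
}

int main(void)
{
	uint64_t mask = (1ULL << FEATURE_DPM_GFXCLK_BIT) |
			(1ULL << FEATURE_DS_VCN_BIT);

	printf("GFXCLK DPM enabled: %d, VCN deep sleep enabled: %d\n",
	       feature_enabled(mask, FEATURE_DPM_GFXCLK_BIT),
	       feature_enabled(mask, FEATURE_DS_VCN_BIT));
	return 0;
}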
+#define PSI_SEL_VR1_PLANE1_PSI0 0x40 +#define PSI_SEL_VR1_PLANE1_PSI1 0x80 + +typedef enum { + SVI_PSI_0, // Full phase count (default) + SVI_PSI_1, // Phase count 1st level + SVI_PSI_2, // Phase count 2nd level + SVI_PSI_3, // Single phase operation + active diode emulation + SVI_PSI_4, // Single phase operation + passive diode emulation *optional* + SVI_PSI_5, // Reserved + SVI_PSI_6, // Power down to 0V (voltage regulation disabled) + SVI_PSI_7, // Automated phase shedding and diode emulation +} SVI_PSI_e; + +// Throttler Control/Status Bits +#define THROTTLER_TEMP_EDGE_BIT 0 +#define THROTTLER_TEMP_HOTSPOT_BIT 1 +#define THROTTLER_TEMP_HOTSPOT_GFX_BIT 2 +#define THROTTLER_TEMP_HOTSPOT_SOC_BIT 3 +#define THROTTLER_TEMP_MEM_BIT 4 +#define THROTTLER_TEMP_VR_GFX_BIT 5 +#define THROTTLER_TEMP_VR_SOC_BIT 6 +#define THROTTLER_TEMP_VR_MEM0_BIT 7 +#define THROTTLER_TEMP_VR_MEM1_BIT 8 +#define THROTTLER_TEMP_LIQUID0_BIT 9 +#define THROTTLER_TEMP_LIQUID1_BIT 10 +#define THROTTLER_TEMP_PLX_BIT 11 +#define THROTTLER_TDC_GFX_BIT 12 +#define THROTTLER_TDC_SOC_BIT 13 +#define THROTTLER_PPT0_BIT 14 +#define THROTTLER_PPT1_BIT 15 +#define THROTTLER_PPT2_BIT 16 +#define THROTTLER_PPT3_BIT 17 +#define THROTTLER_FIT_BIT 18 +#define THROTTLER_GFX_APCC_PLUS_BIT 19 +#define THROTTLER_GFX_DVO_BIT 20 +#define THROTTLER_COUNT 21 + +// FW DState Features Control Bits +#define FW_DSTATE_SOC_ULV_BIT 0 +#define FW_DSTATE_G6_HSR_BIT 1 +#define FW_DSTATE_G6_PHY_VMEMP_OFF_BIT 2 +#define FW_DSTATE_SMN_DS_BIT 3 +#define FW_DSTATE_MP1_WHISPER_MODE_BIT 4 +#define FW_DSTATE_SOC_LIV_MIN_BIT 5 +#define FW_DSTATE_SOC_PLL_PWRDN_BIT 6 +#define FW_DSTATE_MEM_PLL_PWRDN_BIT 7 +#define FW_DSTATE_MALL_ALLOC_BIT 8 +#define FW_DSTATE_MEM_PSI_BIT 9 +#define FW_DSTATE_HSR_NON_STROBE_BIT 10 +#define FW_DSTATE_MP0_ENTER_WFI_BIT 11 +#define FW_DSTATE_MALL_FLUSH_BIT 12 +#define FW_DSTATE_SOC_PSI_BIT 13 +#define FW_DSTATE_MMHUB_INTERLOCK_BIT 14 +#define FW_DSTATE_D0i3_2_QUIET_FW_BIT 15 +#define FW_DSTATE_CLDO_PRG_BIT 16 +#define FW_DSTATE_DF_PLL_PWRDN_BIT 17 + +//LED Display Mask & Control Bits +#define LED_DISPLAY_GFX_DPM_BIT 0 +#define LED_DISPLAY_PCIE_BIT 1 +#define LED_DISPLAY_ERROR_BIT 2 + + +#define MEM_TEMP_READ_OUT_OF_BAND_BIT 0 +#define MEM_TEMP_READ_IN_BAND_REFRESH_BIT 1 +#define MEM_TEMP_READ_IN_BAND_DUMMY_PSTATE_BIT 2 + +typedef enum { + SMARTSHIFT_VERSION_1, + SMARTSHIFT_VERSION_2, + SMARTSHIFT_VERSION_3, +} SMARTSHIFT_VERSION_e; + +typedef enum { + FOPT_CALC_AC_CALC_DC, + FOPT_PPTABLE_AC_CALC_DC, + FOPT_CALC_AC_PPTABLE_DC, + FOPT_PPTABLE_AC_PPTABLE_DC, +} FOPT_CALC_e; + +typedef enum { + DRAM_BIT_WIDTH_DISABLED = 0, + DRAM_BIT_WIDTH_X_8 = 8, + DRAM_BIT_WIDTH_X_16 = 16, + DRAM_BIT_WIDTH_X_32 = 32, + DRAM_BIT_WIDTH_X_64 = 64, + DRAM_BIT_WIDTH_X_128 = 128, + DRAM_BIT_WIDTH_COUNT, +} DRAM_BIT_WIDTH_TYPE_e; + +//I2C Interface +#define NUM_I2C_CONTROLLERS 8 + +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +#define MAX_SW_I2C_COMMANDS 24 + +typedef enum { + I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 + I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 + I2C_CONTROLLER_PORT_COUNT, +} I2cControllerPort_e; + +typedef enum { + I2C_CONTROLLER_NAME_VR_GFX = 0, + I2C_CONTROLLER_NAME_VR_SOC, + I2C_CONTROLLER_NAME_VR_VMEMP, + I2C_CONTROLLER_NAME_VR_VDDIO, + I2C_CONTROLLER_NAME_LIQUID0, + I2C_CONTROLLER_NAME_LIQUID1, + I2C_CONTROLLER_NAME_PLX, + I2C_CONTROLLER_NAME_FAN_INTAKE, + I2C_CONTROLLER_NAME_COUNT, +} I2cControllerName_e; + +typedef enum { + I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, + I2C_CONTROLLER_THROTTLER_VR_GFX, + 
I2C_CONTROLLER_THROTTLER_VR_SOC, + I2C_CONTROLLER_THROTTLER_VR_VMEMP, + I2C_CONTROLLER_THROTTLER_VR_VDDIO, + I2C_CONTROLLER_THROTTLER_LIQUID0, + I2C_CONTROLLER_THROTTLER_LIQUID1, + I2C_CONTROLLER_THROTTLER_PLX, + I2C_CONTROLLER_THROTTLER_FAN_INTAKE, + I2C_CONTROLLER_THROTTLER_INA3221, + I2C_CONTROLLER_THROTTLER_COUNT, +} I2cControllerThrottler_e; + +typedef enum { + I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5, + I2C_CONTROLLER_PROTOCOL_VR_IR35217, + I2C_CONTROLLER_PROTOCOL_TMP_MAX31875, + I2C_CONTROLLER_PROTOCOL_INA3221, + I2C_CONTROLLER_PROTOCOL_TMP_MAX6604, + I2C_CONTROLLER_PROTOCOL_COUNT, +} I2cControllerProtocol_e; + +typedef struct { + uint8_t Enabled; + uint8_t Speed; + uint8_t SlaveAddress; + uint8_t ControllerPort; + uint8_t ControllerName; + uint8_t ThermalThrotter; + uint8_t I2cProtocol; + uint8_t PaddingConfig; +} I2cControllerConfig_t; + +typedef enum { + I2C_PORT_SVD_SCL = 0, + I2C_PORT_GPIO, +} I2cPort_e; + +typedef enum { + I2C_SPEED_FAST_50K = 0, //50 Kbits/s + I2C_SPEED_FAST_100K, //100 Kbits/s + I2C_SPEED_FAST_400K, //400 Kbits/s + I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) + I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) + I2C_SPEED_HIGH_2M, //2.3 Mbits/s + I2C_SPEED_COUNT, +} I2cSpeed_e; + +typedef enum { + I2C_CMD_READ = 0, + I2C_CMD_WRITE, + I2C_CMD_COUNT, +} I2cCmdType_e; + +#define CMDCONFIG_STOP_BIT 0 +#define CMDCONFIG_RESTART_BIT 1 +#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write + +#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) +#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) +#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT) + +typedef struct { + uint8_t ReadWriteData; //Return data for read. Data to send for write + uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write +} SwI2cCmd_t; //SW I2C Command Table + +typedef struct { + uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) + uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select + uint8_t SlaveAddress; //Slave address of device + uint8_t NumCmds; //Number of commands + + SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; +} SwI2cRequest_t; // SW I2C Request Table + +typedef struct { + SwI2cRequest_t SwI2cRequest; + + uint32_t Spare[8]; + uint32_t MmHubPadding[8]; // SMU internal use +} SwI2cRequestExternal_t; + +typedef struct { + uint64_t mca_umc_status; + uint64_t mca_umc_addr; + + uint16_t ce_count_lo_chip; + uint16_t ce_count_hi_chip; + + uint32_t eccPadding; +} EccInfo_t; + +typedef struct { + EccInfo_t EccInfo[24]; +} EccInfoTable_t; + +//D3HOT sequences +typedef enum { + BACO_SEQUENCE, + MSR_SEQUENCE, + BAMACO_SEQUENCE, + ULPS_SEQUENCE, + D3HOT_SEQUENCE_COUNT, +} D3HOTSequence_e; + +//This is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_DYNAMIC_MODE = 0, + PG_STATIC_MODE, +} PowerGatingMode_e; + +//This is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_POWER_DOWN = 0, + PG_POWER_UP, +} PowerGatingSettings_e; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} QuadraticInt_t; + +typedef struct { + uint32_t m; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable +} LinearInt_t; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t 
c; // store in IEEE float format in this variable +} DroopInt_t; + +typedef enum { + DCS_ARCH_DISABLED, + DCS_ARCH_FADCS, + DCS_ARCH_ASYNC, +} DCS_ARCH_e; + +//Only Clks that have DPM descriptors are listed here +typedef enum { + PPCLK_GFXCLK = 0, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_FCLK, + PPCLK_DCLK_0, + PPCLK_VCLK_0, + PPCLK_DISPCLK, + PPCLK_DPPCLK, + PPCLK_DPREFCLK, + PPCLK_DCFCLK, + PPCLK_DTBCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + VOLTAGE_MODE_PPTABLE = 0, + VOLTAGE_MODE_FUSES, + VOLTAGE_MODE_COUNT, +} VOLTAGE_MODE_e; + +typedef enum { + AVFS_VOLTAGE_GFX = 0, + AVFS_VOLTAGE_SOC, + AVFS_VOLTAGE_COUNT, +} AVFS_VOLTAGE_TYPE_e; + +typedef enum { + AVFS_TEMP_COLD = 0, + AVFS_TEMP_HOT, + AVFS_TEMP_COUNT, +} AVFS_TEMP_e; + +typedef enum { + AVFS_D_G, + AVFS_D_COUNT, +} AVFS_D_e; + + +typedef enum { + UCLK_DIV_BY_1 = 0, + UCLK_DIV_BY_2, + UCLK_DIV_BY_4, + UCLK_DIV_BY_8, +} UCLK_DIV_e; + +typedef enum { + GPIO_INT_POLARITY_ACTIVE_LOW = 0, + GPIO_INT_POLARITY_ACTIVE_HIGH, +} GpioIntPolarity_e; + +typedef enum { + PWR_CONFIG_TDP = 0, + PWR_CONFIG_TGP, + PWR_CONFIG_TCP_ESTIMATED, + PWR_CONFIG_TCP_MEASURED, + PWR_CONFIG_TBP_DESKTOP, + PWR_CONFIG_TBP_MOBILE, +} PwrConfig_e; + +typedef struct { + uint8_t Padding; + uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM + uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used + uint8_t CalculateFopt; // Indication whether FW should calculate Fopt or use values below. Reference FOPT_CALC_e + LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) + uint32_t Padding3[3]; + uint16_t Padding4; + uint16_t FoptimalDc; //Foptimal frequency in DC power mode. + uint16_t FoptimalAc; //Foptimal frequency in AC power mode. 
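Illustration: QuadraticInt_t, LinearInt_t and DroopInt_t above carry IEEE-754 single-precision values in uint32_t storage, so a consumer has to reinterpret the bits rather than convert the integer value. Kernel code avoids floating point, so the sketch below is closer to what user-space tooling parsing these tables might do; the helper names are illustrative, not part of this interface.

#include <stdint.h>
#include <string.h>

/* Reinterpret a uint32_t holding IEEE-754 bits as a float (bit copy, not a value conversion). */
static inline float smu_u32_to_float(uint32_t bits)
{
        float f;

        memcpy(&f, &bits, sizeof(f));   /* type-pun via memcpy to avoid strict-aliasing issues */
        return f;
}

/* Evaluate y = a*x^2 + b*x + c for a QuadraticInt_t whose fields hold float bit patterns. */
static inline float smu_eval_quadratic(const QuadraticInt_t *q, float x)
{
        float a = smu_u32_to_float(q->a);
        float b = smu_u32_to_float(q->b);
        float c = smu_u32_to_float(q->c);

        return a * x * x + b * x + c;
}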
+ uint16_t Padding2; +} DpmDescriptor_t; + +typedef enum { + PPT_THROTTLER_PPT0, + PPT_THROTTLER_PPT1, + PPT_THROTTLER_PPT2, + PPT_THROTTLER_PPT3, + PPT_THROTTLER_COUNT +} PPT_THROTTLER_e; + +typedef enum { + TEMP_EDGE, + TEMP_HOTSPOT, + TEMP_HOTSPOT_GFX, + TEMP_HOTSPOT_SOC, + TEMP_MEM, + TEMP_VR_GFX, + TEMP_VR_SOC, + TEMP_VR_MEM0, + TEMP_VR_MEM1, + TEMP_LIQUID0, + TEMP_LIQUID1, + TEMP_PLX, + TEMP_COUNT, +} TEMP_e; + +typedef enum { + TDC_THROTTLER_GFX, + TDC_THROTTLER_SOC, + TDC_THROTTLER_COUNT +} TDC_THROTTLER_e; + +typedef enum { + SVI_PLANE_VDD_GFX, + SVI_PLANE_VDD_SOC, + SVI_PLANE_VDDCI_MEM, + SVI_PLANE_VDDIO_MEM, + SVI_PLANE_COUNT, +} SVI_PLANE_e; + +typedef enum { + PMFW_VOLT_PLANE_GFX, + PMFW_VOLT_PLANE_SOC, + PMFW_VOLT_PLANE_COUNT +} PMFW_VOLT_PLANE_e; + +typedef enum { + CUSTOMER_VARIANT_ROW, + CUSTOMER_VARIANT_FALCON, + CUSTOMER_VARIANT_COUNT, +} CUSTOMER_VARIANT_e; + +typedef enum { + POWER_SOURCE_AC, + POWER_SOURCE_DC, + POWER_SOURCE_COUNT, +} POWER_SOURCE_e; + +typedef enum { + MEM_VENDOR_PLACEHOLDER0, // 0 + MEM_VENDOR_SAMSUNG, // 1 + MEM_VENDOR_INFINEON, // 2 + MEM_VENDOR_ELPIDA, // 3 + MEM_VENDOR_ETRON, // 4 + MEM_VENDOR_NANYA, // 5 + MEM_VENDOR_HYNIX, // 6 + MEM_VENDOR_MOSEL, // 7 + MEM_VENDOR_WINBOND, // 8 + MEM_VENDOR_ESMT, // 9 + MEM_VENDOR_PLACEHOLDER1, // 10 + MEM_VENDOR_PLACEHOLDER2, // 11 + MEM_VENDOR_PLACEHOLDER3, // 12 + MEM_VENDOR_PLACEHOLDER4, // 13 + MEM_VENDOR_PLACEHOLDER5, // 14 + MEM_VENDOR_MICRON, // 15 + MEM_VENDOR_COUNT, +} MEM_VENDOR_e; + +typedef enum { + PP_GRTAVFS_HW_CPO_CTL_ZONE0, + PP_GRTAVFS_HW_CPO_CTL_ZONE1, + PP_GRTAVFS_HW_CPO_CTL_ZONE2, + PP_GRTAVFS_HW_CPO_CTL_ZONE3, + PP_GRTAVFS_HW_CPO_CTL_ZONE4, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE0, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE0, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE1, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE1, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE2, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE2, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE3, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE3, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE4, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE4, + PP_GRTAVFS_HW_ZONE0_VF, + PP_GRTAVFS_HW_ZONE1_VF1, + PP_GRTAVFS_HW_ZONE2_VF2, + PP_GRTAVFS_HW_ZONE3_VF3, + PP_GRTAVFS_HW_VOLTAGE_GB, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE0, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE1, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE2, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE3, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE4, + PP_GRTAVFS_HW_RESERVED_0, + PP_GRTAVFS_HW_RESERVED_1, + PP_GRTAVFS_HW_RESERVED_2, + PP_GRTAVFS_HW_RESERVED_3, + PP_GRTAVFS_HW_RESERVED_4, + PP_GRTAVFS_HW_RESERVED_5, + PP_GRTAVFS_HW_RESERVED_6, + PP_GRTAVFS_HW_FUSE_COUNT, +} PP_GRTAVFS_HW_FUSE_e; + +typedef enum { + PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_COLD_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_COLD_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_COLD_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_COLD_T0, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z0, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z1, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z2, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z3, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z4, + PP_GRTAVFS_FW_COMMON_FUSE_COUNT, +} PP_GRTAVFS_FW_COMMON_FUSE_e; + +typedef enum { + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_NEG_1, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_0, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_1, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_2, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_3, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_4, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_NEG_1, + 
PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_0, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_1, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_2, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_3, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_4, + PP_GRTAVFS_FW_SEP_FUSE_VF_NEG_1_FREQUENCY, + PP_GRTAVFS_FW_SEP_FUSE_VF4_FREQUENCY, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_0, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_1, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_2, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_3, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_4, + PP_GRTAVFS_FW_SEP_FUSE_COUNT, +} PP_GRTAVFS_FW_SEP_FUSE_e; + +#define PP_NUM_RTAVFS_PWL_ZONES 5 + + +// VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3 +// Slope Q1.7, Offset Q1.2 +typedef struct { + int8_t Offset; // in Amps + uint8_t Padding; + uint16_t MaxCurrent; // in Amps +} SviTelemetryScale_t; + +#define PP_NUM_OD_VF_CURVE_POINTS PP_NUM_RTAVFS_PWL_ZONES + 1 + +#define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0 +#define PP_OD_FEATURE_GFX_VMAX_BIT 1 +#define PP_OD_FEATURE_SOC_VMAX_BIT 2 +#define PP_OD_FEATURE_PPT_BIT 3 +#define PP_OD_FEATURE_FAN_CURVE_BIT 4 +#define PP_OD_FEATURE_FAN_LEGACY_BIT 5 +#define PP_OD_FEATURE_FULL_CTRL_BIT 6 +#define PP_OD_FEATURE_TDC_BIT 7 +#define PP_OD_FEATURE_GFXCLK_BIT 8 +#define PP_OD_FEATURE_UCLK_BIT 9 +#define PP_OD_FEATURE_FCLK_BIT 10 +#define PP_OD_FEATURE_ZERO_FAN_BIT 11 +#define PP_OD_FEATURE_TEMPERATURE_BIT 12 +#define PP_OD_FEATURE_EDC_BIT 13 +#define PP_OD_FEATURE_COUNT 14 + +typedef enum { + PP_OD_POWER_FEATURE_ALWAYS_ENABLED, + PP_OD_POWER_FEATURE_DISABLED_WHILE_GAMING, + PP_OD_POWER_FEATURE_ALWAYS_DISABLED, +} PP_OD_POWER_FEATURE_e; + +typedef enum { + FAN_MODE_AUTO = 0, + FAN_MODE_MANUAL_LINEAR, +} FanMode_e; + +typedef enum { + OD_NO_ERROR, + OD_REQUEST_ADVANCED_NOT_SUPPORTED, + OD_UNSUPPORTED_FEATURE, + OD_INVALID_FEATURE_COMBO_ERROR, + OD_GFXCLK_VF_CURVE_OFFSET_ERROR, + OD_VDD_GFX_VMAX_ERROR, + OD_VDD_SOC_VMAX_ERROR, + OD_PPT_ERROR, + OD_FAN_MIN_PWM_ERROR, + OD_FAN_ACOUSTIC_TARGET_ERROR, + OD_FAN_ACOUSTIC_LIMIT_ERROR, + OD_FAN_TARGET_TEMP_ERROR, + OD_FAN_ZERO_RPM_STOP_TEMP_ERROR, + OD_FAN_CURVE_PWM_ERROR, + OD_FAN_CURVE_TEMP_ERROR, + OD_FULL_CTRL_GFXCLK_ERROR, + OD_FULL_CTRL_UCLK_ERROR, + OD_FULL_CTRL_FCLK_ERROR, + OD_FULL_CTRL_VDD_GFX_ERROR, + OD_FULL_CTRL_VDD_SOC_ERROR, + OD_TDC_ERROR, + OD_GFXCLK_ERROR, + OD_UCLK_ERROR, + OD_FCLK_ERROR, + OD_OP_TEMP_ERROR, + OD_OP_GFX_EDC_ERROR, + OD_OP_GFX_PCC_ERROR, + OD_POWER_FEATURE_CTRL_ERROR, +} OD_FAIL_e; + +typedef struct { + uint32_t FeatureCtrlMask; + + //Voltage control + int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS]; + + uint16_t VddGfxVmax; // in mV + uint16_t VddSocVmax; + + uint8_t IdlePwrSavingFeaturesCtrl; + uint8_t RuntimePwrSavingFeaturesCtrl; + uint16_t Padding; + + //Frequency changes + int16_t GfxclkFmin; // MHz + int16_t GfxclkFmax; // MHz + uint16_t UclkFmin; // MHz + uint16_t UclkFmax; // MHz + uint16_t FclkFmin; + uint16_t FclkFmax; + + //PPT + int16_t Ppt; // % + int16_t Tdc; + + //Fan control + uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS]; + uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS]; + uint16_t FanMinimumPwm; + uint16_t AcousticTargetRpmThreshold; + uint16_t AcousticLimitRpmThreshold; + uint16_t FanTargetTemperature; // Degree Celcius + uint8_t FanZeroRpmEnable; + uint8_t FanZeroRpmStopTemp; + uint8_t FanMode; + uint8_t MaxOpTemp; + + uint8_t AdvancedOdModeEnabled; + uint8_t Padding1[3]; + + uint16_t GfxVoltageFullCtrlMode; + uint16_t 
SocVoltageFullCtrlMode; + uint16_t GfxclkFullCtrlMode; + uint16_t UclkFullCtrlMode; + uint16_t FclkFullCtrlMode; + uint16_t Padding2; + + int16_t GfxEdc; + int16_t GfxPccLimitControl; + + uint32_t Spare[10]; + uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround +} OverDriveTable_t; + +typedef struct { + OverDriveTable_t OverDriveTable; + +} OverDriveTableExternal_t; + +typedef struct { + uint32_t FeatureCtrlMask; + + //Gfx Vf Curve + int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS]; + //gfx Vmax + uint16_t VddGfxVmax; // in mV + //soc Vmax + uint16_t VddSocVmax; + + //gfxclk + int16_t GfxclkFmin; // MHz + int16_t GfxclkFmax; // MHz + //uclk + uint16_t UclkFmin; // MHz + uint16_t UclkFmax; // MHz + //fclk + uint16_t FclkFmin; + uint16_t FclkFmax; + + //PPT + int16_t Ppt; // % + //TDC + int16_t Tdc; + + //Fan Curve + uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS]; + uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS]; + //Fan Legacy + uint16_t FanMinimumPwm; + uint16_t AcousticTargetRpmThreshold; + uint16_t AcousticLimitRpmThreshold; + uint16_t FanTargetTemperature; // Degree Celcius + //zero fan + uint8_t FanZeroRpmEnable; + //temperature + uint8_t MaxOpTemp; + uint8_t Padding[2]; + + //Full Ctrl + uint16_t GfxVoltageFullCtrlMode; + uint16_t SocVoltageFullCtrlMode; + uint16_t GfxclkFullCtrlMode; + uint16_t UclkFullCtrlMode; + uint16_t FclkFullCtrlMode; + //EDC + int16_t GfxEdc; + int16_t GfxPccLimitControl; + int16_t Padding1; + + uint32_t Spare[5]; +} OverDriveLimits_t; + +typedef enum { + BOARD_GPIO_SMUIO_0, + BOARD_GPIO_SMUIO_1, + BOARD_GPIO_SMUIO_2, + BOARD_GPIO_SMUIO_3, + BOARD_GPIO_SMUIO_4, + BOARD_GPIO_SMUIO_5, + BOARD_GPIO_SMUIO_6, + BOARD_GPIO_SMUIO_7, + BOARD_GPIO_SMUIO_8, + BOARD_GPIO_SMUIO_9, + BOARD_GPIO_SMUIO_10, + BOARD_GPIO_SMUIO_11, + BOARD_GPIO_SMUIO_12, + BOARD_GPIO_SMUIO_13, + BOARD_GPIO_SMUIO_14, + BOARD_GPIO_SMUIO_15, + BOARD_GPIO_SMUIO_16, + BOARD_GPIO_SMUIO_17, + BOARD_GPIO_SMUIO_18, + BOARD_GPIO_SMUIO_19, + BOARD_GPIO_SMUIO_20, + BOARD_GPIO_SMUIO_21, + BOARD_GPIO_SMUIO_22, + BOARD_GPIO_SMUIO_23, + BOARD_GPIO_SMUIO_24, + BOARD_GPIO_SMUIO_25, + BOARD_GPIO_SMUIO_26, + BOARD_GPIO_SMUIO_27, + BOARD_GPIO_SMUIO_28, + BOARD_GPIO_SMUIO_29, + BOARD_GPIO_SMUIO_30, + BOARD_GPIO_SMUIO_31, + MAX_BOARD_GPIO_SMUIO_NUM, + BOARD_GPIO_DC_GEN_A, + BOARD_GPIO_DC_GEN_B, + BOARD_GPIO_DC_GEN_C, + BOARD_GPIO_DC_GEN_D, + BOARD_GPIO_DC_GEN_E, + BOARD_GPIO_DC_GEN_F, + BOARD_GPIO_DC_GEN_G, + BOARD_GPIO_DC_GENLK_CLK, + BOARD_GPIO_DC_GENLK_VSYNC, + BOARD_GPIO_DC_SWAPLOCK_A, + BOARD_GPIO_DC_SWAPLOCK_B, + MAX_BOARD_DC_GPIO_NUM, + BOARD_GPIO_LV_EN, +} BOARD_GPIO_TYPE_e; + +#define INVALID_BOARD_GPIO 0xFF + + +typedef struct { + //PLL 0 + uint16_t InitImuClk; + uint16_t InitSocclk; + uint16_t InitMpioclk; + uint16_t InitSmnclk; + //PLL 1 + uint16_t InitDispClk; + uint16_t InitDppClk; + uint16_t InitDprefclk; + uint16_t InitDcfclk; + uint16_t InitDtbclk; + uint16_t InitDbguSocClk; + //PLL 2 + uint16_t InitGfxclk_bypass; + uint16_t InitMp1clk; + uint16_t InitLclk; + uint16_t InitDbguBacoClk; + uint16_t InitBaco400clk; + uint16_t InitBaco1200clk_bypass; + uint16_t InitBaco700clk_bypass; + uint16_t InitBaco500clk; + // PLL 3 + uint16_t InitDclk0; + uint16_t InitVclk0; + // PLL 4 + uint16_t InitFclk; + uint16_t Padding1; + // PLL 5 + //UCLK clocks, assumed all UCLK instances will be the same. 
+ uint8_t InitUclkLevel; // =0,1,2,3,4,5 frequency from FreqTableUclk + + uint8_t Padding[3]; + + uint32_t InitVcoFreqPll0; //smu_socclk_t + uint32_t InitVcoFreqPll1; //smu_displayclk_t + uint32_t InitVcoFreqPll2; //smu_nbioclk_t + uint32_t InitVcoFreqPll3; //smu_vcnclk_t + uint32_t InitVcoFreqPll4; //smu_fclk_t + uint32_t InitVcoFreqPll5; //smu_uclk_01_t + uint32_t InitVcoFreqPll6; //smu_uclk_23_t + uint32_t InitVcoFreqPll7; //smu_uclk_45_t + uint32_t InitVcoFreqPll8; //smu_uclk_67_t + + //encoding will be SVI3 + uint16_t InitGfx; // In mV(Q2) , should be 0? + uint16_t InitSoc; // In mV(Q2) + uint16_t InitVddIoMem; // In mV(Q2) MemVdd + uint16_t InitVddCiMem; // In mV(Q2) VMemP + + //uint16_t Padding2; + + uint32_t Spare[8]; +} BootValues_t; + +typedef struct { + uint16_t Power[PPT_THROTTLER_COUNT][POWER_SOURCE_COUNT]; // Watts + uint16_t Tdc[TDC_THROTTLER_COUNT]; // Amps + + uint16_t Temperature[TEMP_COUNT]; // Celsius + + uint8_t PwmLimitMin; + uint8_t PwmLimitMax; + uint8_t FanTargetTemperature; + uint8_t Spare1[1]; + + uint16_t AcousticTargetRpmThresholdMin; + uint16_t AcousticTargetRpmThresholdMax; + + uint16_t AcousticLimitRpmThresholdMin; + uint16_t AcousticLimitRpmThresholdMax; + + uint16_t PccLimitMin; + uint16_t PccLimitMax; + + uint16_t FanStopTempMin; + uint16_t FanStopTempMax; + uint16_t FanStartTempMin; + uint16_t FanStartTempMax; + + uint16_t PowerMinPpt0[POWER_SOURCE_COUNT]; + uint32_t Spare[11]; +} MsgLimits_t; + +typedef struct { + uint16_t BaseClockAc; + uint16_t GameClockAc; + uint16_t BoostClockAc; + uint16_t BaseClockDc; + uint16_t GameClockDc; + uint16_t BoostClockDc; + + uint32_t Reserved[4]; +} DriverReportedClocks_t; + +typedef struct { + uint8_t DcBtcEnabled; + uint8_t Padding[3]; + + uint16_t DcTol; // mV Q2 + uint16_t DcBtcGb; // mV Q2 + + uint16_t DcBtcMin; // mV Q2 + uint16_t DcBtcMax; // mV Q2 + + LinearInt_t DcBtcGbScalar; +} AvfsDcBtcParams_t; + +typedef struct { + uint16_t AvfsTemp[AVFS_TEMP_COUNT]; //in degrees C + uint16_t VftFMin; // in MHz + uint16_t VInversion; // in mV Q2 + QuadraticInt_t qVft[AVFS_TEMP_COUNT]; + QuadraticInt_t qAvfsGb; + QuadraticInt_t qAvfsGb2; +} AvfsFuseOverride_t; + +//all settings maintained by PFE team +typedef struct { + uint8_t Version; + uint8_t Spare8[3]; + // SECTION: Feature Control + uint32_t FeaturesToRun[NUM_FEATURES / 32]; // Features that PMFW will attempt to enable. Use FEATURE_*_BIT as mapping + // SECTION: FW DSTATE Settings + uint32_t FwDStateMask; // See FW_DSTATE_*_BIT for mapping + // SECTION: Advanced Options + uint32_t DebugOverrides; + + uint32_t Spare[2]; +} PFE_Settings_t; + +typedef struct { + // SECTION: Version + uint32_t Version; // should be unique to each SKU(i.e if any value changes in below structure then this value must be different) + + // SECTION: Miscellaneous Configuration + uint8_t TotalPowerConfig; // Determines how PMFW calculates the power. Use defines from PwrConfig_e + uint8_t CustomerVariant; //To specify if this PPTable is intended for a particular customer. Use defines from CUSTOMER_VARIANT_e + uint8_t MemoryTemperatureTypeMask; // Bit mapping indicating which methods of memory temperature reading are enabled. Use defines from MEM_TEMP_*BIT + uint8_t SmartShiftVersion; // Determine what SmartShift feature version is supported Use defines from SMARTSHIFT_VERSION_e + + // SECTION: Infrastructure Limits + uint8_t SocketPowerLimitSpare[10]; + + //if set to 1, SocketPowerLimitAc and SocketPowerLimitDc will be interpreted as legacy programs(i.e absolute power). 
If 0, all except index 0 will be scalars + //relative index 0 + uint8_t EnableLegacyPptLimit; + uint8_t UseInputTelemetry; //applicable to SVI3 only and only to be set if VRs support + + uint8_t SmartShiftMinReportedPptinDcs; //minimum possible active power consumption for this SKU. Used for SmartShift power reporting + + uint8_t PaddingPpt[7]; + + uint16_t HwCtfTempLimit; // In degrees Celsius. Temperature above which HW will trigger CTF. Consumed by VBIOS only + + uint16_t PaddingInfra; + + // Per year normalized Vmax state failure rates (sum of the two domains divided by life time in years) + uint32_t FitControllerFailureRateLimit; //in IEEE float + //Expected GFX Duty Cycle at Vmax. + uint32_t FitControllerGfxDutyCycle; // in IEEE float + //Expected SOC Duty Cycle at Vmax. + uint32_t FitControllerSocDutyCycle; // in IEEE float + + //This offset will be deducted from the controller output to before it goes through the SOC Vset limiter block. + uint32_t FitControllerSocOffset; //in IEEE float + + uint32_t GfxApccPlusResidencyLimit; // Percentage value. Used by APCC+ controller to control PCC residency to some value + + // SECTION: Throttler settings + uint32_t ThrottlerControlMask; // See THROTTLER_*_BIT for mapping + + + // SECTION: Voltage Control Parameters + uint16_t UlvVoltageOffset[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2). ULV offset used in either GFX_ULV or SOC_ULV(part of FW_DSTATE) + + uint8_t Padding[2]; + uint16_t DeepUlvVoltageOffsetSoc; // In mV(Q2) Long Idle Vmin (deep ULV), for VDD_SOC as part of FW_DSTATE + + // Voltage Limits + uint16_t DefaultMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage without FIT controller enabled + uint16_t BoostMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage with FIT controller enabled + + //Vmin Optimizations + int16_t VminTempHystersis[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature hysteresis for switching between low/high temperature values for Vmin + int16_t VminTempThreshold[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature threshold for switching between low/high temperature values for Vmin + uint16_t Vmin_Hot_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at hot. + uint16_t Vmin_Cold_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at cold. + uint16_t Vmin_Hot_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at hot. + uint16_t Vmin_Cold_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at cold. + uint16_t Vmin_Aging_Offset[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Worst-case aging margin + uint16_t Spare_Vmin_Plat_Offset_Hot[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Hot + uint16_t Spare_Vmin_Plat_Offset_Cold[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Cold + + //This is a fixed/minimum VMIN aging degradation offset which is applied at T0. This reflects the minimum amount of aging already accounted for. + uint16_t VcBtcFixedVminAgingOffset[PMFW_VOLT_PLANE_COUNT]; + //Linear offset or GB term to account for mis-correlation between PSM and Vmin shift trends across parts. + uint16_t VcBtcVmin2PsmDegrationGb[PMFW_VOLT_PLANE_COUNT]; + //Scalar coefficient of the PSM aging degradation function + uint32_t VcBtcPsmA[PMFW_VOLT_PLANE_COUNT]; // A_PSM + //Exponential coefficient of the PSM aging degradation function + uint32_t VcBtcPsmB[PMFW_VOLT_PLANE_COUNT]; // B_PSM + //Scalar coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold. 
+ uint32_t VcBtcVminA[PMFW_VOLT_PLANE_COUNT]; // A_VMIN + //Exponential coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold. + uint32_t VcBtcVminB[PMFW_VOLT_PLANE_COUNT]; // B_VMIN + + uint8_t PerPartVminEnabled[PMFW_VOLT_PLANE_COUNT]; + uint8_t VcBtcEnabled[PMFW_VOLT_PLANE_COUNT]; + + uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + + QuadraticInt_t Gfx_Vmin_droop; + QuadraticInt_t Soc_Vmin_droop; + uint32_t SpareVmin[6]; + + //SECTION: DPM Configuration 1 + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableShadowUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz + uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz + + uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz + + uint16_t GfxclkAibFmax; + uint16_t GfxclkFreqCap; + + //GFX Idle Power Settings + uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in Mhz + uint16_t GfxclkFgfxoffExitImu; // Exit/Entry in IMU stage (BYPASS), in Mhz + uint16_t GfxclkFgfxoffExitRlc; // Exit in RLC stage (PLL), in Mhz + uint16_t GfxclkThrottleClock; //Used primarily in DCS + uint8_t EnableGfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages + uint8_t GfxIdlePadding; + + uint8_t SmsRepairWRCKClkDivEn; + uint8_t SmsRepairWRCKClkDivVal; + uint8_t GfxOffEntryEarlyMGCGEn; + uint8_t GfxOffEntryForceCGCGEn; + uint8_t GfxOffEntryForceCGCGDelayEn; + uint8_t GfxOffEntryForceCGCGDelayVal; // in microseconds + + uint16_t GfxclkFreqGfxUlv; // in MHz + uint8_t GfxIdlePadding2[2]; + uint32_t GfxOffEntryHysteresis; //For RLC to count after it enters CGCG, and before triggers GFXOFF entry + uint32_t GfxoffSpare[15]; + + // DFLL + uint16_t DfllMstrOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias + uint16_t DfllSlvOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias + uint32_t DfllBtcMasterScalerM; + int32_t DfllBtcMasterScalerB; + uint32_t DfllBtcSlaveScalerM; + int32_t DfllBtcSlaveScalerB; + + uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg + uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg + uint32_t GfxDfllSpare[9]; + + // DVO + uint32_t DvoPsmDownThresholdVoltage; //Voltage float + uint32_t DvoPsmUpThresholdVoltage; //Voltage float + uint32_t DvoFmaxLowScaler; //Unitless float + + // GFX DCS + uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase + uint16_t PaddingDcs; + + uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase + uint16_t DcsMaxGfxOffTime; 
//Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch. + + uint32_t DcsMinCreditAccum; //Min amount of positive credit accumulation before waking GFX up as part of DCS. + + uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase. + uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin. + + uint32_t DcsPfGfxFopt; //Default to GFX FMIN + uint32_t DcsPfUclkFopt; //Default to UCLK FMIN + + uint8_t FoptEnabled; + uint8_t DcsSpare2[3]; + uint32_t DcsFoptM; //Tuning paramters to shift Fopt calculation, IEEE754 float + uint32_t DcsFoptB; //Tuning paramters to shift Fopt calculation, IEEE754 float + uint32_t DcsSpare[9]; + + // UCLK section + uint8_t UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations + uint8_t PaddingMem[3]; + + uint8_t UclkDpmPstates [NUM_UCLK_DPM_LEVELS]; // 6 Primary SW DPM states (6 + 6 Shadow) + uint8_t UclkDpmShadowPstates [NUM_UCLK_DPM_LEVELS]; // 6 Shadow SW DPM states (6 + 6 Shadow) + uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 + uint8_t FreqTableShadowUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 + uint16_t MemVmempVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + uint16_t MemVddioVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + uint16_t DalDcModeMaxUclkFreq; + uint8_t PaddingsMem[2]; + //FCLK Section + uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. Set to 0 use FW calculated value + uint16_t PaddingFclk; + + // Link DPM Settings + uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5 + uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 + uint16_t LclkFreq[NUM_LINK_LEVELS]; + + // SECTION: VDD_GFX AVFS + uint8_t OverrideGfxAvfsFuses; + uint8_t GfxAvfsPadding[3]; + + uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain + uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding + //uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; + uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; + + uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT]; + uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT]; + + uint32_t SocFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT]; + uint32_t GfxL2FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT]; + //uint32_t GfxSeFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT]; + uint32_t spare_FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT]; + + uint32_t Soc_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Soc_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Soc_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Soc_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES]; + + uint32_t Gfx_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Gfx_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Gfx_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Gfx_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES]; + + uint32_t Gfx_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Soc_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES]; + + uint32_t dGbV_dT_vmin; + uint32_t dGbV_dT_vmax; + + //Unused: PMFW-9370 + uint32_t V2F_vmin_range_low; + uint32_t V2F_vmin_range_high; + uint32_t V2F_vmax_range_low; + uint32_t V2F_vmax_range_high; + + AvfsDcBtcParams_t DcBtcGfxParams; + 
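Illustration: the PcieGenSpeed and PcieLaneCount link-DPM arrays above use the small integer encodings spelled out in their comments rather than raw gen/lane values. A minimal decode sketch based only on those comments; the helper names are illustrative.

#include <stdint.h>

/* PcieGenSpeed is 0-based per the comment above: 0=Gen1 ... 4=Gen5. */
static inline int smu_pcie_gen(uint8_t gen_code)
{
        return gen_code + 1;
}

/* PcieLaneCount encoding per the comment above: 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16. */
static inline int smu_pcie_lanes(uint8_t lane_code)
{
        static const int lanes[] = { 0, 1, 2, 4, 8, 12, 16 };

        return lane_code < sizeof(lanes) / sizeof(lanes[0]) ? lanes[lane_code] : 0;
}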
QuadraticInt_t SSCurve_GFX; + uint32_t GfxAvfsSpare[29]; + + //SECTION: VDD_SOC AVFS + uint8_t OverrideSocAvfsFuses; + uint8_t MinSocAvfsRevision; + uint8_t SocAvfsPadding[2]; + + AvfsFuseOverride_t SocAvfsFuseOverride[AVFS_D_COUNT]; + + DroopInt_t dBtcGbSoc[AVFS_D_COUNT]; // GHz->V BtcGb + + LinearInt_t qAgingGb[AVFS_D_COUNT]; // GHz->V + + QuadraticInt_t qStaticVoltageOffset[AVFS_D_COUNT]; // GHz->V + + AvfsDcBtcParams_t DcBtcSocParams[AVFS_D_COUNT]; + + QuadraticInt_t SSCurve_SOC; + uint32_t SocAvfsSpare[29]; + + //SECTION: Boot clock and voltage values + BootValues_t BootValues; + + //SECTION: Driver Reported Clocks + DriverReportedClocks_t DriverReportedClocks; + + //SECTION: Message Limits + MsgLimits_t MsgLimits; + + //SECTION: OverDrive Limits + OverDriveLimits_t OverDriveLimitsBasicMin; + OverDriveLimits_t OverDriveLimitsBasicMax; + OverDriveLimits_t OverDriveLimitsAdvancedMin; + OverDriveLimits_t OverDriveLimitsAdvancedMax; + + // Section: Total Board Power idle vs active coefficients + uint8_t TotalBoardPowerSupport; + uint8_t TotalBoardPowerPadding[1]; + uint16_t TotalBoardPowerRoc; + + //PMFW-11158 + QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT]; + QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT]; + QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT]; + + // APT GFX to UCLK mapping + int32_t AptUclkGfxclkLookup[POWER_SOURCE_COUNT][6]; + uint32_t AptUclkGfxclkLookupHyst[POWER_SOURCE_COUNT][6]; + uint32_t AptPadding; + + // Xvmin didt + QuadraticInt_t GfxXvminDidtDroopThresh; + uint32_t GfxXvminDidtResetDDWait; + uint32_t GfxXvminDidtClkStopWait; + uint32_t GfxXvminDidtFcsStepCtrl; + uint32_t GfxXvminDidtFcsWaitCtrl; + + // PSM based didt controller + uint32_t PsmModeEnabled; //0: all disabled 1: static mode only 2: dynamic mode only 3:static + dynamic mode + uint32_t P2v_a; // floating point in U32 format + uint32_t P2v_b; + uint32_t P2v_c; + uint32_t T2p_a; + uint32_t T2p_b; + uint32_t T2p_c; + uint32_t P2vTemp; + QuadraticInt_t PsmDidtStaticSettings; + QuadraticInt_t PsmDidtDynamicSettings; + uint8_t PsmDidtAvgDiv; + uint8_t PsmDidtForceStall; + uint16_t PsmDidtReleaseTimer; + uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog + // CAC EDC + uint32_t Leakage_C0; // in IEEE float + uint32_t Leakage_C1; // in IEEE float + uint32_t Leakage_C2; // in IEEE float + uint32_t Leakage_C3; // in IEEE float + uint32_t Leakage_C4; // in IEEE float + uint32_t Leakage_C5; // in IEEE float + uint32_t GFX_CLK_SCALAR; // in IEEE float + uint32_t GFX_CLK_INTERCEPT; // in IEEE float + uint32_t GFX_CAC_M; // in IEEE float + uint32_t GFX_CAC_B; // in IEEE float + uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float + uint32_t DynToTotalCacScalar; // in IEEE + // GFX EDC XVMIN + uint32_t XVmin_Gfx_EdcThreshScalar; + uint32_t XVmin_Gfx_EdcEnableFreq; + uint32_t XVmin_Gfx_EdcPccAsStepCtrl; + uint32_t XVmin_Gfx_EdcPccAsWaitCtrl; + uint16_t XVmin_Gfx_EdcThreshold; + uint16_t XVmin_Gfx_EdcFiltHysWaitCtrl; + // SOC EDC XVMIN + uint32_t XVmin_Soc_EdcThreshScalar; + uint32_t XVmin_Soc_EdcEnableFreq; + uint32_t XVmin_Soc_EdcThreshold; // LPF: number of cycles Xvmin_trig_filt will react. + uint16_t XVmin_Soc_EdcStepUpTime; // 10 bit, refclk count to step up throttle when PCC remains asserted. + uint16_t XVmin_Soc_EdcStepDownTime;// 10 bit, refclk count to step down throttle when PCC remains asserted. + uint8_t XVmin_Soc_EdcInitPccStep; // 3 bit, First Pcc Step number that will applied when PCC asserts. 
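Illustration: many voltage fields in this table (for example MemVmempVoltage, MemVddioVoltage, DcsGfxOffVoltage and the Vmin_* arrays above) are documented as mV(Q2). The sketch below assumes Q2 denotes two fractional bits, i.e. raw units of 0.25 mV; that reading is inferred from the notation, the header itself does not define it.

#include <stdint.h>

/* Convert an mV(Q2) raw value to integer millivolts (the fractional 0.25 mV steps are dropped). */
static inline uint32_t smu_q2_to_mv(uint16_t raw_q2)
{
        return raw_q2 >> 2;
}

/* Convert an mV(Q2) raw value to microvolts, keeping the 0.25 mV resolution. */
static inline uint32_t smu_q2_to_uv(uint16_t raw_q2)
{
        return (uint32_t)raw_q2 * 250;
}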
+ uint8_t PaddingSocEdc[3]; + + // Fuse Override for SOC and GFX XVMIN + uint8_t GfxXvminFuseOverride; + uint8_t SocXvminFuseOverride; + uint8_t PaddingXvminFuseOverride[2]; + uint8_t GfxXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value + uint8_t GfxXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value + uint8_t SocXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value + uint8_t SocXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value + + + uint16_t GfxXvminFddVolt0; // low voltage, in VID + uint16_t GfxXvminFddVolt1; // mid voltage, in VID + uint16_t GfxXvminFddVolt2; // high voltage, in VID + uint16_t SocXvminFddVolt0; // low voltage, in VID + uint16_t SocXvminFddVolt1; // mid voltage, in VID + uint16_t SocXvminFddVolt2; // high voltage, in VID + uint16_t GfxXvminDsFddDsm[6]; // XVMIN DS, same organization with fuse + uint16_t GfxXvminEdcFddDsm[6];// XVMIN GFX EDC, same organization with fuse + uint16_t SocXvminEdcFddDsm[6];// XVMIN SOC EDC, same organization with fuse + + // SECTION: Sku Reserved + uint32_t Spare; + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; +} SkuTable_t; + +typedef struct { + uint8_t SlewRateConditions; + uint8_t LoadLineAdjust; + uint8_t VoutOffset; + uint8_t VidMax; + uint8_t VidMin; + uint8_t TenBitTelEn; + uint8_t SixteenBitTelEn; + uint8_t OcpThresh; + uint8_t OcpWarnThresh; + uint8_t OcpSettings; + uint8_t VrhotThresh; + uint8_t OtpThresh; + uint8_t UvpOvpDeltaRef; + uint8_t PhaseShed; + uint8_t Padding[10]; + uint32_t SettingOverrideMask; +} Svi3RegulatorSettings_t; + +typedef struct { + // SECTION: Version + uint32_t Version; //should be unique to each board type + + // SECTION: I2C Control + I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; + + //SECTION SVI3 Board Parameters + uint8_t SlaveAddrMapping[SVI_PLANE_COUNT]; + uint8_t VrPsiSupport[SVI_PLANE_COUNT]; + + uint32_t Svi3SvcSpeed; + uint8_t EnablePsi6[SVI_PLANE_COUNT]; // only applicable in SVI3 + + // SECTION: Voltage Regulator Settings + Svi3RegulatorSettings_t Svi3RegSettings[SVI_PLANE_COUNT]; + + // SECTION: GPIO Settings + uint8_t LedOffGpio; + uint8_t FanOffGpio; + uint8_t GfxVrPowerStageOffGpio; + + uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching + uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + + uint8_t GthrGpio; // GPIO pin configured for GTHR Event + uint8_t GthrPolarity; // replace GPIO polarity for GTHR + + // LED Display Settings + uint8_t LedPin0; // GPIO number for LedPin[0] + uint8_t LedPin1; // GPIO number for LedPin[1] + uint8_t LedPin2; // GPIO number for LedPin[2] + uint8_t LedEnableMask; + + uint8_t LedPcie; // GPIO number for PCIE results + uint8_t LedError; // GPIO number for Error Cases + uint8_t PaddingLed; + + // SECTION: Clock Spread Spectrum + + // UCLK Spread Spectrum + uint8_t UclkTrainingModeSpreadPercent; // Q4.4 + uint8_t UclkSpreadPadding; + uint16_t UclkSpreadFreq; // kHz + + // UCLK Spread Spectrum + uint8_t UclkSpreadPercent[MEM_VENDOR_COUNT]; + + // DFLL Spread Spectrum + uint8_t GfxclkSpreadEnable; + + // FCLK Spread Spectrum + uint8_t FclkSpreadPercent; // Q4.4 + uint16_t FclkSpreadFreq; // kHz + + // Section: Memory Config + uint8_t DramWidth; // Width of interface to the channel for each DRAM module. 
See DRAM_BIT_WIDTH_TYPE_e + uint8_t PaddingMem1[7]; + + // SECTION: UMC feature flags + uint8_t HsrEnabled; + uint8_t VddqOffEnabled; + uint8_t PaddingUmcFlags[2]; + + uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued + uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS + + uint8_t FuseWritePowerMuxPresent; + uint8_t FuseWritePadding[3]; + + // SECTION: EDC Params + uint32_t LoadlineGfx; + uint32_t LoadlineSoc; + uint32_t GfxEdcLimit; + uint32_t SocEdcLimit; + + uint32_t RestBoardPower; //power consumed by board that is not captured by the SVI3 input telemetry + uint32_t ConnectorsImpedance; // impedance of the input ATX power connectors + + uint8_t EpcsSens0; //GPIO number for External Power Connector Support Sense0 + uint8_t EpcsSens1; //GPIO Number for External Power Connector Support Sense1 + uint8_t PaddingEpcs[2]; + + // SECTION: Board Reserved + uint32_t BoardSpare[52]; + + // SECTION: Structure Padding + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; +} BoardTable_t; + +typedef struct { + // SECTION: Infrastructure Limits + uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in AC mode. Multiple limits supported + + uint16_t VrTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with VR regulator maximum temperature + + int16_t TotalIdleBoardPowerM; + int16_t TotalIdleBoardPowerB; + int16_t TotalBoardPowerM; + int16_t TotalBoardPowerB; + + uint16_t TemperatureLimit[TEMP_COUNT]; // In degrees Celsius. Temperature limit associated with each input + + // SECTION: Fan Control + uint16_t FanStopTemp[TEMP_COUNT]; //Celsius + uint16_t FanStartTemp[TEMP_COUNT]; //Celsius + + uint16_t FanGain[TEMP_COUNT]; + + uint16_t FanPwmMin; + uint16_t AcousticTargetRpmThreshold; + uint16_t AcousticLimitRpmThreshold; + uint16_t FanMaximumRpm; + uint16_t MGpuAcousticLimitRpmThreshold; + uint16_t FanTargetGfxclk; + uint32_t TempInputSelectMask; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + uint16_t FanPadding; + uint16_t FanTargetTemperature[TEMP_COUNT]; + + // The following are AFC override parameters. Leave at 0 to use FW defaults. + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + uint16_t FwCtfLimit[TEMP_COUNT]; + + uint16_t IntakeTempEnableRPM; + int16_t IntakeTempOffsetTemp; + uint16_t IntakeTempReleaseTemp; + uint16_t IntakeTempHighIntakeAcousticLimit; + + uint16_t IntakeTempAcouticLimitReleaseRate; + int16_t FanAbnormalTempLimitOffset; // FanStalledTempLimitOffset + uint16_t FanStalledTriggerRpm; // + uint16_t FanAbnormalTriggerRpmCoeff; // FanAbnormalTriggerRpm + + uint16_t FanSpare[1]; + uint8_t FanIntakeSensorSupport; + uint8_t FanIntakePadding; + uint32_t FanAmbientPerfBoostThreshold; + uint32_t FanSpare2[12]; + + uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix + uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron + uint16_t TemperatureFwCtfLimit_Hynix; + uint16_t TemperatureFwCtfLimit_Micron; + + // SECTION: Board Reserved + uint16_t PlatformTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. 
Current limit associated with platform maximum temperature per VR current rail + uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in DC mode. Multiple limits supported + uint16_t SocketPowerLimitSmartShift2; // In Watts. Power limit used SmartShift + uint16_t CustomSkuSpare16b; + uint32_t CustomSkuSpare32b[10]; + + // SECTION: Structure Padding + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; +} CustomSkuTable_t; + +typedef struct { + PFE_Settings_t PFE_Settings; + SkuTable_t SkuTable; + CustomSkuTable_t CustomSkuTable; + BoardTable_t BoardTable; +} PPTable_t; + +typedef struct { + // Time constant parameters for clock averages in ms + uint16_t GfxclkAverageLpfTau; + uint16_t FclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + uint16_t UclkMaxActivityLpfTau; + uint16_t SocketPowerLpfTau; + uint16_t VcnClkAverageLpfTau; + uint16_t VcnUsageAverageLpfTau; + uint16_t PcieActivityLpTau; +} DriverSmuConfig_t; + +typedef struct { + DriverSmuConfig_t DriverSmuConfig; + + uint32_t Spare[8]; + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} DriverSmuConfigExternal_t; + + +typedef struct { + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz + uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz + + uint16_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz + + uint16_t Padding; + + uint32_t Spare[32]; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use + +} DriverInfoTable_t; + +typedef struct { + uint32_t CurrClock[PPCLK_COUNT]; + + uint16_t AverageGfxclkFrequencyTarget; + uint16_t AverageGfxclkFrequencyPreDs; + uint16_t AverageGfxclkFrequencyPostDs; + uint16_t AverageFclkFrequencyPreDs; + uint16_t AverageFclkFrequencyPostDs; + uint16_t AverageMemclkFrequencyPreDs ; // this is scaled to actual memory clock + uint16_t AverageMemclkFrequencyPostDs ; // this is scaled to actual memory clock + uint16_t AverageVclk0Frequency ; + uint16_t AverageDclk0Frequency ; + uint16_t AverageVclk1Frequency ; + uint16_t AverageDclk1Frequency ; + uint16_t PCIeBusy ; + uint16_t dGPU_W_MAX ; + uint16_t padding ; + + uint32_t MetricsCounter ; + + uint16_t AvgVoltage[SVI_PLANE_COUNT]; + uint16_t AvgCurrent[SVI_PLANE_COUNT]; + + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint16_t Vcn0ActivityPercentage ; + uint16_t Vcn1ActivityPercentage ; + + uint32_t EnergyAccumulator; + uint16_t AverageSocketPower; + uint16_t AverageTotalBoardPower; + + uint16_t AvgTemperature[TEMP_COUNT]; + uint16_t AvgTemperatureFanIntake; + + uint8_t PcieRate ; + uint8_t PcieWidth ; + + uint8_t AvgFanPwm; + uint8_t Padding[1]; + uint16_t AvgFanRpm; + + + uint8_t ThrottlingPercentage[THROTTLER_COUNT]; + uint8_t padding1[3]; + + //metrics for D3hot entry/exit and driver ARM msgs + uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT]; + uint32_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT]; + 
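Illustration: tables that cross the driver/PMFW DRAM interface are consistently wrapped in an *External_t variant (DriverSmuConfigExternal_t above, SwI2cRequestExternal_t earlier) that appends spare words and MmHubPadding, and it is the wrapper that should size the shared buffer. A sketch of that convention; the buffer handling and the final table-transfer message plumbing are hypothetical here, not something this header provides.

#include <string.h>

/* Populate the DMA-visible wrapper; sizeof(DriverSmuConfigExternal_t) already includes the padding. */
static void fill_driver_smu_config(DriverSmuConfigExternal_t *ext)
{
        memset(ext, 0, sizeof(*ext));
        ext->DriverSmuConfig.GfxclkAverageLpfTau = 10;  /* example LPF time constant in ms */

        /* A real driver would now issue SMC_MSG_TransferTableDram2Smu for the
         * driver-SMU-config table; that message plumbing lives outside this header. */
}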
uint32_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT]; + + uint16_t ApuSTAPMSmartShiftLimit; + uint16_t ApuSTAPMLimit; + uint16_t AvgApuSocketPower; + + uint16_t AverageUclkActivity_MAX; + + uint32_t PublicSerialNumberLower; + uint32_t PublicSerialNumberUpper; + +} SmuMetrics_t; + +typedef struct { + SmuMetrics_t SmuMetrics; + uint32_t Spare[30]; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetricsExternal_t; + +typedef struct { + uint8_t WmSetting; + uint8_t Flags; + uint8_t Padding[2]; + +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WATERMARKS_CLOCK_RANGE = 0, + WATERMARKS_DUMMY_PSTATE, + WATERMARKS_MALL, + WATERMARKS_COUNT, +} WATERMARKS_FLAGS_e; + +typedef struct { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES]; +} Watermarks_t; + +typedef struct { + Watermarks_t Watermarks; + uint32_t Spare[16]; + + uint32_t MmHubPadding[8]; // SMU internal use +} WatermarksExternal_t; + +typedef struct { + uint16_t avgPsmCount[76]; + uint16_t minPsmCount[76]; + uint16_t maxPsmCount[76]; + float avgPsmVoltage[76]; + float minPsmVoltage[76]; + float maxPsmVoltage[76]; +} AvfsDebugTable_t; + +typedef struct { + AvfsDebugTable_t AvfsDebugTable; + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsDebugTableExternal_t; + + +typedef struct { + uint8_t Gfx_ActiveHystLimit; + uint8_t Gfx_IdleHystLimit; + uint8_t Gfx_FPS; + uint8_t Gfx_MinActiveFreqType; + uint8_t Gfx_BoosterFreqType; + uint8_t PaddingGfx; + uint16_t Gfx_MinActiveFreq; // MHz + uint16_t Gfx_BoosterFreq; // MHz + uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Gfx_PD_Data_limit_a; // Q16 + uint32_t Gfx_PD_Data_limit_b; // Q16 + uint32_t Gfx_PD_Data_limit_c; // Q16 + uint32_t Gfx_PD_Data_error_coeff; // Q16 + uint32_t Gfx_PD_Data_error_rate_coeff; // Q16 + + uint8_t Fclk_ActiveHystLimit; + uint8_t Fclk_IdleHystLimit; + uint8_t Fclk_FPS; + uint8_t Fclk_MinActiveFreqType; + uint8_t Fclk_BoosterFreqType; + uint8_t PaddingFclk; + uint16_t Fclk_MinActiveFreq; // MHz + uint16_t Fclk_BoosterFreq; // MHz + uint16_t Fclk_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Fclk_PD_Data_limit_a; // Q16 + uint32_t Fclk_PD_Data_limit_b; // Q16 + uint32_t Fclk_PD_Data_limit_c; // Q16 + uint32_t Fclk_PD_Data_error_coeff; // Q16 + uint32_t Fclk_PD_Data_error_rate_coeff; // Q16 + + uint32_t Mem_UpThreshold_Limit[NUM_UCLK_DPM_LEVELS]; // Q16 + uint8_t Mem_UpHystLimit[NUM_UCLK_DPM_LEVELS]; + uint16_t Mem_DownHystLimit[NUM_UCLK_DPM_LEVELS]; + uint16_t Mem_Fps; + +} DpmActivityMonitorCoeffInt_t; + + +typedef struct { + DpmActivityMonitorCoeffInt_t DpmActivityMonitorCoeffInt; + uint32_t MmHubPadding[8]; // SMU internal use +} DpmActivityMonitorCoeffIntExternal_t; + + + +// Workload bits +#define WORKLOAD_PPLIB_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 +#define WORKLOAD_PPLIB_VIDEO_BIT 3 +#define WORKLOAD_PPLIB_VR_BIT 4 +#define WORKLOAD_PPLIB_COMPUTE_BIT 5 +#define WORKLOAD_PPLIB_CUSTOM_BIT 6 +#define WORKLOAD_PPLIB_WINDOW_3D_BIT 7 +#define WORKLOAD_PPLIB_DIRECT_ML_BIT 8 +#define WORKLOAD_PPLIB_CGVDI_BIT 9 +#define WORKLOAD_PPLIB_COUNT 10 + + +// These defines are used with the following messages: +// SMC_MSG_TransferTableDram2Smu +// SMC_MSG_TransferTableSmu2Dram + +// Table transfer status +#define TABLE_TRANSFER_OK 0x0 +#define TABLE_TRANSFER_FAILED 0xFF +#define TABLE_TRANSFER_PENDING 0xAB + +// Table types +#define TABLE_PPTABLE 0 +#define 
TABLE_COMBO_PPTABLE 1 +#define TABLE_WATERMARKS 2 +#define TABLE_AVFS_PSM_DEBUG 3 +#define TABLE_PMSTATUSLOG 4 +#define TABLE_SMU_METRICS 5 +#define TABLE_DRIVER_SMU_CONFIG 6 +#define TABLE_ACTIVITY_MONITOR_COEFF 7 +#define TABLE_OVERDRIVE 8 +#define TABLE_I2C_COMMANDS 9 +#define TABLE_DRIVER_INFO 10 +#define TABLE_ECCINFO 11 +#define TABLE_CUSTOM_SKUTABLE 12 +#define TABLE_COUNT 13 + +//IH Interupt ID +#define IH_INTERRUPT_ID_TO_DRIVER 0xFE +#define IH_INTERRUPT_CONTEXT_ID_BACO 0x2 +#define IH_INTERRUPT_CONTEXT_ID_AC 0x3 +#define IH_INTERRUPT_CONTEXT_ID_DC 0x4 +#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5 +#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6 +#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7 +#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 +#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index 7b812b9994..0b3c2f54a3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -123,7 +123,7 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0xB +#define SMU_METRICS_TABLE_VERSION 0xC typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -223,6 +223,10 @@ typedef struct __attribute__((packed, aligned(4))) { // VCN/JPEG ACTIVITY uint32_t VcnBusy[4]; uint32_t JpegBusy[32]; + + // PCIE LINK Speed and width + uint32_t PCIeLinkSpeed; + uint32_t PCIeLinkWidth; } MetricsTableX_t; typedef struct __attribute__((packed, aligned(4))) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h new file mode 100644 index 0000000000..de2e442281 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h @@ -0,0 +1,140 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef SMU_V14_0_2_PPSMC_H +#define SMU_V14_0_2_PPSMC_H + +#define PPSMC_VERSION 0x1 + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +// BASIC +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 +#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 +#define PPSMC_MSG_EnableAllSmuFeatures 0x6 +#define PPSMC_MSG_DisableAllSmuFeatures 0x7 +#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 +#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 +#define PPSMC_MSG_DisableSmuFeaturesLow 0xA +#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB +#define PPSMC_MSG_GetRunningSmuFeaturesLow 0xC +#define PPSMC_MSG_GetRunningSmuFeaturesHigh 0xD +#define PPSMC_MSG_SetDriverDramAddrHigh 0xE +#define PPSMC_MSG_SetDriverDramAddrLow 0xF +#define PPSMC_MSG_SetToolsDramAddrHigh 0x10 +#define PPSMC_MSG_SetToolsDramAddrLow 0x11 +#define PPSMC_MSG_TransferTableSmu2Dram 0x12 +#define PPSMC_MSG_TransferTableDram2Smu 0x13 +#define PPSMC_MSG_UseDefaultPPTable 0x14 + +//BACO/BAMACO/BOMACO +#define PPSMC_MSG_EnterBaco 0x15 +#define PPSMC_MSG_ExitBaco 0x16 +#define PPSMC_MSG_ArmD3 0x17 +#define PPSMC_MSG_BacoAudioD3PME 0x18 + +//DPM +#define PPSMC_MSG_SetSoftMinByFreq 0x19 +#define PPSMC_MSG_SetSoftMaxByFreq 0x1A +#define PPSMC_MSG_SetHardMinByFreq 0x1B +#define PPSMC_MSG_SetHardMaxByFreq 0x1C +#define PPSMC_MSG_GetMinDpmFreq 0x1D +#define PPSMC_MSG_GetMaxDpmFreq 0x1E +#define PPSMC_MSG_GetDpmFreqByIndex 0x1F +#define PPSMC_MSG_OverridePcieParameters 0x20 + +//DramLog Set DramAddr +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x21 +#define PPSMC_MSG_DramLogSetDramAddrLow 0x22 +#define PPSMC_MSG_DramLogSetDramSize 0x23 +#define PPSMC_MSG_SetWorkloadMask 0x24 + +#define PPSMC_MSG_GetVoltageByDpm 0x25 // Can be removed +#define PPSMC_MSG_SetVideoFps 0x26 // Can be removed +#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x27 + +//Power Gating +#define PPSMC_MSG_AllowGfxOff 0x28 +#define PPSMC_MSG_DisallowGfxOff 0x29 +#define PPSMC_MSG_PowerUpVcn 0x2A +#define PPSMC_MSG_PowerDownVcn 0x2B +#define PPSMC_MSG_PowerUpJpeg 0x2C +#define PPSMC_MSG_PowerDownJpeg 0x2D + +//Resets +#define PPSMC_MSG_PrepareMp1ForUnload 0x2E +#define PPSMC_MSG_Mode1Reset 0x2F + +//Set SystemVirtual DramAddrHigh +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30 +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x31 +//ACDC Power Source +#define PPSMC_MSG_SetPptLimit 0x32 +#define PPSMC_MSG_GetPptLimit 0x33 +#define PPSMC_MSG_ReenableAcDcInterrupt 0x34 +#define PPSMC_MSG_NotifyPowerSource 0x35 + +//BTC +#define PPSMC_MSG_RunDcBtc 0x36 + +// 0x37 + +//Others +#define PPSMC_MSG_SetTemperatureInputSelect 0x38 // Can be removed +#define PPSMC_MSG_SetFwDstatesMask 0x39 +#define PPSMC_MSG_SetThrottlerMask 0x3A + +#define PPSMC_MSG_SetExternalClientDfCstateAllow 0x3B + +#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x3C + +//STB to dram log +#define PPSMC_MSG_DumpSTBtoDram 0x3D +#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x3E +#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x3F +#define PPSMC_MSG_STBtoDramLogSetDramSize 0x40 +#define PPSMC_MSG_SetOBMTraceBufferLogging 0x41 + +#define PPSMC_MSG_AllowGfxDcs 0x43 +#define PPSMC_MSG_DisallowGfxDcs 0x44 +#define PPSMC_MSG_EnableAudioStutterWA 0x45 +#define PPSMC_MSG_PowerUpUmsch 0x46 +#define PPSMC_MSG_PowerDownUmsch 0x47 +#define 
PPSMC_MSG_SetDcsArch 0x48 +#define PPSMC_MSG_TriggerVFFLR 0x49 +#define PPSMC_MSG_SetNumBadMemoryPagesRetired 0x4A +#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4B +#define PPSMC_MSG_SetPriorityDeltaGain 0x4C +#define PPSMC_MSG_AllowIHHostInterrupt 0x4D +#define PPSMC_MSG_Mode3Reset 0x4F +#define PPSMC_Message_Count 0x50 +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index 4a7404856b..2e32b08582 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -447,4 +447,11 @@ enum smu_feature_mask { SMU_FEATURE_COUNT, }; +/* Message category flags */ +#define SMU_MSG_VF_FLAG (1U << 0) +#define SMU_MSG_RAS_PRI (1U << 1) + +/* Firmware capability flags */ +#define SMU_FW_CAP_RAS_PRI (1U << 0) + #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h index a0e5ad0381..c2ab336bb5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h @@ -237,7 +237,7 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu); int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); -bool smu_v11_0_baco_is_support(struct smu_context *smu); +int smu_v11_0_get_bamaco_support(struct smu_context *smu); enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index fbd57fa1a0..d9700a3f28 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -210,7 +210,7 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu); int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); -bool smu_v13_0_baco_is_support(struct smu_context *smu); +int smu_v13_0_get_bamaco_support(struct smu_context *smu); int smu_v13_0_baco_enter(struct smu_context *smu); int smu_v13_0_baco_exit(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 4af1985ae4..1fc4557e6f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -28,7 +28,7 @@ #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6 -#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1 +#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x25 #define FEATURE_MASK(feature) (1ULL << feature) @@ -39,7 +39,8 @@ #define MP1_SRAM 0x03c00004 /* address block */ -#define smnMP1_FIRMWARE_FLAGS 0x3010028 +#define smnMP1_FIRMWARE_FLAGS_14_0_0 0x3010028 +#define smnMP1_FIRMWARE_FLAGS 0x3010024 #define smnMP1_PUB_CTRL 0x3010d10 #define MAX_DPM_LEVELS 16 @@ -160,7 +161,7 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu); int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_baco_seq baco_seq); -bool smu_v14_0_baco_is_support(struct smu_context *smu); +int smu_v14_0_get_bamaco_support(struct smu_context *smu); enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h new file mode 100644 index 0000000000..4a3fde89ae --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h @@ -0,0 +1,164 @@ 
+/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU_14_0_2_PPTABLE_H +#define SMU_14_0_2_PPTABLE_H + + +#pragma pack(push, 1) + +#define SMU_14_0_2_TABLE_FORMAT_REVISION 3 + +// POWERPLAYTABLE::ulPlatformCaps +#define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC need to show Powerplay page. +#define SMU_14_0_2_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 // This cap indicates whether power source notificaiton is done by SBIOS instead of OS. +#define SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC 0x4 // This cap indicates whether DC mode notificaiton is done by GPIO pin directly. +#define SMU_14_0_2_PP_PLATFORM_CAP_BACO 0x8 // This cap indicates whether board supports the BACO circuitry. +#define SMU_14_0_2_PP_PLATFORM_CAP_MACO 0x10 // This cap indicates whether board supports the MACO circuitry. +#define SMU_14_0_2_PP_PLATFORM_CAP_SHADOWPSTATE 0x20 // This cap indicates whether board supports the Shadow Pstate. +#define SMU_14_0_2_PP_PLATFORM_CAP_LEDSUPPORTED 0x40 // This cap indicates whether board supports the LED. +#define SMU_14_0_2_PP_PLATFORM_CAP_MOBILEOVERDRIVE 0x80 // This cap indicates whether board supports the Mobile Overdrive. 
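Illustration: the SMU_14_0_2_PP_PLATFORM_CAP_* values above are bit flags that are OR'd into the powerplay table's platform_caps word (defined further down in this header). A minimal capability check; the helper names are illustrative.

#include <stdbool.h>
#include <stdint.h>

static inline bool smu_14_0_2_has_baco(uint32_t platform_caps)
{
        return (platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) != 0;
}

static inline bool smu_14_0_2_has_maco(uint32_t platform_caps)
{
        return (platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO) != 0;
}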
+ +// SMU_14_0_2_PP_THERMALCONTROLLER - Thermal Controller Type +#define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0 + +#define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD +#define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 + +enum SMU_14_0_2_OD_SW_FEATURE_CAP +{ + SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT = 0, + SMU_14_0_2_ODCAP_POWER_MODE = 1, + SMU_14_0_2_ODCAP_AUTO_UV_ENGINE = 2, + SMU_14_0_2_ODCAP_AUTO_OC_ENGINE = 3, + SMU_14_0_2_ODCAP_AUTO_OC_MEMORY = 4, + SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE = 5, + SMU_14_0_2_ODCAP_MANUAL_AC_TIMING = 6, + SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER = 7, + SMU_14_0_2_ODCAP_AUTO_SOC_UV = 8, + SMU_14_0_2_ODCAP_COUNT = 9, +}; + +enum SMU_14_0_2_OD_SW_FEATURE_ID +{ + SMU_14_0_2_ODFEATURE_AUTO_FAN_ACOUSTIC_LIMIT = 1 << SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, // Auto Fan Acoustic RPM + SMU_14_0_2_ODFEATURE_POWER_MODE = 1 << SMU_14_0_2_ODCAP_POWER_MODE, // Optimized GPU Power Mode + SMU_14_0_2_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_UV_ENGINE, // Auto Under Volt GFXCLK + SMU_14_0_2_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_OC_ENGINE, // Auto Over Clock GFXCLK + SMU_14_0_2_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_14_0_2_ODCAP_AUTO_OC_MEMORY, // Auto Over Clock MCLK + SMU_14_0_2_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE, // Auto AC Timing Tuning + SMU_14_0_2_ODFEATURE_MANUAL_AC_TIMING = 1 << SMU_14_0_2_ODCAP_MANUAL_AC_TIMING, // Manual fine grain AC Timing tuning + SMU_14_0_2_ODFEATURE_AUTO_VF_CURVE_OPTIMIZER = 1 << SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER, // Fine grain auto VF curve tuning + SMU_14_0_2_ODFEATURE_AUTO_SOC_UV = 1 << SMU_14_0_2_ODCAP_AUTO_SOC_UV, // Auto Unver Volt VDDSOC +}; + +#define SMU_14_0_2_MAX_ODFEATURE 32 // Maximum Number of OD Features + +enum SMU_14_0_2_OD_SW_FEATURE_SETTING_ID +{ + SMU_14_0_2_ODSETTING_AUTO_FAN_ACOUSTIC_LIMIT = 0, + SMU_14_0_2_ODSETTING_POWER_MODE = 1, + SMU_14_0_2_ODSETTING_AUTOUVENGINE = 2, + SMU_14_0_2_ODSETTING_AUTOOCENGINE = 3, + SMU_14_0_2_ODSETTING_AUTOOCMEMORY = 4, + SMU_14_0_2_ODSETTING_ACTIMING = 5, + SMU_14_0_2_ODSETTING_MANUAL_AC_TIMING = 6, + SMU_14_0_2_ODSETTING_AUTO_VF_CURVE_OPTIMIZER = 7, + SMU_14_0_2_ODSETTING_AUTO_SOC_UV = 8, + SMU_14_0_2_ODSETTING_COUNT = 9, +}; +#define SMU_14_0_2_MAX_ODSETTING 64 // Maximum Number of ODSettings + +enum SMU_14_0_2_PWRMODE_SETTING +{ + SMU_14_0_2_PMSETTING_POWER_LIMIT_QUIET = 0, + SMU_14_0_2_PMSETTING_POWER_LIMIT_BALANCE, + SMU_14_0_2_PMSETTING_POWER_LIMIT_TURBO, + SMU_14_0_2_PMSETTING_POWER_LIMIT_RAGE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_QUIET, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_BALANCE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_TURBO, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_RAGE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_QUIET, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_BALANCE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_TURBO, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_RAGE, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_QUIET, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE, +}; +#define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings + +enum SMU_14_0_2_overdrive_table_id +{ + SMU_14_0_2_OVERDRIVE_TABLE_BASIC = 0, + SMU_14_0_2_OVERDRIVE_TABLE_ADVANCED = 1, + SMU_14_0_2_OVERDRIVE_TABLE_COUNT = 2, +}; + +struct smu_14_0_2_overdrive_table +{ + uint8_t revision; // Revision = SMU_14_0_2_PP_OVERDRIVE_VERSION + uint8_t reserve[3]; // Zero filled 
field reserved for future use
+ uint8_t cap[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODFEATURE]; // OD feature support flags
+ int32_t max[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // maximum settings
+ int32_t min[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // minimum settings
+ int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings
+};
+
+struct smu_14_0_2_powerplay_table
+{
+ struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structure size is calculated by PPGen.
+ uint8_t table_revision; // PPGen use only: table_revision = 3
+ uint8_t padding; // Padding 1 byte to align table_size offset to 6 bytes (pmfw_start_offset, for PMFW to know the starting offset of PPTable_t).
+ uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion, i.e. start of PPTable_t (start of SkuTable_t)
+ uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e. PPTable_t.
+ uint16_t pmfw_pfe_table_start_offset; // The start offset of the PFE_Settings_t within pmfw_pptable.
+ uint16_t pmfw_pfe_table_size; // The size of PFE_Settings_t.
+ uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t within pmfw_pptable.
+ uint16_t pmfw_board_table_size; // The size of BoardTable_t.
+ uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable.
+ uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t.
+ uint32_t golden_pp_id; // PPGen use only: PP Table ID on the Golden Data Base
+ uint32_t golden_revision; // PPGen use only: PP Table Revision on the Golden Data Base
+ uint16_t format_id; // PPGen use only: PPTable for different ASICs.
+ uint32_t platform_caps; // POWERPLAYTABLE::ulPlatformCaps
+
+ uint8_t thermal_controller_type; // one of SMU_14_0_2_PP_THERMALCONTROLLER
+
+ uint16_t small_power_limit1;
+ uint16_t small_power_limit2;
+ uint16_t boost_power_limit; // For Gemini Board, when the slave adapter is in BACO mode, the master adapter will use this boost power limit instead of the default power limit.
+ uint16_t software_shutdown_temp; + + uint8_t reserve[143]; // Zero filled field reserved for future use + + struct smu_14_0_2_overdrive_table overdrive_table; + + PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes +}; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 0c2d04f978..6d334a2aff 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -2387,7 +2387,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = smu_v11_0_baco_is_support, + .get_bamaco_support = smu_v11_0_get_bamaco_support, .baco_enter = smu_v11_0_baco_enter, .baco_exit = smu_v11_0_baco_exit, .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 836b1df799..5a68d36596 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -3538,7 +3538,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = smu_v11_0_baco_is_support, + .get_bamaco_support = smu_v11_0_get_bamaco_support, .baco_enter = navi10_baco_enter, .baco_exit = navi10_baco_exit, .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 1f18b61884..e426f457a0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -4431,7 +4431,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = smu_v11_0_baco_is_support, + .get_bamaco_support = smu_v11_0_get_bamaco_support, .baco_enter = sienna_cichlid_baco_enter, .baco_exit = sienna_cichlid_baco_exit, .mode1_reset_is_support = sienna_cichlid_is_mode1_reset_supported, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index f6545093bf..9d5ab2ea64 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -93,7 +93,7 @@ static void smu_v11_0_poll_baco_exit(struct smu_context *smu) int smu_v11_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char ucode_prefix[30]; + char ucode_prefix[25]; char fw_name[SMU_FW_NAME_LEN]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; @@ -1557,23 +1557,27 @@ int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL); } -bool smu_v11_0_baco_is_support(struct smu_context *smu) +int smu_v11_0_get_bamaco_support(struct smu_context *smu) { struct 
smu_baco_context *smu_baco = &smu->smu_baco; + int bamaco_support = 0; if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) - return false; + return 0; + + if (smu_baco->maco_support) + bamaco_support |= MACO_SUPPORT; /* return true if ASIC is in BACO state already */ if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) - return true; + return bamaco_support |= BACO_SUPPORT; /* Arcturus does not support this bit mask */ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) - return false; + return 0; - return true; + return (bamaco_support |= BACO_SUPPORT); } enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) @@ -1603,7 +1607,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) case IP_VERSION(11, 0, 11): case IP_VERSION(11, 0, 12): case IP_VERSION(11, 0, 13): - if (amdgpu_runtime_pm == 2) + if (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, D3HOT_BAMACO_SEQUENCE, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index da1f43999d..379e44eb00 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -301,7 +301,7 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_AVERAGE_SOCKETPOWER: *value = (metrics->CurrentSocketPower << 8) / @@ -1507,6 +1507,12 @@ static int vangogh_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = vangogh_common_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = vangogh_common_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index f41ac6465f..ce941fbb9c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -759,8 +759,11 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu, switch (type) { case SMU_OD_SCLK: - *offset += sysfs_emit_at(buf, *offset, "%s:\n", "GFXCLK"); - fallthrough; + *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_SCLK"); + *offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n", + pstate_table->gfxclk_pstate.curr.min, + pstate_table->gfxclk_pstate.curr.max); + return 0; case SMU_SCLK: ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &cur_value); if (ret) { @@ -788,8 +791,11 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu, break; case SMU_OD_MCLK: - *offset += sysfs_emit_at(buf, *offset, "%s:\n", "MCLK"); - fallthrough; + *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_MCLK"); + *offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n", + pstate_table->uclk_pstate.curr.min, + pstate_table->uclk_pstate.curr.max); + return 0; case SMU_MCLK: ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &cur_value); if (ret) { @@ -850,7 +856,6 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu, } switch (type) { - case SMU_OD_SCLK: case SMU_SCLK: for (i = 0; i < display_levels; i++) { clock_mhz = freq_values[i]; @@ -863,7 +868,6 @@ static int 
aldebaran_emit_clk_levels(struct smu_context *smu, } break; - case SMU_OD_MCLK: case SMU_MCLK: case SMU_SOCCLK: case SMU_FCLK: @@ -1581,11 +1585,11 @@ out: adev->unique_id = ((uint64_t)upper32 << 32) | lower32; } -static bool aldebaran_is_baco_supported(struct smu_context *smu) +static int aldebaran_get_bamaco_support(struct smu_context *smu) { /* aldebaran is not support baco */ - return false; + return 0; } static int aldebaran_set_df_cstate(struct smu_context *smu, @@ -2059,7 +2063,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = { .register_irq_handler = smu_v13_0_register_irq_handler, .set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = aldebaran_is_baco_supported, + .get_bamaco_support = aldebaran_get_bamaco_support, .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, .set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range, .od_edit_dpm_table = aldebaran_usr_edit_dpm_table, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 48170bb511..b63ad9cb24 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -79,8 +79,8 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 #define smnPCIE_LC_SPEED_CNTL 0x11140290 -#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000 -#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0 +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5 #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 @@ -93,7 +93,7 @@ int smu_v13_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; char fw_name[30]; - char ucode_prefix[30]; + char ucode_prefix[15]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -2247,7 +2247,7 @@ static int smu_v13_0_baco_set_state(struct smu_context *smu, if (state == SMU_BACO_STATE_ENTER) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? BACO_SEQ_BAMACO : BACO_SEQ_BACO, NULL); } else { @@ -2268,33 +2268,36 @@ static int smu_v13_0_baco_set_state(struct smu_context *smu, return ret; } -bool smu_v13_0_baco_is_support(struct smu_context *smu) +int smu_v13_0_get_bamaco_support(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; + int bamaco_support = 0; if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) - return false; + return 0; + + if (smu_baco->maco_support) + bamaco_support |= MACO_SUPPORT; /* return true if ASIC is in BACO state already */ if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) - return true; + return bamaco_support |= BACO_SUPPORT; if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) - return false; + return 0; - return true; + return (bamaco_support |= BACO_SUPPORT); } int smu_v13_0_baco_enter(struct smu_context *smu) { - struct smu_baco_context *smu_baco = &smu->smu_baco; struct amdgpu_device *adev = smu->adev; int ret; if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { return smu_v13_0_baco_set_armd3_sequence(smu, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? 
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); } else { ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 67117ced7c..1e09d5f2d8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -3076,7 +3076,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .set_tool_table_location = smu_v13_0_set_tool_table_location, .deep_sleep_control = smu_v13_0_deep_sleep_control, .gfx_ulv_control = smu_v13_0_gfx_ulv_control, - .baco_is_support = smu_v13_0_baco_is_support, + .get_bamaco_support = smu_v13_0_get_bamaco_support, .baco_enter = smu_v13_0_baco_enter, .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 2fb6c9cb0f..b6257f34a7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -330,7 +330,7 @@ static int smu_v13_0_4_get_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_AVERAGE_SOCKETPOWER: *value = (metrics->AverageSocketPower << 8) / 1000; @@ -584,6 +584,12 @@ static int smu_v13_0_4_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_4_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v13_0_4_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 0dce672ac1..218f209c37 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -286,7 +286,7 @@ static int smu_v13_0_5_get_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_CURR_SOCKETPOWER: *value = (metrics->CurrentSocketPower << 8) / 1000; @@ -332,6 +332,12 @@ static int smu_v13_0_5_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_5_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: ret = smu_v13_0_5_get_smu_metrics_data(smu, METRICS_CURR_SOCKETPOWER, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index c977ebe880..4d3eca2fc3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -138,13 +138,13 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), - MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), + MSG_MAP(SetSoftMaxByFreq, 
PPSMC_MSG_SetSoftMaxByFreq, 1), MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), - MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0), + MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI), MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), @@ -167,10 +167,10 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0), MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0), MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0), - MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, 0), - MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, 0), - MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, 0), - MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, 0), + MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI), + MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI), MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0), MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0), }; @@ -1010,8 +1010,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, switch (type) { case SMU_OD_SCLK: - size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK"); - fallthrough; + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", + pstate_table->gfxclk_pstate.curr.min, + pstate_table->gfxclk_pstate.curr.max); + break; case SMU_SCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now); @@ -1052,8 +1055,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, break; case SMU_OD_MCLK: - size += sysfs_emit_at(buf, size, "%s:\n", "MCLK"); - fallthrough; + size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", + pstate_table->uclk_pstate.curr.min, + pstate_table->uclk_pstate.curr.max); + break; case SMU_MCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK, &now); @@ -1670,6 +1676,11 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, if (clk_type == SMU_UCLK) { if (max == pstate_table->uclk_pstate.curr.max) return 0; + /* For VF, only allowed in FW versions 85.102 or greater */ + if (amdgpu_sriov_vf(adev) && + ((smu->smc_fw_version < 0x556600) || + (adev->flags & AMD_IS_APU))) + return -EOPNOTSUPP; /* Only max clock limiting is allowed for UCLK */ ret = smu_v13_0_set_soft_freq_limited_range( smu, SMU_UCLK, 0, max); @@ -2077,11 +2088,11 @@ static void smu_v13_0_6_get_unique_id(struct smu_context *smu) adev->unique_id = pptable->PublicSerialNumber_AID; } -static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu) +static int smu_v13_0_6_get_bamaco_support(struct smu_context *smu) { /* smu_13_0_6 does not support baco */ - return false; + return 0; } static const char *const throttling_logging_label[] = { @@ -2228,7 +2239,15 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->gfxclk_lock_status = 
GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0); if (!(adev->flags & AMD_IS_APU)) { - if (!amdgpu_sriov_vf(adev)) { + /*Check smu version, PCIE link speed and width will be reported from pmfw metric + * table for both pf & one vf for smu version 85.99.0 or higher else report only + * for pf from registers + */ + if (smu->smc_fw_version >= 0x556300) { + gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth; + gpu_metrics->pcie_link_speed = + pcie_gen_to_speed(metrics_x->PCIeLinkSpeed); + } else if (!amdgpu_sriov_vf(adev)) { link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu); if (link_width_level > MAX_LINK_WIDTH) link_width_level = 0; @@ -2238,6 +2257,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu); } + gpu_metrics->pcie_bandwidth_acc = SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]); gpu_metrics->pcie_bandwidth_inst = @@ -2696,6 +2716,11 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct umc_v12_0_is_correctable_error(adev, status0)) *count = (ext_error_code == 0) ? odecc_err_cnt : 1; + amdgpu_umc_update_ecc_status(adev, + entry->regs[MCA_REG_IDX_STATUS], + entry->regs[MCA_REG_IDX_IPID], + entry->regs[MCA_REG_IDX_ADDR]); + return 0; } @@ -2709,7 +2734,8 @@ static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, st ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]); err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]); - if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0) + if (type == AMDGPU_MCA_ERROR_TYPE_UE && + (ext_error_code == 0 || ext_error_code == 9)) *count = err_cnt; else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6) *count = err_cnt; @@ -3000,7 +3026,7 @@ static int aca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) return smu_v13_0_6_mca_set_debug_mode(smu, enable); } -static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_error_type type, u32 *count) +static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_smu_type type, u32 *count) { uint32_t msg; int ret; @@ -3009,10 +3035,10 @@ static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_err return -EINVAL; switch (type) { - case ACA_ERROR_TYPE_UE: + case ACA_SMU_TYPE_UE: msg = SMU_MSG_QueryValidMcaCount; break; - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_CE: msg = SMU_MSG_QueryValidMcaCeCount; break; default: @@ -3029,14 +3055,14 @@ static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_err } static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, - enum aca_error_type type, u32 *count) + enum aca_smu_type type, u32 *count) { struct smu_context *smu = adev->powerplay.pp_handle; int ret; switch (type) { - case ACA_ERROR_TYPE_UE: - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_UE: + case ACA_SMU_TYPE_CE: ret = smu_v13_0_6_get_valid_aca_count(smu, type, count); break; default: @@ -3047,16 +3073,16 @@ static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, return ret; } -static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type, +static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type, int idx, int offset, u32 *val) { uint32_t msg, param; switch (type) { - case ACA_ERROR_TYPE_UE: + case ACA_SMU_TYPE_UE: msg = SMU_MSG_McaBankDumpDW; break; - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_CE: msg = 
SMU_MSG_McaBankCeDumpDW; break; default: @@ -3068,7 +3094,7 @@ static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_t return smu_cmn_send_smc_msg_with_param(smu, msg, param, (uint32_t *)val); } -static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type, +static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type, int idx, int offset, u32 *val, int count) { int ret, i; @@ -3085,7 +3111,7 @@ static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_typ return 0; } -static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type type, +static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_smu_type type, int idx, int reg_idx, u64 *val) { struct smu_context *smu = adev->powerplay.pp_handle; @@ -3102,13 +3128,13 @@ static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type typ *val = (u64)data[1] << 32 | data[0]; dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n", - type == ACA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val); + type == ACA_SMU_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val); return 0; } static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev, - enum aca_error_type type, int idx, struct aca_bank *bank) + enum aca_smu_type type, int idx, struct aca_bank *bank) { int i, ret, count; @@ -3122,12 +3148,25 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev, return 0; } +static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank) +{ + int error_code; + + if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) + error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]); + else + error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]); + + return error_code & 0xff; +} + static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = { .max_ue_bank_count = 12, .max_ce_bank_count = 12, .set_debug_mode = aca_smu_set_debug_mode, .get_valid_aca_count = aca_smu_get_valid_aca_count, .get_valid_aca_bank = aca_smu_get_valid_aca_bank, + .parse_error_code = aca_smu_parse_error_code, }; static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, @@ -3204,7 +3243,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .enable_thermal_alert = smu_v13_0_enable_thermal_alert, .disable_thermal_alert = smu_v13_0_disable_thermal_alert, .setup_pptable = smu_v13_0_6_setup_pptable, - .baco_is_support = smu_v13_0_6_is_baco_supported, + .get_bamaco_support = smu_v13_0_6_get_bamaco_support, .get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range, .od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table, @@ -3233,6 +3272,7 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) smu->feature_map = smu_v13_0_6_feature_mask_map; smu->table_map = smu_v13_0_6_table_map; smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; + smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI; smu_v13_0_set_smu_mailbox_registers(smu); amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs); amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 7318964f1f..e996a0a4d3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2650,7 +2650,7 @@ static const struct 
pptable_funcs smu_v13_0_7_ppt_funcs = { .set_tool_table_location = smu_v13_0_set_tool_table_location, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, - .baco_is_support = smu_v13_0_baco_is_support, + .get_bamaco_support = smu_v13_0_get_bamaco_support, .baco_enter = smu_v13_0_baco_enter, .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 2d1736234b..d8bcf765a8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -363,7 +363,7 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_CURR_SOCKETPOWER: *value = (metrics->CurrentSocketPower << 8) / 1000; @@ -423,6 +423,12 @@ static int yellow_carp_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = yellow_carp_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: ret = yellow_carp_get_smu_metrics_data(smu, METRICS_CURR_SOCKETPOWER, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile index ddbac5c655..4593e29e8f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile @@ -23,7 +23,7 @@ # Makefile for the 'smu manager' sub-component of powerplay. # It provides the smu management services for the driver. -SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o +SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o smu_v14_0_2_ppt.o AMD_SWSMU_SMU14MGR = $(addprefix $(AMD_SWSMU_PATH)/smu14/,$(SMU14_MGR)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 07a65e0057..68b9bf822e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -38,8 +38,13 @@ #include "amdgpu_ras.h" #include "smu_cmn.h" -#include "asic_reg/mp/mp_14_0_0_offset.h" -#include "asic_reg/mp/mp_14_0_0_sh_mask.h" +#include "asic_reg/mp/mp_14_0_2_offset.h" +#include "asic_reg/mp/mp_14_0_2_sh_mask.h" + +#define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341 +#define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0 +#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342 +#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0 /* * DO NOT use these for err/warn/info/debug messages. 
@@ -52,6 +57,7 @@ #undef pr_debug MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin"); +MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin"); #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 @@ -106,7 +112,6 @@ void smu_v14_0_fini_microcode(struct smu_context *smu) int smu_v14_0_load_microcode(struct smu_context *smu) { -#if 0 struct amdgpu_device *adev = smu->adev; const uint32_t *src; const struct smc_firmware_header_v1_0 *hdr; @@ -131,8 +136,13 @@ int smu_v14_0_load_microcode(struct smu_context *smu) 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK); for (i = 0; i < adev->usec_timeout; i++) { - mp1_fw_flags = RREG32_PCIE(MP1_Public | - (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); + else + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) break; @@ -142,9 +152,7 @@ int smu_v14_0_load_microcode(struct smu_context *smu) if (i == adev->usec_timeout) return -ETIME; -#endif return 0; - } int smu_v14_0_init_pptable_microcode(struct smu_context *smu) @@ -165,6 +173,10 @@ int smu_v14_0_init_pptable_microcode(struct smu_context *smu) if (!adev->scpm_enabled) return 0; + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) || + (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3))) + return 0; + /* override pptable_id from driver parameter */ if (amdgpu_smu_pptable_id >= 0) { pptable_id = amdgpu_smu_pptable_id; @@ -198,7 +210,12 @@ int smu_v14_0_check_fw_status(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; uint32_t mp1_fw_flags; - mp1_fw_flags = RREG32_PCIE(MP1_Public | + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); + else + mp1_fw_flags = RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> @@ -227,16 +244,16 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) adev->pm.fw_version = smu_version; switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(14, 0, 2): - smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; - break; case IP_VERSION(14, 0, 0): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; break; case IP_VERSION(14, 0, 1): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1; break; - + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): + smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; + break; default: dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", amdgpu_ip_version(adev, MP1_HWIP, 0)); @@ -738,9 +755,9 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable) struct amdgpu_device *adev = smu->adev; switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(14, 0, 2): case IP_VERSION(14, 0, 0): case IP_VERSION(14, 0, 1): + case IP_VERSION(14, 0, 2): if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) @@ -841,9 +858,16 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, // TODO /* For MP1 SW irqs */ - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 
1); - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val); + } else { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + } break; case AMDGPU_IRQ_STATE_ENABLE: @@ -851,14 +875,26 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, // TODO /* For MP1 SW irqs */ - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); - - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val); + + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val); + } else { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); + + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + } break; default: @@ -868,11 +904,32 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, return 0; } +#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ +#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ + static int smu_v14_0_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - // TODO + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t client_id = entry->client_id; + uint32_t src_id = entry->src_id; + + if (client_id == SOC15_IH_CLIENTID_THM) { + switch (src_id) { + case THM_11_0__SRCID__THM_DIG_THERM_L2H: + schedule_delayed_work(&smu->swctf_delayed_work, + msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY)); + break; + case THM_11_0__SRCID__THM_DIG_THERM_H2L: + dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); + break; + default: + dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n", + src_id); + break; + } + } return 0; } @@ -894,7 +951,17 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu) irq_src->num_types = 1; irq_src->funcs = &smu_v14_0_irq_funcs; - // TODO: THM related + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, + THM_11_0__SRCID__THM_DIG_THERM_L2H, + irq_src); + if (ret) + return ret; + + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, + 
THM_11_0__SRCID__THM_DIG_THERM_H2L, + irq_src); + if (ret) + return ret; ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, SMU_IH_INTERRUPT_ID_TO_DRIVER, @@ -1590,23 +1657,27 @@ int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu, return 0; } -bool smu_v14_0_baco_is_support(struct smu_context *smu) +int smu_v14_0_get_bamaco_support(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; + int bamaco_support = 0; if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) - return false; + return 0; + + if (smu_baco->maco_support) + bamaco_support |= MACO_SUPPORT; /* return true if ASIC is in BACO state already */ if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) - return true; + return (bamaco_support |= BACO_SUPPORT); if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) - return false; + return 0; - return true; + return (bamaco_support |= BACO_SUPPORT); } enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu) @@ -1629,7 +1700,7 @@ int smu_v14_0_baco_set_state(struct smu_context *smu, if (state == SMU_BACO_STATE_ENTER) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, - smu_baco->maco_support ? + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? BACO_SEQ_BAMACO : BACO_SEQ_BACO, NULL); } else { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index 20f3861b5e..18abfbd6d0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -383,6 +383,12 @@ static int smu_v14_0_0_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v14_0_0_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v14_0_0_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c new file mode 100644 index 0000000000..90703f4542 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -0,0 +1,1795 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#define SWSMU_CODE_LAYER_L2 + +#include +#include +#include +#include "amdgpu.h" +#include "amdgpu_smu.h" +#include "atomfirmware.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_atombios.h" +#include "smu_v14_0.h" +#include "smu14_driver_if_v14_0.h" +#include "soc15_common.h" +#include "atom.h" +#include "smu_v14_0_2_ppt.h" +#include "smu_v14_0_2_pptable.h" +#include "smu_v14_0_2_ppsmc.h" +#include "mp/mp_14_0_2_offset.h" +#include "mp/mp_14_0_2_sh_mask.h" + +#include "smu_cmn.h" +#include "amdgpu_ras.h" + +/* + * DO NOT use these for err/warn/info/debug messages. + * Use dev_err, dev_warn, dev_info and dev_dbg instead. + * They are more MGPU friendly. + */ +#undef pr_err +#undef pr_warn +#undef pr_info +#undef pr_debug + +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) + +#define FEATURE_MASK(feature) (1ULL << feature) +#define SMC_DPM_FEATURE ( \ + FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_FCLK_BIT)) + +#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 + +static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = { + MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), + MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), + MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), + MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0), + MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0), + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), + MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1), + MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1), + MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1), + MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1), + MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1), + MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), + MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), + MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), + MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), + MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0), + MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), + MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), + MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), + MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), + MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), + MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), + MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), + MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), + MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), + MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), + MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), + 
MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0), + MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), + MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), + MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), + MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), + MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), + MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), + MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), + MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), + MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), + MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), + MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), + MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0), + MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel, + PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0), + MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0), + MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0), +}; + +static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = { + CLK_MAP(GFXCLK, PPCLK_GFXCLK), + CLK_MAP(SCLK, PPCLK_GFXCLK), + CLK_MAP(SOCCLK, PPCLK_SOCCLK), + CLK_MAP(FCLK, PPCLK_FCLK), + CLK_MAP(UCLK, PPCLK_UCLK), + CLK_MAP(MCLK, PPCLK_UCLK), + CLK_MAP(VCLK, PPCLK_VCLK_0), + CLK_MAP(DCLK, PPCLK_DCLK_0), +}; + +static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = { + FEA_MAP(FW_DATA_READ), + FEA_MAP(DPM_GFXCLK), + FEA_MAP(DPM_GFX_POWER_OPTIMIZER), + FEA_MAP(DPM_UCLK), + FEA_MAP(DPM_FCLK), + FEA_MAP(DPM_SOCCLK), + FEA_MAP(DPM_LINK), + FEA_MAP(DPM_DCN), + FEA_MAP(VMEMP_SCALING), + FEA_MAP(VDDIO_MEM_SCALING), + FEA_MAP(DS_GFXCLK), + FEA_MAP(DS_SOCCLK), + FEA_MAP(DS_FCLK), + FEA_MAP(DS_LCLK), + FEA_MAP(DS_DCFCLK), + FEA_MAP(DS_UCLK), + FEA_MAP(GFX_ULV), + FEA_MAP(FW_DSTATE), + FEA_MAP(GFXOFF), + FEA_MAP(BACO), + FEA_MAP(MM_DPM), + FEA_MAP(SOC_MPCLK_DS), + FEA_MAP(BACO_MPCLK_DS), + FEA_MAP(THROTTLERS), + FEA_MAP(SMARTSHIFT), + FEA_MAP(GTHR), + FEA_MAP(ACDC), + FEA_MAP(VR0HOT), + FEA_MAP(FW_CTF), + FEA_MAP(FAN_CONTROL), + FEA_MAP(GFX_DCS), + FEA_MAP(GFX_READ_MARGIN), + FEA_MAP(LED_DISPLAY), + FEA_MAP(GFXCLK_SPREAD_SPECTRUM), + FEA_MAP(OUT_OF_BAND_MONITOR), + FEA_MAP(OPTIMIZED_VMIN), + FEA_MAP(GFX_IMU), + FEA_MAP(BOOT_TIME_CAL), + FEA_MAP(GFX_PCC_DFLL), + FEA_MAP(SOC_CG), + FEA_MAP(DF_CSTATE), + FEA_MAP(GFX_EDC), + FEA_MAP(BOOT_POWER_OPT), + FEA_MAP(CLOCK_POWER_DOWN_BYPASS), + FEA_MAP(DS_VCN), + FEA_MAP(BACO_CG), + FEA_MAP(MEM_TEMP_READ), + FEA_MAP(ATHUB_MMHUB_PG), + FEA_MAP(SOC_PCC), + [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, + [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, + [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT}, +}; + +static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = { + TAB_MAP(PPTABLE), + TAB_MAP(WATERMARKS), + TAB_MAP(AVFS_PSM_DEBUG), + TAB_MAP(PMSTATUSLOG), + TAB_MAP(SMU_METRICS), + TAB_MAP(DRIVER_SMU_CONFIG), + TAB_MAP(ACTIVITY_MONITOR_COEFF), + [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, + TAB_MAP(I2C_COMMANDS), + TAB_MAP(ECCINFO), +}; + +static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { + PWR_MAP(AC), + PWR_MAP(DC), +}; + +static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, 
WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT), +}; + +#if 0 +static const uint8_t smu_v14_0_2_throttler_map[] = { + [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), + [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), + [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT), + [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT), + [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT), + [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT), + [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT), + [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT), + [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), + [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), + [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT), + [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT), + [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT), + [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT), + [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT), + [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT), + [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), +}; +#endif + +static int +smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu, + uint32_t *feature_mask, uint32_t num) +{ + struct amdgpu_device *adev = smu->adev; + /*u32 smu_version;*/ + + if (num > 2) + return -EINVAL; + + memset(feature_mask, 0xff, sizeof(uint32_t) * num); + + if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) { + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT); + } +#if 0 + if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) || + !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT); + + if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); + + /* PMFW 78.58 contains a critical fix for gfxoff feature */ + smu_cmn_get_smc_version(smu, NULL, &smu_version); + if ((smu_version < 0x004e3a00) || + !(adev->pm.pp_feature & PP_GFXOFF_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT); + + if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) { + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT); + } + + if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); + + if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT); + } + + if (!(adev->pm.pp_feature & PP_ULV_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT); +#endif + + return 0; +} + +static int smu_v14_0_2_check_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; + 
struct smu_baco_context *smu_baco = &smu->smu_baco; + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &pptable->SkuTable.OverDriveLimitsBasicMin; + + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC) + smu->dc_controlled_by_gpio = true; + + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) { + smu_baco->platform_support = true; + + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO) + smu_baco->maco_support = true; + } + + if (!overdrive_lowerlimits->FeatureCtrlMask || + !overdrive_upperlimits->FeatureCtrlMask) + smu->od_enabled = false; + + table_context->thermal_controller_type = + powerplay_table->thermal_controller_type; + + /* + * Instead of having its own buffer space and get overdrive_table copied, + * smu->od_settings just points to the actual overdrive_table + */ + smu->od_settings = &powerplay_table->overdrive_table; + + smu->adev->pm.no_fan = + !(pptable->PFE_Settings.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT)); + + return 0; +} + +static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; + + memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable, + sizeof(PPTable_t)); + + return 0; +} + +#ifndef atom_smc_dpm_info_table_14_0_0 +struct atom_smc_dpm_info_table_14_0_0 { + struct atom_common_table_header table_header; + BoardTable_t BoardTable; +}; +#endif + +static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *smc_pptable = table_context->driver_pptable; + struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table; + BoardTable_t *BoardTable = &smc_pptable->BoardTable; + int index, ret; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + smc_dpm_info); + + ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL, + (uint8_t **)&smc_dpm_table); + if (ret) + return ret; + + memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t)); + + return 0; +} + +#if 0 +static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, + void **table, + uint32_t *size) +{ + struct smu_table_context *smu_table = &smu->smu_table; + void *combo_pptable = smu_table->combo_pptable; + int ret = 0; + + ret = smu_cmn_get_combo_pptable(smu); + if (ret) + return ret; + + *table = combo_pptable; + *size = sizeof(struct smu_14_0_powerplay_table); + + return 0; +} +#endif + +static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, + void **table, + uint32_t *size) +{ + struct smu_table_context *smu_table = &smu->smu_table; + void *combo_pptable = smu_table->combo_pptable; + int ret = 0; + + ret = smu_cmn_get_combo_pptable(smu); + if (ret) + return ret; + + *table = combo_pptable; + *size = sizeof(struct smu_14_0_2_powerplay_table); + + return 0; +} + +static int smu_v14_0_2_setup_pptable(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct amdgpu_device *adev = smu->adev; + int ret = 0; + + if (amdgpu_sriov_vf(smu->adev)) + return 0; + + if (!adev->scpm_enabled) + ret = smu_v14_0_setup_pptable(smu); + else + ret = smu_v14_0_2_get_pptable_from_pmfw(smu, + &smu_table->power_play_table, + 
&smu_table->power_play_table_size);
+	if (ret)
+		return ret;
+
+	ret = smu_v14_0_2_store_powerplay_table(smu);
+	if (ret)
+		return ret;
+
+	/*
+	 * With SCPM enabled, the operation below will be handled
+	 * by PSP. Driver involvement is unnecessary.
+	 */
+	if (!adev->scpm_enabled) {
+		ret = smu_v14_0_2_append_powerplay_table(smu);
+		if (ret)
+			return ret;
+	}
+
+	ret = smu_v14_0_2_check_powerplay_table(smu);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int smu_v14_0_2_tables_init(struct smu_context *smu)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_table *tables = smu_table->tables;
+
+	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
+		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
+		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
+		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
+		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+		       sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
+		       AMDGPU_GEM_DOMAIN_VRAM);
+	SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
+		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+	SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
+		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+	smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
+	if (!smu_table->metrics_table)
+		goto err0_out;
+	smu_table->metrics_time = 0;
+
+	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
+	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+	if (!smu_table->gpu_metrics_table)
+		goto err1_out;
+
+	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+	if (!smu_table->watermarks_table)
+		goto err2_out;
+
+	smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+	if (!smu_table->ecc_table)
+		goto err3_out;
+
+	return 0;
+
+err3_out:
+	kfree(smu_table->watermarks_table);
+err2_out:
+	kfree(smu_table->gpu_metrics_table);
+err1_out:
+	kfree(smu_table->metrics_table);
+err0_out:
+	return -ENOMEM;
+}
+
+static int smu_v14_0_2_allocate_dpm_context(struct smu_context *smu)
+{
+	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
+				       GFP_KERNEL);
+	if (!smu_dpm->dpm_context)
+		return -ENOMEM;
+
+	smu_dpm->dpm_context_size = sizeof(struct smu_14_0_dpm_context);
+
+	return 0;
+}
+
+static int smu_v14_0_2_init_smc_tables(struct smu_context *smu)
+{
+	int ret = 0;
+
+	ret = smu_v14_0_2_tables_init(smu);
+	if (ret)
+		return ret;
+
+	ret = smu_v14_0_2_allocate_dpm_context(smu);
+	if (ret)
+		return ret;
+
+	return smu_v14_0_init_smc_tables(smu);
+}
+
+static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
+{
+	struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+	struct smu_table_context *table_context = &smu->smu_table;
+	PPTable_t *pptable = table_context->driver_pptable;
+	SkuTable_t *skutable = &pptable->SkuTable;
+	struct smu_14_0_dpm_table *dpm_table;
+	struct smu_14_0_pcie_table *pcie_table;
+	uint32_t link_level;
+	int ret = 0;
+
+	/* socclk dpm table setup */
+	dpm_table = &dpm_context->dpm_tables.soc_table;
+	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+		ret = smu_v14_0_set_single_dpm_table(smu,
+						     SMU_SOCCLK,
+						     dpm_table);
+		if (ret)
+			return ret;
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
+		dpm_table->dpm_levels[0].enabled = true;
+		dpm_table->min = dpm_table->dpm_levels[0].value;
+		dpm_table->max = dpm_table->dpm_levels[0].value;
+	}
+
+	/* gfxclk dpm table setup */
+	dpm_table = &dpm_context->dpm_tables.gfx_table;
+	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+		ret = smu_v14_0_set_single_dpm_table(smu,
+						     SMU_GFXCLK,
+						     dpm_table);
+		if (ret)
+			return ret;
+
+		/*
+		 * Update the reported maximum shader clock to the value
+		 * which can be guaranteed to be achieved on all cards. This
+		 * is aligned with the Windows setting. Since that value
+		 * might not be the peak frequency the card can achieve, it
+		 * is normal for the real-time clock frequency to exceed this
+		 * labelled maximum clock frequency (for example in the
+		 * pp_dpm_sclk sysfs output).
+		 */
+		if (skutable->DriverReportedClocks.GameClockAc &&
+		    (dpm_table->dpm_levels[dpm_table->count - 1].value >
+		     skutable->DriverReportedClocks.GameClockAc)) {
+			dpm_table->dpm_levels[dpm_table->count - 1].value =
+				skutable->DriverReportedClocks.GameClockAc;
+			dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
+		}
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+		dpm_table->dpm_levels[0].enabled = true;
+		dpm_table->min = dpm_table->dpm_levels[0].value;
+		dpm_table->max = dpm_table->dpm_levels[0].value;
+	}
+
+	/* uclk dpm table setup */
+	dpm_table = &dpm_context->dpm_tables.uclk_table;
+	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+		ret = smu_v14_0_set_single_dpm_table(smu,
+						     SMU_UCLK,
+						     dpm_table);
+		if (ret)
+			return ret;
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
+		dpm_table->dpm_levels[0].enabled = true;
+		dpm_table->min = dpm_table->dpm_levels[0].value;
+		dpm_table->max = dpm_table->dpm_levels[0].value;
+	}
+
+	/* fclk dpm table setup */
+	dpm_table = &dpm_context->dpm_tables.fclk_table;
+	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
+		ret = smu_v14_0_set_single_dpm_table(smu,
+						     SMU_FCLK,
+						     dpm_table);
+		if (ret)
+			return ret;
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
+		dpm_table->dpm_levels[0].enabled = true;
+		dpm_table->min = dpm_table->dpm_levels[0].value;
+		dpm_table->max = dpm_table->dpm_levels[0].value;
+	}
+
+	/* vclk dpm table setup */
+	dpm_table = &dpm_context->dpm_tables.vclk_table;
+	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
+		ret = smu_v14_0_set_single_dpm_table(smu,
+						     SMU_VCLK,
+						     dpm_table);
+		if (ret)
+			return ret;
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
+		dpm_table->dpm_levels[0].enabled = true;
+		dpm_table->min = dpm_table->dpm_levels[0].value;
+		dpm_table->max = dpm_table->dpm_levels[0].value;
+	}
+
+	/* dclk dpm table setup */
+	dpm_table = &dpm_context->dpm_tables.dclk_table;
+	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
+		ret = smu_v14_0_set_single_dpm_table(smu,
+						     SMU_DCLK,
+						     dpm_table);
+		if (ret)
+			return ret;
+	} else {
+		dpm_table->count = 1;
+		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
+		dpm_table->dpm_levels[0].enabled = true;
+		dpm_table->min =
dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* lclk dpm table setup */ + pcie_table = &dpm_context->dpm_tables.pcie_table; + pcie_table->num_of_link_levels = 0; + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + + return 0; +} + +static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu) +{ + int ret = 0; + uint64_t feature_enabled; + + ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); + if (ret) + return false; + + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +static void smu_v14_0_2_dump_pptable(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + PFE_Settings_t *PFEsettings = &pptable->PFE_Settings; + + dev_info(smu->adev->dev, "Dumped PPTable:\n"); + + dev_info(smu->adev->dev, "Version = 0x%08x\n", PFEsettings->Version); + dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", PFEsettings->FeaturesToRun[0]); + dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", PFEsettings->FeaturesToRun[1]); +} + +static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics) +{ + uint32_t throttler_status = 0; + int i; + + for (i = 0; i < THROTTLER_COUNT; i++) + throttler_status |= + (metrics->ThrottlingPercentage[i] ? 1U << i : 0); + + return throttler_status; +} + +#define SMU_14_0_2_BUSY_THRESHOLD 5 +static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + SmuMetrics_t *metrics = + &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); + int ret = 0; + + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) + return ret; + + switch (member) { + case METRICS_CURR_GFXCLK: + *value = metrics->CurrClock[PPCLK_GFXCLK]; + break; + case METRICS_CURR_SOCCLK: + *value = metrics->CurrClock[PPCLK_SOCCLK]; + break; + case METRICS_CURR_UCLK: + *value = metrics->CurrClock[PPCLK_UCLK]; + break; + case METRICS_CURR_VCLK: + *value = metrics->CurrClock[PPCLK_VCLK_0]; + break; + case METRICS_CURR_DCLK: + *value = metrics->CurrClock[PPCLK_DCLK_0]; + break; + case METRICS_CURR_FCLK: + *value = metrics->CurrClock[PPCLK_FCLK]; + break; + case METRICS_CURR_DCEFCLK: + *value = metrics->CurrClock[PPCLK_DCFCLK]; + break; + case METRICS_AVERAGE_GFXCLK: + if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD) + *value = metrics->AverageGfxclkFrequencyPostDs; + else + *value = metrics->AverageGfxclkFrequencyPreDs; + break; + case METRICS_AVERAGE_FCLK: + if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) + *value = metrics->AverageFclkFrequencyPostDs; + else + *value = metrics->AverageFclkFrequencyPreDs; + break; + case METRICS_AVERAGE_UCLK: + if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) + *value = metrics->AverageMemclkFrequencyPostDs; + else + *value = metrics->AverageMemclkFrequencyPreDs; + break; + case METRICS_AVERAGE_VCLK: + *value = metrics->AverageVclk0Frequency; + break; + case METRICS_AVERAGE_DCLK: + 
*value = metrics->AverageDclk0Frequency; + break; + case METRICS_AVERAGE_VCLK1: + *value = metrics->AverageVclk1Frequency; + break; + case METRICS_AVERAGE_DCLK1: + *value = metrics->AverageDclk1Frequency; + break; + case METRICS_AVERAGE_GFXACTIVITY: + *value = metrics->AverageGfxActivity; + break; + case METRICS_AVERAGE_MEMACTIVITY: + *value = metrics->AverageUclkActivity; + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = metrics->AverageSocketPower << 8; + break; + case METRICS_TEMPERATURE_EDGE: + *value = metrics->AvgTemperature[TEMP_EDGE] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = metrics->AvgTemperature[TEMP_HOTSPOT] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_MEM: + *value = metrics->AvgTemperature[TEMP_MEM] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRGFX: + *value = metrics->AvgTemperature[TEMP_VR_GFX] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRSOC: + *value = metrics->AvgTemperature[TEMP_VR_SOC] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_THROTTLER_STATUS: + *value = smu_v14_0_2_get_throttler_status(metrics); + break; + case METRICS_CURR_FANSPEED: + *value = metrics->AvgFanRpm; + break; + case METRICS_CURR_FANPWM: + *value = metrics->AvgFanPwm; + break; + case METRICS_VOLTAGE_VDDGFX: + *value = metrics->AvgVoltage[SVI_PLANE_VDD_GFX]; + break; + case METRICS_PCIE_RATE: + *value = metrics->PcieRate; + break; + case METRICS_PCIE_WIDTH: + *value = metrics->PcieWidth; + break; + default: + *value = UINT_MAX; + break; + } + + return ret; +} + +static int smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, + uint32_t *max) +{ + struct smu_14_0_dpm_context *dpm_context = + smu->smu_dpm.dpm_context; + struct smu_14_0_dpm_table *dpm_table; + + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + /* uclk dpm table */ + dpm_table = &dpm_context->dpm_tables.uclk_table; + break; + case SMU_GFXCLK: + case SMU_SCLK: + /* gfxclk dpm table */ + dpm_table = &dpm_context->dpm_tables.gfx_table; + break; + case SMU_SOCCLK: + /* socclk dpm table */ + dpm_table = &dpm_context->dpm_tables.soc_table; + break; + case SMU_FCLK: + /* fclk dpm table */ + dpm_table = &dpm_context->dpm_tables.fclk_table; + break; + case SMU_VCLK: + case SMU_VCLK1: + /* vclk dpm table */ + dpm_table = &dpm_context->dpm_tables.vclk_table; + break; + case SMU_DCLK: + case SMU_DCLK1: + /* dclk dpm table */ + dpm_table = &dpm_context->dpm_tables.dclk_table; + break; + default: + dev_err(smu->adev->dev, "Unsupported clock type!\n"); + return -EINVAL; + } + + if (min) + *min = dpm_table->min; + if (max) + *max = dpm_table->max; + + return 0; +} + +static int smu_v14_0_2_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, + uint32_t *size) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *smc_pptable = table_context->driver_pptable; + int ret = 0; + + switch (sensor) { + case AMDGPU_PP_SENSOR_MAX_FAN_RPM: + *(uint16_t *)data = smc_pptable->CustomSkuTable.FanMaximumRpm; + *size = 4; + break; + case AMDGPU_PP_SENSOR_MEM_LOAD: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_MEMACTIVITY, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_GFXACTIVITY, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: + ret 
= smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_SOCKETPOWER, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_HOTSPOT, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_EDGE_TEMP: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_EDGE, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_MEM_TEMP: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_MEM, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_CURR_UCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_GFXCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_VDDGFX: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_VOLTAGE_VDDGFX, + (uint32_t *)data); + *size = 4; + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + MetricsMember_t member_type; + int clk_id = 0; + + clk_id = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_CLK, + clk_type); + if (clk_id < 0) + return -EINVAL; + + switch (clk_id) { + case PPCLK_GFXCLK: + member_type = METRICS_AVERAGE_GFXCLK; + break; + case PPCLK_UCLK: + member_type = METRICS_CURR_UCLK; + break; + case PPCLK_FCLK: + member_type = METRICS_CURR_FCLK; + break; + case PPCLK_SOCCLK: + member_type = METRICS_CURR_SOCCLK; + break; + case PPCLK_VCLK_0: + member_type = METRICS_AVERAGE_VCLK; + break; + case PPCLK_DCLK_0: + member_type = METRICS_AVERAGE_DCLK; + break; + default: + return -EINVAL; + } + + return smu_v14_0_2_get_smu_metrics_data(smu, + member_type, + value); +} + +static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + char *buf) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct smu_14_0_dpm_table *single_dpm_table; + int i, curr_freq, size = 0; + int ret = 0; + + smu_cmn_get_sysfs_buf(&buf, &size); + + if (amdgpu_ras_intr_triggered()) { + size += sysfs_emit_at(buf, size, "unavailable\n"); + return size; + } + + switch (clk_type) { + case SMU_SCLK: + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + break; + case SMU_MCLK: + single_dpm_table = &(dpm_context->dpm_tables.uclk_table); + break; + case SMU_SOCCLK: + single_dpm_table = &(dpm_context->dpm_tables.soc_table); + break; + case SMU_FCLK: + single_dpm_table = &(dpm_context->dpm_tables.fclk_table); + break; + case SMU_VCLK: + case SMU_VCLK1: + single_dpm_table = &(dpm_context->dpm_tables.vclk_table); + break; + case SMU_DCLK: + case SMU_DCLK1: + single_dpm_table = &(dpm_context->dpm_tables.dclk_table); + break; + default: + break; + } + + switch (clk_type) { + case SMU_SCLK: + case SMU_MCLK: + case SMU_SOCCLK: + case SMU_FCLK: + case SMU_VCLK: + case SMU_VCLK1: + case SMU_DCLK: + case SMU_DCLK1: + ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq); + if (ret) { + dev_err(smu->adev->dev, "Failed to get current clock freq!"); + return ret; + } + + if (single_dpm_table->is_fine_grained) { + /* + * For fine grained dpms, there are only two dpm levels: + * - level 0 -> min clock freq + * - level 1 -> max clock freq + 
 * And the current clock frequency can be any value between them.
+			 * So, if the current clock frequency is not at level 0 or level 1,
+			 * we will fake it as three dpm levels:
+			 * - level 0 -> min clock freq
+			 * - level 1 -> current actual clock freq
+			 * - level 2 -> max clock freq
+			 */
+			if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
+			    (single_dpm_table->dpm_levels[1].value != curr_freq)) {
+				size += sysfs_emit_at(buf, size, "0: %uMhz\n",
+						      single_dpm_table->dpm_levels[0].value);
+				size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
+						      curr_freq);
+				size += sysfs_emit_at(buf, size, "2: %uMhz\n",
+						      single_dpm_table->dpm_levels[1].value);
+			} else {
+				size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
+						      single_dpm_table->dpm_levels[0].value,
+						      single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
+				size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+						      single_dpm_table->dpm_levels[1].value,
+						      single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
+			}
+		} else {
+			for (i = 0; i < single_dpm_table->count; i++)
+				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+						      i, single_dpm_table->dpm_levels[i].value,
+						      single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
+		}
+		break;
+	case SMU_PCIE:
+		// TODO
+		break;
+
+	default:
+		break;
+	}
+
+	return size;
+}
+
+static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
+					enum smu_clk_type clk_type,
+					uint32_t mask)
+{
+	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+	struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+	struct smu_14_0_dpm_table *single_dpm_table;
+	uint32_t soft_min_level, soft_max_level;
+	uint32_t min_freq, max_freq;
+	int ret = 0;
+
+	soft_min_level = mask ? (ffs(mask) - 1) : 0;
+	soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+	switch (clk_type) {
+	case SMU_GFXCLK:
+	case SMU_SCLK:
+		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+		break;
+	case SMU_MCLK:
+	case SMU_UCLK:
+		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
+		break;
+	case SMU_SOCCLK:
+		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
+		break;
+	case SMU_FCLK:
+		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
+		break;
+	case SMU_VCLK:
+	case SMU_VCLK1:
+		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+		break;
+	case SMU_DCLK:
+	case SMU_DCLK1:
+		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+		break;
+	default:
+		break;
+	}
+
+	switch (clk_type) {
+	case SMU_GFXCLK:
+	case SMU_SCLK:
+	case SMU_MCLK:
+	case SMU_UCLK:
+	case SMU_SOCCLK:
+	case SMU_FCLK:
+	case SMU_VCLK:
+	case SMU_VCLK1:
+	case SMU_DCLK:
+	case SMU_DCLK1:
+		if (single_dpm_table->is_fine_grained) {
+			/* There are only two levels for fine-grained DPM */
+			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+			soft_min_level = (soft_min_level >= 1 ?
1 : 0); + } else { + if ((soft_max_level >= single_dpm_table->count) || + (soft_min_level >= single_dpm_table->count)) + return -EINVAL; + } + + min_freq = single_dpm_table->dpm_levels[soft_min_level].value; + max_freq = single_dpm_table->dpm_levels[soft_max_level].value; + + ret = smu_v14_0_set_soft_freq_limited_range(smu, + clk_type, + min_freq, + max_freq); + break; + case SMU_DCEFCLK: + case SMU_PCIE: + default: + break; + } + + return ret; +} + +static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, + uint8_t pcie_gen_cap, + uint8_t pcie_width_cap) +{ + struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_14_0_pcie_table *pcie_table = + &dpm_context->dpm_tables.pcie_table; + uint32_t smu_pcie_arg; + int ret, i; + + for (i = 0; i < pcie_table->num_of_link_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap) + pcie_table->pcie_gen[i] = pcie_gen_cap; + if (pcie_table->pcie_lane[i] > pcie_width_cap) + pcie_table->pcie_lane[i] = pcie_width_cap; + + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + return ret; + } + + return 0; +} + +static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu, + struct smu_temperature_range *range) +{ + // TODO + + return 0; +} + +static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu) +{ + // TODO + + return 0; +} + +static void smu_v14_0_2_get_unique_id(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + SmuMetrics_t *metrics = + &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); + struct amdgpu_device *adev = smu->adev; + uint32_t upper32 = 0, lower32 = 0; + int ret; + + ret = smu_cmn_get_metrics_table(smu, NULL, false); + if (ret) + goto out; + + upper32 = metrics->PublicSerialNumberUpper; + lower32 = metrics->PublicSerialNumberLower; + +out: + adev->unique_id = ((uint64_t)upper32 << 32) | lower32; +} + +static int smu_v14_0_2_get_power_limit(struct smu_context *smu, + uint32_t *current_power_limit, + uint32_t *default_power_limit, + uint32_t *max_power_limit, + uint32_t *min_power_limit) +{ + // TODO + + return 0; +} + +static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu, + char *buf) +{ + DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; + DpmActivityMonitorCoeffInt_t *activity_monitor = + &(activity_monitor_external.DpmActivityMonitorCoeffInt); + static const char *title[] = { + "PROFILE_INDEX(NAME)", + "CLOCK_TYPE(NAME)", + "FPS", + "MinActiveFreqType", + "MinActiveFreq", + "BoosterFreqType", + "BoosterFreq", + "PD_Data_limit_c", + "PD_Data_error_coeff", + "PD_Data_error_rate_coeff"}; + int16_t workload_type = 0; + uint32_t i, size = 0; + int result = 0; + + if (!buf) + return -EINVAL; + + size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n", + title[0], title[1], title[2], title[3], title[4], title[5], + title[6], title[7], title[8], title[9]); + + for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + i); + if (workload_type == -ENOTSUPP) + continue; + else if (workload_type < 0) + return -EINVAL; + + result = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + workload_type, + (void *)(&activity_monitor_external), + false); + if 
(result) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return result; + } + + size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + + size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 0, + "GFXCLK", + activity_monitor->Gfx_FPS, + activity_monitor->Gfx_MinActiveFreqType, + activity_monitor->Gfx_MinActiveFreq, + activity_monitor->Gfx_BoosterFreqType, + activity_monitor->Gfx_BoosterFreq, + activity_monitor->Gfx_PD_Data_limit_c, + activity_monitor->Gfx_PD_Data_error_coeff, + activity_monitor->Gfx_PD_Data_error_rate_coeff); + + size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 1, + "FCLK", + activity_monitor->Fclk_FPS, + activity_monitor->Fclk_MinActiveFreqType, + activity_monitor->Fclk_MinActiveFreq, + activity_monitor->Fclk_BoosterFreqType, + activity_monitor->Fclk_BoosterFreq, + activity_monitor->Fclk_PD_Data_limit_c, + activity_monitor->Fclk_PD_Data_error_coeff, + activity_monitor->Fclk_PD_Data_error_rate_coeff); + } + + return size; +} + +static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, + long *input, + uint32_t size) +{ + DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; + DpmActivityMonitorCoeffInt_t *activity_monitor = + &(activity_monitor_external.DpmActivityMonitorCoeffInt); + int workload_type, ret = 0; + + smu->power_profile_mode = input[size]; + + if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { + dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); + return -EINVAL; + } + + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } + + switch (input[0]) { + case 0: /* Gfxclk */ + activity_monitor->Gfx_FPS = input[1]; + activity_monitor->Gfx_MinActiveFreqType = input[2]; + activity_monitor->Gfx_MinActiveFreq = input[3]; + activity_monitor->Gfx_BoosterFreqType = input[4]; + activity_monitor->Gfx_BoosterFreq = input[5]; + activity_monitor->Gfx_PD_Data_limit_c = input[6]; + activity_monitor->Gfx_PD_Data_error_coeff = input[7]; + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; + break; + case 1: /* Fclk */ + activity_monitor->Fclk_FPS = input[1]; + activity_monitor->Fclk_MinActiveFreqType = input[2]; + activity_monitor->Fclk_MinActiveFreq = input[3]; + activity_monitor->Fclk_BoosterFreqType = input[4]; + activity_monitor->Fclk_BoosterFreq = input[5]; + activity_monitor->Fclk_PD_Data_limit_c = input[6]; + activity_monitor->Fclk_PD_Data_error_coeff = input[7]; + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; + break; + } + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } + } + + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + smu->power_profile_mode); + if (workload_type < 0) + return -EINVAL; + + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetWorkloadMask, + 1 << workload_type, + NULL); +} + +static int smu_v14_0_2_baco_enter(struct 
smu_context *smu) +{ + struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) + return smu_v14_0_baco_set_armd3_sequence(smu, + smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); + else + return smu_v14_0_baco_enter(smu); +} + +static int smu_v14_0_2_baco_exit(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + /* Wait for PMFW handling for the Dstate change */ + usleep_range(10000, 11000); + return smu_v14_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); + } else { + return smu_v14_0_baco_exit(smu); + } +} + +static bool smu_v14_0_2_is_mode1_reset_supported(struct smu_context *smu) +{ + // TODO + + return true; +} + +static int smu_v14_0_2_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msg, int num_msgs) +{ + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); + struct amdgpu_device *adev = smu_i2c->adev; + struct smu_context *smu = adev->powerplay.pp_handle; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *table = &smu_table->driver_table; + SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; + int i, j, r, c; + u16 dir; + + if (!adev->pm.dpm_enabled) + return -EBUSY; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + req->I2CcontrollerPort = smu_i2c->port; + req->I2CSpeed = I2C_SPEED_FAST_400K; + req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */ + dir = msg[0].flags & I2C_M_RD; + + for (c = i = 0; i < num_msgs; i++) { + for (j = 0; j < msg[i].len; j++, c++) { + SwI2cCmd_t *cmd = &req->SwI2cCmds[c]; + + if (!(msg[i].flags & I2C_M_RD)) { + /* write */ + cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK; + cmd->ReadWriteData = msg[i].buf[j]; + } + + if ((dir ^ msg[i].flags) & I2C_M_RD) { + /* The direction changes. + */ + dir = msg[i].flags & I2C_M_RD; + cmd->CmdConfig |= CMDCONFIG_RESTART_MASK; + } + + req->NumCmds++; + + /* + * Insert STOP if we are at the last byte of either last + * message for the transaction or the client explicitly + * requires a STOP at this particular message. 
+ */ + if ((j == msg[i].len - 1) && + ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) { + cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK; + cmd->CmdConfig |= CMDCONFIG_STOP_MASK; + } + } + } + mutex_lock(&adev->pm.mutex); + r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); + mutex_unlock(&adev->pm.mutex); + if (r) + goto fail; + + for (c = i = 0; i < num_msgs; i++) { + if (!(msg[i].flags & I2C_M_RD)) { + c += msg[i].len; + continue; + } + for (j = 0; j < msg[i].len; j++, c++) { + SwI2cCmd_t *cmd = &res->SwI2cCmds[c]; + + msg[i].buf[j] = cmd->ReadWriteData; + } + } + r = num_msgs; +fail: + kfree(req); + return r; +} + +static u32 smu_v14_0_2_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm smu_v14_0_2_i2c_algo = { + .master_xfer = smu_v14_0_2_i2c_xfer, + .functionality = smu_v14_0_2_i2c_func, +}; + +static const struct i2c_adapter_quirks smu_v14_0_2_i2c_control_quirks = { + .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN, + .max_read_len = MAX_SW_I2C_COMMANDS, + .max_write_len = MAX_SW_I2C_COMMANDS, + .max_comb_1st_msg_len = 2, + .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2, +}; + +static int smu_v14_0_2_i2c_control_init(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int res, i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + smu_i2c->adev = adev; + smu_i2c->port = i; + mutex_init(&smu_i2c->mutex); + control->owner = THIS_MODULE; + control->dev.parent = &adev->pdev->dev; + control->algo = &smu_v14_0_2_i2c_algo; + snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); + control->quirks = &smu_v14_0_2_i2c_control_quirks; + i2c_set_adapdata(control, smu_i2c); + + res = i2c_add_adapter(control); + if (res) { + DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + goto Out_err; + } + } + + /* assign the buses used for the FRU EEPROM and RAS EEPROM */ + /* XXX ideally this would be something in a vbios data table */ + adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; + adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; + + return 0; +Out_err: + for ( ; i >= 0; i--) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + i2c_del_adapter(control); + } + return res; +} + +static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + i2c_del_adapter(control); + } + adev->pm.ras_eeprom_i2c_bus = NULL; + adev->pm.fru_eeprom_i2c_bus = NULL; +} + +static int smu_v14_0_2_set_mp1_state(struct smu_context *smu, + enum pp_mp1_state mp1_state) +{ + int ret; + + switch (mp1_state) { + case PP_MP1_STATE_UNLOAD: + ret = smu_cmn_set_mp1_state(smu, mp1_state); + break; + default: + /* Ignore others */ + ret = 0; + } + + return ret; +} + +static int smu_v14_0_2_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_DFCstateControl, + state, + NULL); +} + +static int smu_v14_0_2_mode1_reset(struct smu_context *smu) +{ + int ret = 0; + + // TODO + + return ret; +} + +static int smu_v14_0_2_mode2_reset(struct smu_context *smu) +{ + int ret = 0; + + // TODO + + return ret; +} + +static int 
smu_v14_0_2_enable_gfx_features(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(14, 0, 2)) + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures, + FEATURE_PWR_GFX, NULL); + else + return -EOPNOTSUPP; +} + +static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82); + smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66); + smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90); +} + +static int smu_v14_0_2_smu_send_bad_mem_page_num(struct smu_context *smu, + uint32_t size) +{ + int ret = 0; + + /* message SMU to update the bad page number on SMUBUS */ + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetNumBadMemoryPagesRetired, + size, NULL); + if (ret) + dev_err(smu->adev->dev, + "[%s] failed to message SMU to update bad memory pages number\n", + __func__); + + return ret; +} + +static int smu_v14_0_2_send_bad_mem_channel_flag(struct smu_context *smu, + uint32_t size) +{ + int ret = 0; + + /* message SMU to update the bad channel info on SMUBUS */ + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, + size, NULL); + if (ret) + dev_err(smu->adev->dev, + "[%s] failed to message SMU to update bad memory pages channel info\n", + __func__); + + return ret; +} + +static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu, + void *table) +{ + int ret = 0; + + // TODO + + return ret; +} + +static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { + .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask, + .set_default_dpm_table = smu_v14_0_2_set_default_dpm_table, + .i2c_init = smu_v14_0_2_i2c_control_init, + .i2c_fini = smu_v14_0_2_i2c_control_fini, + .is_dpm_running = smu_v14_0_2_is_dpm_running, + .dump_pptable = smu_v14_0_2_dump_pptable, + .init_microcode = smu_v14_0_init_microcode, + .load_microcode = smu_v14_0_load_microcode, + .fini_microcode = smu_v14_0_fini_microcode, + .init_smc_tables = smu_v14_0_2_init_smc_tables, + .fini_smc_tables = smu_v14_0_fini_smc_tables, + .init_power = smu_v14_0_init_power, + .fini_power = smu_v14_0_fini_power, + .check_fw_status = smu_v14_0_check_fw_status, + .setup_pptable = smu_v14_0_2_setup_pptable, + .check_fw_version = smu_v14_0_check_fw_version, + .write_pptable = smu_cmn_write_pptable, + .set_driver_table_location = smu_v14_0_set_driver_table_location, + .system_features_control = smu_v14_0_system_features_control, + .set_allowed_mask = smu_v14_0_set_allowed_mask, + .get_enabled_mask = smu_cmn_get_enabled_mask, + .dpm_set_vcn_enable = smu_v14_0_set_vcn_enable, + .dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable, + .get_dpm_ultimate_freq = smu_v14_0_2_get_dpm_ultimate_freq, + .get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values, + .read_sensor = smu_v14_0_2_read_sensor, + .feature_is_enabled = smu_cmn_feature_is_enabled, + .print_clk_levels = smu_v14_0_2_print_clk_levels, + .force_clk_levels = smu_v14_0_2_force_clk_levels, + .update_pcie_parameters = smu_v14_0_2_update_pcie_parameters, + .get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range, + .register_irq_handler = smu_v14_0_register_irq_handler, + .notify_memory_pool_location = smu_v14_0_notify_memory_pool_location, + .set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range, + .init_pptable_microcode = smu_v14_0_init_pptable_microcode, + 
.populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk, + .set_performance_level = smu_v14_0_set_performance_level, + .gfx_off_control = smu_v14_0_gfx_off_control, + .get_unique_id = smu_v14_0_2_get_unique_id, + .get_power_limit = smu_v14_0_2_get_power_limit, + .set_power_limit = smu_v14_0_set_power_limit, + .set_power_source = smu_v14_0_set_power_source, + .get_power_profile_mode = smu_v14_0_2_get_power_profile_mode, + .set_power_profile_mode = smu_v14_0_2_set_power_profile_mode, + .run_btc = smu_v14_0_run_btc, + .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, + .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, + .set_tool_table_location = smu_v14_0_set_tool_table_location, + .deep_sleep_control = smu_v14_0_deep_sleep_control, + .gfx_ulv_control = smu_v14_0_gfx_ulv_control, + .get_bamaco_support = smu_v14_0_get_bamaco_support, + .baco_get_state = smu_v14_0_baco_get_state, + .baco_set_state = smu_v14_0_baco_set_state, + .baco_enter = smu_v14_0_2_baco_enter, + .baco_exit = smu_v14_0_2_baco_exit, + .mode1_reset_is_support = smu_v14_0_2_is_mode1_reset_supported, + .mode1_reset = smu_v14_0_2_mode1_reset, + .mode2_reset = smu_v14_0_2_mode2_reset, + .enable_gfx_features = smu_v14_0_2_enable_gfx_features, + .set_mp1_state = smu_v14_0_2_set_mp1_state, + .set_df_cstate = smu_v14_0_2_set_df_cstate, + .send_hbm_bad_pages_num = smu_v14_0_2_smu_send_bad_mem_page_num, + .send_hbm_bad_channel_flag = smu_v14_0_2_send_bad_mem_channel_flag, + .gpo_control = smu_v14_0_gpo_control, + .get_ecc_info = smu_v14_0_2_get_ecc_info, +}; + +void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu) +{ + smu->ppt_funcs = &smu_v14_0_2_ppt_funcs; + smu->message_map = smu_v14_0_2_message_map; + smu->clock_map = smu_v14_0_2_clk_map; + smu->feature_map = smu_v14_0_2_feature_mask_map; + smu->table_map = smu_v14_0_2_table_map; + smu->pwr_src_map = smu_v14_0_2_pwr_src_map; + smu->workload_map = smu_v14_0_2_workload_map; + smu_v14_0_2_set_smu_mailbox_registers(smu); +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h new file mode 100644 index 0000000000..b83729e5d6 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h @@ -0,0 +1,28 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __SMU_V14_0_2_PPT_H__ +#define __SMU_V14_0_2_PPT_H__ + +extern void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu); + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index b8dbd4e253..6d1c3af927 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -235,6 +235,50 @@ static void __smu_cmn_send_msg(struct smu_context *smu, WREG32(smu->msg_reg, msg); } +static inline uint32_t __smu_cmn_get_msg_flags(struct smu_context *smu, + enum smu_message_type msg) +{ + return smu->message_map[msg].flags; +} + +static int __smu_cmn_ras_filter_msg(struct smu_context *smu, + enum smu_message_type msg, bool *poll) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t flags, resp; + bool fed_status; + + flags = __smu_cmn_get_msg_flags(smu, msg); + *poll = true; + + /* When there is RAS fatal error, FW won't process non-RAS priority + * messages. Don't allow any messages other than RAS priority messages. + */ + fed_status = amdgpu_ras_get_fed_status(adev); + if (fed_status) { + if (!(flags & SMU_MSG_RAS_PRI)) { + dev_dbg(adev->dev, + "RAS error detected, skip sending %s", + smu_get_message_name(smu, msg)); + return -EACCES; + } + + /* FW will ignore non-priority messages when a RAS fatal error + * is detected. Hence it is possible that a previous message + * wouldn't have got response. Allow to continue without polling + * for response status for priority messages. + */ + resp = RREG32(smu->resp_reg); + dev_dbg(adev->dev, + "Sending RAS priority message %s response status: %x", + smu_get_message_name(smu, msg), resp); + if (resp == 0) + *poll = false; + } + + return 0; +} + static int __smu_cmn_send_debug_msg(struct smu_context *smu, u32 msg, u32 param) @@ -354,6 +398,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, { struct amdgpu_device *adev = smu->adev; int res, index; + bool poll = true; u32 reg; if (adev->no_hw_access) @@ -366,12 +411,20 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, return index == -EACCES ? 
0 : index; mutex_lock(&smu->message_lock); - reg = __smu_cmn_poll_stat(smu); - res = __smu_cmn_reg2errno(smu, reg); - if (reg == SMU_RESP_NONE || - res == -EREMOTEIO) { - __smu_cmn_reg_print_error(smu, reg, index, param, msg); - goto Out; + + if (smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI) { + res = __smu_cmn_ras_filter_msg(smu, msg, &poll); + if (res) + goto Out; + } + + if (poll) { + reg = __smu_cmn_poll_stat(smu); + res = __smu_cmn_reg2errno(smu, reg); + if (reg == SMU_RESP_NONE || res == -EREMOTEIO) { + __smu_cmn_reg_print_error(smu, reg, index, param, msg); + goto Out; + } } __smu_cmn_send_msg(smu, (uint16_t) index, param); reg = __smu_cmn_poll_stat(smu); @@ -437,7 +490,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu, return -EINVAL; if (amdgpu_sriov_vf(smu->adev) && - !msg_mapping.valid_in_vf) + !(msg_mapping.flags & SMU_MSG_VF_FLAG)) return -EACCES; return msg_mapping.map_to; diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile index 1931a7fa1a..cf5287fcbb 100644 --- a/drivers/gpu/drm/arm/display/komeda/Makefile +++ b/drivers/gpu/drm/arm/display/komeda/Makefile @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 ccflags-y := \ - -I $(srctree)/$(src)/../include \ - -I $(srctree)/$(src) + -I $(src)/../include \ + -I $(src) komeda-y := \ komeda_drv.o \ diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c index 42510fdea2..67e5d3b419 100644 --- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c @@ -4,6 +4,8 @@ * Author: James.Qian.Wang * */ + +#include #include "d71_dev.h" #include "komeda_kms.h" #include "malidp_io.h" diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c index d8e449e6eb..50cb8f7ee6 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c @@ -72,11 +72,6 @@ struct gamma_curve_sector { u32 segment_width; }; -struct gamma_curve_segment { - u32 start; - u32 end; -}; - static struct gamma_curve_sector sector_tbl[] = { { 0, 4, 4 }, { 16, 4, 4 }, diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index 2c661f2841..b645c59982 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -5,6 +5,7 @@ * */ #include +#include #include #include @@ -610,12 +611,34 @@ get_crtc_primary(struct komeda_kms_dev *kms, struct komeda_crtc *crtc) return NULL; } +static int komeda_attach_bridge(struct device *dev, + struct komeda_pipeline *pipe, + struct drm_encoder *encoder) +{ + struct drm_bridge *bridge; + int err; + + bridge = devm_drm_of_get_bridge(dev, pipe->of_node, + KOMEDA_OF_PORT_OUTPUT, 0); + if (IS_ERR(bridge)) + return dev_err_probe(dev, PTR_ERR(bridge), "remote bridge not found for pipe: %s\n", + of_node_full_name(pipe->of_node)); + + err = drm_bridge_attach(encoder, bridge, NULL, 0); + if (err) + dev_err(dev, "bridge_attach() failed for pipe: %s\n", + of_node_full_name(pipe->of_node)); + + return err; +} + static int komeda_crtc_add(struct komeda_kms_dev *kms, struct komeda_crtc *kcrtc) { struct drm_crtc *crtc = &kcrtc->base; struct drm_device *base = &kms->base; - struct drm_bridge *bridge; + struct komeda_pipeline *pipe = kcrtc->master; + struct drm_encoder *encoder = &kcrtc->encoder; int err; err = 
drm_crtc_init_with_planes(base, crtc, @@ -626,27 +649,25 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms, drm_crtc_helper_add(crtc, &komeda_crtc_helper_funcs); - crtc->port = kcrtc->master->of_output_port; + crtc->port = pipe->of_output_port; /* Construct an encoder for each pipeline and attach it to the remote * bridge */ kcrtc->encoder.possible_crtcs = drm_crtc_mask(crtc); - err = drm_simple_encoder_init(base, &kcrtc->encoder, - DRM_MODE_ENCODER_TMDS); + err = drm_simple_encoder_init(base, encoder, DRM_MODE_ENCODER_TMDS); if (err) return err; - bridge = devm_drm_of_get_bridge(base->dev, kcrtc->master->of_node, - KOMEDA_OF_PORT_OUTPUT, 0); - if (IS_ERR(bridge)) - return PTR_ERR(bridge); - - err = drm_bridge_attach(&kcrtc->encoder, bridge, NULL, 0); + if (pipe->of_output_links[0]) { + err = komeda_attach_bridge(base->dev, pipe, encoder); + if (err) + return err; + } drm_crtc_enable_color_mgmt(crtc, 0, true, KOMEDA_COLOR_LUT_SIZE); - return err; + return 0; } int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c index 14ee79beca..5ba62e637a 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c @@ -12,10 +12,8 @@ #include #include #include -#ifdef CONFIG_DEBUG_FS #include #include -#endif #include @@ -43,7 +41,6 @@ static int komeda_register_show(struct seq_file *sf, void *x) DEFINE_SHOW_ATTRIBUTE(komeda_register); -#ifdef CONFIG_DEBUG_FS static void komeda_debugfs_init(struct komeda_dev *mdev) { if (!debugfs_initialized()) @@ -55,7 +52,6 @@ static void komeda_debugfs_init(struct komeda_dev *mdev) debugfs_create_x16("err_verbosity", 0664, mdev->debugfs_root, &mdev->err_verbosity); } -#endif static ssize_t core_id_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -265,9 +261,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev) mdev->err_verbosity = KOMEDA_DEV_PRINT_ERR_EVENTS; -#ifdef CONFIG_DEBUG_FS komeda_debugfs_init(mdev); -#endif return mdev; @@ -286,9 +280,7 @@ void komeda_dev_destroy(struct komeda_dev *mdev) sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group); -#ifdef CONFIG_DEBUG_FS debugfs_remove_recursive(mdev->debugfs_root); -#endif if (mdev->aclk) clk_prepare_enable(mdev->aclk); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c index 4b7d949615..00f5864a04 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c @@ -5,6 +5,7 @@ * */ #include +#include #include diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c index 29f4b52e3c..a763349dd8 100644 --- a/drivers/gpu/drm/armada/armada_debugfs.c +++ b/drivers/gpu/drm/armada/armada_debugfs.c @@ -5,6 +5,7 @@ */ #include +#include #include #include #include diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile index 5a53ce51fb..d794c076bc 100644 --- a/drivers/gpu/drm/ast/Makefile +++ b/drivers/gpu/drm/ast/Makefile @@ -3,6 +3,14 @@ # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
-ast-y := ast_drv.o ast_i2c.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o ast_dp.o +ast-y := \ + ast_ddc.o \ + ast_dp501.o \ + ast_dp.o \ + ast_drv.o \ + ast_main.o \ + ast_mm.o \ + ast_mode.o \ + ast_post.o obj-$(CONFIG_DRM_AST) := ast.o diff --git a/drivers/gpu/drm/ast/ast_ddc.c b/drivers/gpu/drm/ast/ast_ddc.c new file mode 100644 index 0000000000..29cf5d157f --- /dev/null +++ b/drivers/gpu/drm/ast/ast_ddc.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: MIT +/* + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + */ + +#include +#include + +#include +#include + +#include "ast_ddc.h" +#include "ast_drv.h" + +struct ast_ddc { + struct ast_device *ast; + + struct i2c_algo_bit_data bit; + struct i2c_adapter adapter; +}; + +static void ast_ddc_algo_bit_data_setsda(void *data, int state) +{ + struct ast_ddc *ddc = data; + struct ast_device *ast = ddc->ast; + int i; + u8 ujcrb7, jtemp; + + for (i = 0; i < 0x10000; i++) { + ujcrb7 = ((state & 0x01) ? 0 : 1) << 2; + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0xf1, ujcrb7); + jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x04); + if (ujcrb7 == jtemp) + break; + } +} + +static void ast_ddc_algo_bit_data_setscl(void *data, int state) +{ + struct ast_ddc *ddc = data; + struct ast_device *ast = ddc->ast; + int i; + u8 ujcrb7, jtemp; + + for (i = 0; i < 0x10000; i++) { + ujcrb7 = ((state & 0x01) ? 0 : 1); + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0xf4, ujcrb7); + jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x01); + if (ujcrb7 == jtemp) + break; + } +} + +static int ast_ddc_algo_bit_data_pre_xfer(struct i2c_adapter *adapter) +{ + struct ast_ddc *ddc = i2c_get_adapdata(adapter); + struct ast_device *ast = ddc->ast; + + /* + * Protect access to I/O registers from concurrent modesetting + * by acquiring the I/O-register lock. 
+ */ + mutex_lock(&ast->modeset_lock); + + return 0; +} + +static void ast_ddc_algo_bit_data_post_xfer(struct i2c_adapter *adapter) +{ + struct ast_ddc *ddc = i2c_get_adapdata(adapter); + struct ast_device *ast = ddc->ast; + + mutex_unlock(&ast->modeset_lock); +} + +static int ast_ddc_algo_bit_data_getsda(void *data) +{ + struct ast_ddc *ddc = data; + struct ast_device *ast = ddc->ast; + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x20) >> 5) & 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x20) >> 5) & 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x20) >> 5) & 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); + + return val & 1 ? 1 : 0; +} + +static int ast_ddc_algo_bit_data_getscl(void *data) +{ + struct ast_ddc *ddc = data; + struct ast_device *ast = ddc->ast; + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x10) >> 4) & 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x10) >> 4) & 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x10) >> 4) & 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); + + return val & 1 ? 1 : 0; +} + +static void ast_ddc_release(struct drm_device *dev, void *res) +{ + struct ast_ddc *ddc = res; + + i2c_del_adapter(&ddc->adapter); +} + +struct i2c_adapter *ast_ddc_create(struct ast_device *ast) +{ + struct drm_device *dev = &ast->base; + struct ast_ddc *ddc; + struct i2c_adapter *adapter; + struct i2c_algo_bit_data *bit; + int ret; + + ddc = drmm_kzalloc(dev, sizeof(*ddc), GFP_KERNEL); + if (!ddc) + return ERR_PTR(-ENOMEM); + ddc->ast = ast; + + bit = &ddc->bit; + bit->data = ddc; + bit->setsda = ast_ddc_algo_bit_data_setsda; + bit->setscl = ast_ddc_algo_bit_data_setscl; + bit->getsda = ast_ddc_algo_bit_data_getsda; + bit->getscl = ast_ddc_algo_bit_data_getscl; + bit->pre_xfer = ast_ddc_algo_bit_data_pre_xfer; + bit->post_xfer = ast_ddc_algo_bit_data_post_xfer; + bit->udelay = 20; + bit->timeout = usecs_to_jiffies(2200); + + adapter = &ddc->adapter; + adapter->owner = THIS_MODULE; + adapter->algo_data = bit; + adapter->dev.parent = dev->dev; + snprintf(adapter->name, sizeof(adapter->name), "AST DDC bus"); + i2c_set_adapdata(adapter, ddc); + + ret = i2c_bit_add_bus(adapter); + if (ret) { + drm_err(dev, "Failed to register bit i2c\n"); + return ERR_PTR(ret); + } + + ret = drmm_add_action_or_reset(dev, ast_ddc_release, ddc); + if (ret) + return ERR_PTR(ret); + + return &ddc->adapter; +} diff --git a/drivers/gpu/drm/ast/ast_ddc.h b/drivers/gpu/drm/ast/ast_ddc.h new file mode 100644 index 0000000000..85c93edc9a --- /dev/null +++ b/drivers/gpu/drm/ast/ast_ddc.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef __AST_DDC_H__ +#define __AST_DDC_H__ + +struct ast_device; +struct i2c_adapter; + +struct i2c_adapter *ast_ddc_create(struct ast_device *ast); + +#endif diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 90bcb1eb9c..f8c49ba68e 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -27,6 +27,7 @@ */ #include +#include #include #include diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 3be5ccf1f5..ba3d869739 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -28,8 +28,6 @@ #ifndef 
__AST_DRV_H__ #define __AST_DRV_H__ -#include -#include #include #include @@ -149,37 +147,9 @@ static inline struct ast_plane *to_ast_plane(struct drm_plane *plane) } /* - * Connector with i2c channel + * BMC */ -struct ast_i2c_chan { - struct i2c_adapter adapter; - struct drm_device *dev; - struct i2c_algo_bit_data bit; -}; - -struct ast_vga_connector { - struct drm_connector base; - struct ast_i2c_chan *i2c; -}; - -static inline struct ast_vga_connector * -to_ast_vga_connector(struct drm_connector *connector) -{ - return container_of(connector, struct ast_vga_connector, base); -} - -struct ast_sil164_connector { - struct drm_connector base; - struct ast_i2c_chan *i2c; -}; - -static inline struct ast_sil164_connector * -to_ast_sil164_connector(struct drm_connector *connector) -{ - return container_of(connector, struct ast_sil164_connector, base); -} - struct ast_bmc_connector { struct drm_connector base; struct drm_connector *physical_connector; @@ -222,11 +192,11 @@ struct ast_device { struct { struct { struct drm_encoder encoder; - struct ast_vga_connector vga_connector; + struct drm_connector connector; } vga; struct { struct drm_encoder encoder; - struct ast_sil164_connector sil164_connector; + struct drm_connector connector; } sil164; struct { struct drm_encoder encoder; @@ -498,9 +468,6 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata); u8 ast_get_dp501_max_clk(struct drm_device *dev); void ast_init_3rdtx(struct drm_device *dev); -/* ast_i2c.c */ -struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev); - /* aspeed DP */ bool ast_astdp_is_connected(struct ast_device *ast); int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata); diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_i2c.c deleted file mode 100644 index e5d3f7121d..0000000000 --- a/drivers/gpu/drm/ast/ast_i2c.c +++ /dev/null @@ -1,151 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - */ - -#include -#include - -#include "ast_drv.h" - -static void ast_i2c_setsda(void *i2c_priv, int data) -{ - struct ast_i2c_chan *i2c = i2c_priv; - struct ast_device *ast = to_ast_device(i2c->dev); - int i; - u8 ujcrb7, jtemp; - - for (i = 0; i < 0x10000; i++) { - ujcrb7 = ((data & 0x01) ? 
0 : 1) << 2; - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0xf1, ujcrb7); - jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x04); - if (ujcrb7 == jtemp) - break; - } -} - -static void ast_i2c_setscl(void *i2c_priv, int clock) -{ - struct ast_i2c_chan *i2c = i2c_priv; - struct ast_device *ast = to_ast_device(i2c->dev); - int i; - u8 ujcrb7, jtemp; - - for (i = 0; i < 0x10000; i++) { - ujcrb7 = ((clock & 0x01) ? 0 : 1); - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0xf4, ujcrb7); - jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x01); - if (ujcrb7 == jtemp) - break; - } -} - -static int ast_i2c_getsda(void *i2c_priv) -{ - struct ast_i2c_chan *i2c = i2c_priv; - struct ast_device *ast = to_ast_device(i2c->dev); - uint32_t val, val2, count, pass; - - count = 0; - pass = 0; - val = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x20) >> 5) & 0x01; - do { - val2 = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x20) >> 5) & 0x01; - if (val == val2) { - pass++; - } else { - pass = 0; - val = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x20) >> 5) & 0x01; - } - } while ((pass < 5) && (count++ < 0x10000)); - - return val & 1 ? 1 : 0; -} - -static int ast_i2c_getscl(void *i2c_priv) -{ - struct ast_i2c_chan *i2c = i2c_priv; - struct ast_device *ast = to_ast_device(i2c->dev); - uint32_t val, val2, count, pass; - - count = 0; - pass = 0; - val = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x10) >> 4) & 0x01; - do { - val2 = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x10) >> 4) & 0x01; - if (val == val2) { - pass++; - } else { - pass = 0; - val = (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x10) >> 4) & 0x01; - } - } while ((pass < 5) && (count++ < 0x10000)); - - return val & 1 ? 1 : 0; -} - -static void ast_i2c_release(struct drm_device *dev, void *res) -{ - struct ast_i2c_chan *i2c = res; - - i2c_del_adapter(&i2c->adapter); - kfree(i2c); -} - -struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev) -{ - struct ast_i2c_chan *i2c; - int ret; - - i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL); - if (!i2c) - return NULL; - - i2c->adapter.owner = THIS_MODULE; - i2c->adapter.dev.parent = dev->dev; - i2c->dev = dev; - i2c_set_adapdata(&i2c->adapter, i2c); - snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), - "AST i2c bit bus"); - i2c->adapter.algo_data = &i2c->bit; - - i2c->bit.udelay = 20; - i2c->bit.timeout = 2; - i2c->bit.data = i2c; - i2c->bit.setsda = ast_i2c_setsda; - i2c->bit.setscl = ast_i2c_setscl; - i2c->bit.getsda = ast_i2c_getsda; - i2c->bit.getscl = ast_i2c_getscl; - ret = i2c_bit_add_bus(&i2c->adapter); - if (ret) { - drm_err(dev, "Failed to register bit i2c\n"); - goto out_kfree; - } - - ret = drmm_add_action_or_reset(dev, ast_i2c_release, i2c); - if (ret) - return NULL; - return i2c; - -out_kfree: - kfree(i2c); - return NULL; -} diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 2f3ad5f949..0637abb703 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -26,6 +26,7 @@ * Authors: Dave Airlie */ +#include #include #include diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index a718646a66..6695af7076 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -43,9 +43,11 @@ #include #include #include +#include #include #include +#include "ast_ddc.h" #include "ast_drv.h" #include "ast_tables.h" @@ -700,12 +702,29 @@ static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane, 
ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x1, 0xdf, 0x20); } +static int ast_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane, + struct drm_scanout_buffer *sb) +{ + struct ast_plane *ast_plane = to_ast_plane(plane); + + if (plane->state && plane->state->fb && ast_plane->vaddr) { + sb->format = plane->state->fb->format; + sb->width = plane->state->fb->width; + sb->height = plane->state->fb->height; + sb->pitch[0] = plane->state->fb->pitches[0]; + iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane->vaddr); + return 0; + } + return -ENODEV; +} + static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = { DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, .atomic_check = ast_primary_plane_helper_atomic_check, .atomic_update = ast_primary_plane_helper_atomic_update, .atomic_enable = ast_primary_plane_helper_atomic_enable, .atomic_disable = ast_primary_plane_helper_atomic_disable, + .get_scanout_buffer = ast_primary_plane_helper_get_scanout_buffer, }; static const struct drm_plane_funcs ast_primary_plane_funcs = { @@ -1343,43 +1362,9 @@ static int ast_crtc_init(struct drm_device *dev) * VGA Connector */ -static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) -{ - struct ast_vga_connector *ast_vga_connector = to_ast_vga_connector(connector); - struct drm_device *dev = connector->dev; - struct ast_device *ast = to_ast_device(dev); - struct edid *edid; - int count; - - if (!ast_vga_connector->i2c) - goto err_drm_connector_update_edid_property; - - /* - * Protect access to I/O registers from concurrent modesetting - * by acquiring the I/O-register lock. - */ - mutex_lock(&ast->modeset_lock); - - edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter); - if (!edid) - goto err_mutex_unlock; - - mutex_unlock(&ast->modeset_lock); - - count = drm_add_edid_modes(connector, edid); - kfree(edid); - - return count; - -err_mutex_unlock: - mutex_unlock(&ast->modeset_lock); -err_drm_connector_update_edid_property: - drm_connector_update_edid_property(connector, NULL); - return 0; -} - static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = { - .get_modes = ast_vga_connector_helper_get_modes, + .get_modes = drm_connector_helper_get_modes, + .detect_ctx = drm_connector_helper_detect_from_ddc, }; static const struct drm_connector_funcs ast_vga_connector_funcs = { @@ -1390,23 +1375,21 @@ static const struct drm_connector_funcs ast_vga_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int ast_vga_connector_init(struct drm_device *dev, - struct ast_vga_connector *ast_vga_connector) +static int ast_vga_connector_init(struct drm_device *dev, struct drm_connector *connector) { - struct drm_connector *connector = &ast_vga_connector->base; + struct ast_device *ast = to_ast_device(dev); + struct i2c_adapter *ddc; int ret; - ast_vga_connector->i2c = ast_i2c_create(dev); - if (!ast_vga_connector->i2c) - drm_err(dev, "failed to add ddc bus for connector\n"); + ddc = ast_ddc_create(ast); + if (IS_ERR(ddc)) { + ret = PTR_ERR(ddc); + drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret); + return ret; + } - if (ast_vga_connector->i2c) - ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs, - DRM_MODE_CONNECTOR_VGA, - &ast_vga_connector->i2c->adapter); - else - ret = drm_connector_init(dev, connector, &ast_vga_connector_funcs, - DRM_MODE_CONNECTOR_VGA); + ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs, + DRM_MODE_CONNECTOR_VGA, ddc); if (ret) 
return ret; @@ -1415,7 +1398,7 @@ static int ast_vga_connector_init(struct drm_device *dev, connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; return 0; } @@ -1425,8 +1408,7 @@ static int ast_vga_output_init(struct ast_device *ast) struct drm_device *dev = &ast->base; struct drm_crtc *crtc = &ast->crtc; struct drm_encoder *encoder = &ast->output.vga.encoder; - struct ast_vga_connector *ast_vga_connector = &ast->output.vga.vga_connector; - struct drm_connector *connector = &ast_vga_connector->base; + struct drm_connector *connector = &ast->output.vga.connector; int ret; ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC); @@ -1434,7 +1416,7 @@ static int ast_vga_output_init(struct ast_device *ast) return ret; encoder->possible_crtcs = drm_crtc_mask(crtc); - ret = ast_vga_connector_init(dev, ast_vga_connector); + ret = ast_vga_connector_init(dev, connector); if (ret) return ret; @@ -1449,43 +1431,9 @@ static int ast_vga_output_init(struct ast_device *ast) * SIL164 Connector */ -static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector) -{ - struct ast_sil164_connector *ast_sil164_connector = to_ast_sil164_connector(connector); - struct drm_device *dev = connector->dev; - struct ast_device *ast = to_ast_device(dev); - struct edid *edid; - int count; - - if (!ast_sil164_connector->i2c) - goto err_drm_connector_update_edid_property; - - /* - * Protect access to I/O registers from concurrent modesetting - * by acquiring the I/O-register lock. - */ - mutex_lock(&ast->modeset_lock); - - edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter); - if (!edid) - goto err_mutex_unlock; - - mutex_unlock(&ast->modeset_lock); - - count = drm_add_edid_modes(connector, edid); - kfree(edid); - - return count; - -err_mutex_unlock: - mutex_unlock(&ast->modeset_lock); -err_drm_connector_update_edid_property: - drm_connector_update_edid_property(connector, NULL); - return 0; -} - static const struct drm_connector_helper_funcs ast_sil164_connector_helper_funcs = { - .get_modes = ast_sil164_connector_helper_get_modes, + .get_modes = drm_connector_helper_get_modes, + .detect_ctx = drm_connector_helper_detect_from_ddc, }; static const struct drm_connector_funcs ast_sil164_connector_funcs = { @@ -1496,23 +1444,21 @@ static const struct drm_connector_funcs ast_sil164_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int ast_sil164_connector_init(struct drm_device *dev, - struct ast_sil164_connector *ast_sil164_connector) +static int ast_sil164_connector_init(struct drm_device *dev, struct drm_connector *connector) { - struct drm_connector *connector = &ast_sil164_connector->base; + struct ast_device *ast = to_ast_device(dev); + struct i2c_adapter *ddc; int ret; - ast_sil164_connector->i2c = ast_i2c_create(dev); - if (!ast_sil164_connector->i2c) - drm_err(dev, "failed to add ddc bus for connector\n"); + ddc = ast_ddc_create(ast); + if (IS_ERR(ddc)) { + ret = PTR_ERR(ddc); + drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret); + return ret; + } - if (ast_sil164_connector->i2c) - ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs, - DRM_MODE_CONNECTOR_DVII, - &ast_sil164_connector->i2c->adapter); - else - ret = drm_connector_init(dev, connector, &ast_sil164_connector_funcs, - DRM_MODE_CONNECTOR_DVII); + ret = 
drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs, + DRM_MODE_CONNECTOR_DVII, ddc); if (ret) return ret; @@ -1521,7 +1467,7 @@ static int ast_sil164_connector_init(struct drm_device *dev, connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; return 0; } @@ -1531,8 +1477,7 @@ static int ast_sil164_output_init(struct ast_device *ast) struct drm_device *dev = &ast->base; struct drm_crtc *crtc = &ast->crtc; struct drm_encoder *encoder = &ast->output.sil164.encoder; - struct ast_sil164_connector *ast_sil164_connector = &ast->output.sil164.sil164_connector; - struct drm_connector *connector = &ast_sil164_connector->base; + struct drm_connector *connector = &ast->output.sil164.connector; int ret; ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); @@ -1540,7 +1485,7 @@ static int ast_sil164_output_init(struct ast_device *ast) return ret; encoder->possible_crtcs = drm_crtc_mask(crtc); - ret = ast_sil164_connector_init(dev, ast_sil164_connector); + ret = ast_sil164_connector_init(dev, connector); if (ret) return ret; @@ -1952,13 +1897,13 @@ int ast_mode_config_init(struct ast_device *ast) ret = ast_vga_output_init(ast); if (ret) return ret; - physical_connector = &ast->output.vga.vga_connector.base; + physical_connector = &ast->output.vga.connector; } if (ast->tx_chip_types & AST_TX_SIL164_BIT) { ret = ast_sil164_output_init(ast); if (ret) return ret; - physical_connector = &ast->output.sil164.sil164_connector.base; + physical_connector = &ast->output.sil164.connector; } if (ast->tx_chip_types & AST_TX_DP501_BIT) { ret = ast_dp501_output_init(ast); @@ -1978,7 +1923,9 @@ int ast_mode_config_init(struct ast_device *ast) drm_mode_config_reset(dev); - drm_kms_helper_poll_init(dev); + ret = drmm_kms_helper_poll_init(dev); + if (ret) + return ret; return 0; } diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index efd996f6c1..c621be1a99 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -96,9 +96,8 @@ config DRM_ITE_IT6505 select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HDCP_HELPER select DRM_DISPLAY_HELPER - select DRM_DP_AUX_BUS + select DRM_DISPLAY_DP_AUX_BUS select DRM_KMS_HELPER - select DRM_DP_HELPER select EXTCON select CRYPTO select CRYPTO_HASH @@ -190,6 +189,13 @@ config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW to DP++. This is used with the i.MX6 imx-ldb driver. You are likely to say N here. +config DRM_MICROCHIP_LVDS_SERIALIZER + tristate "Microchip LVDS serializer support" + depends on OF + depends on DRM_ATMEL_HLCDC + help + Support for Microchip's LVDS serializer. 
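The new DRM_MICROCHIP_LVDS_SERIALIZER option enables the microchip-lvds.c bridge driver that is added in full further down in this patch. That driver is essentially a pass-through bridge: it powers the on-SoC LVDS serializer and hands connector handling to the downstream panel bridge. The fragment below is only a condensed sketch of that pattern under hypothetical sketch_lvds_* names; it is not the driver itself, and it omits the clocking, runtime-PM and LVDSC register programming the real probe/enable paths perform.

/*
 * Condensed sketch only: the sketch_lvds_* names are hypothetical, and the
 * error handling, clock and LVDSC register setup of the real driver
 * (added later in this patch) are left out.
 */
#include <linux/err.h>
#include <linux/platform_device.h>

#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_of.h>

struct sketch_lvds {
	struct drm_bridge bridge;
	struct drm_bridge *panel_bridge;
};

static int sketch_lvds_attach(struct drm_bridge *bridge,
			      enum drm_bridge_attach_flags flags)
{
	struct sketch_lvds *lvds = container_of(bridge, struct sketch_lvds, bridge);

	/* Chain to the downstream panel bridge; it owns the connector. */
	return drm_bridge_attach(bridge->encoder, lvds->panel_bridge,
				 bridge, flags);
}

static const struct drm_bridge_funcs sketch_lvds_bridge_funcs = {
	.attach = sketch_lvds_attach,
};

static int sketch_lvds_probe(struct platform_device *pdev)
{
	struct sketch_lvds *lvds;

	lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
	if (!lvds)
		return -ENOMEM;

	/* Output side of the serializer: OF graph port 1, endpoint 0. */
	lvds->panel_bridge = devm_drm_of_get_bridge(&pdev->dev,
						    pdev->dev.of_node, 1, 0);
	if (IS_ERR(lvds->panel_bridge))
		return PTR_ERR(lvds->panel_bridge);

	lvds->bridge.funcs = &sketch_lvds_bridge_funcs;
	lvds->bridge.of_node = pdev->dev.of_node;
	lvds->bridge.type = DRM_MODE_CONNECTOR_LVDS;

	platform_set_drvdata(pdev, lvds);
	drm_bridge_add(&lvds->bridge);

	return 0;
}

The driver proper wires this skeleton into a platform_driver and extends it with clock handling, runtime PM and the serializer enable sequence, as shown in the microchip-lvds.c hunks later in this patch.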
+ config DRM_NWL_MIPI_DSI tristate "Northwest Logic MIPI DSI Host controller" depends on DRM @@ -229,7 +235,7 @@ config DRM_PARADE_PS8640 depends on OF select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER - select DRM_DP_AUX_BUS + select DRM_DISPLAY_DP_AUX_BUS select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL @@ -389,7 +395,7 @@ config DRM_TI_SN65DSI86 select DRM_PANEL select DRM_MIPI_DSI select AUXILIARY_BUS - select DRM_DP_AUX_BUS + select DRM_DISPLAY_DP_AUX_BUS help Texas Instruments SN65DSI86 DSI to eDP Bridge driver diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 017b583273..7df87b582d 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o +obj-$(CONFIG_DRM_MICROCHIP_LVDS_SERIALIZER) += microchip-lvds.o obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index 39c9ece373..ec0b7f3d88 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -356,6 +356,7 @@ struct adv7511 { enum drm_connector_status status; bool powered; + struct drm_bridge *next_bridge; struct drm_display_mode curr_mode; unsigned int f_tmds; @@ -400,7 +401,7 @@ struct adv7511 { #ifdef CONFIG_DRM_I2C_ADV7511_CEC int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511); -void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); +int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); #else static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) { diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c index 44451a9658..2e9c88a2b5 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c @@ -119,7 +119,7 @@ static void adv7511_cec_rx(struct adv7511 *adv7511, int rx_buf) cec_received_msg(adv7511->cec_adap, &msg); } -void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) +int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) { unsigned int offset = adv7511->info->reg_cec_offset; const u32 irq_tx_mask = ADV7511_INT1_CEC_TX_READY | @@ -131,16 +131,19 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) unsigned int rx_status; int rx_order[3] = { -1, -1, -1 }; int i; + int irq_status = IRQ_NONE; - if (irq1 & irq_tx_mask) + if (irq1 & irq_tx_mask) { adv_cec_tx_raw_status(adv7511, irq1); + irq_status = IRQ_HANDLED; + } if (!(irq1 & irq_rx_mask)) - return; + return irq_status; if (regmap_read(adv7511->regmap_cec, ADV7511_REG_CEC_RX_STATUS + offset, &rx_status)) - return; + return irq_status; /* * ADV7511_REG_CEC_RX_STATUS[5:0] contains the reception order of RX @@ -172,6 +175,8 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) adv7511_cec_rx(adv7511, rx_buf); } + + return IRQ_HANDLED; } static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable) diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index b5518ff971..c8d2c4a157 100644 --- 
a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -468,6 +469,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd) { unsigned int irq0, irq1; int ret; + int cec_status = IRQ_NONE; + int irq_status = IRQ_NONE; ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0); if (ret < 0) @@ -480,21 +483,28 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd) regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0); regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); - if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder) + if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder) { schedule_work(&adv7511->hpd_work); + irq_status = IRQ_HANDLED; + } if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { adv7511->edid_read = true; if (adv7511->i2c_main->irq) wake_up_all(&adv7511->wq); + irq_status = IRQ_HANDLED; } #ifdef CONFIG_DRM_I2C_ADV7511_CEC - adv7511_cec_irq_process(adv7511, irq1); + cec_status = adv7511_cec_irq_process(adv7511, irq1); #endif - return 0; + /* If there is no IRQ to handle, exit indicating no IRQ data */ + if (irq_status == IRQ_HANDLED || cec_status == IRQ_HANDLED) + return IRQ_HANDLED; + + return IRQ_NONE; } static irqreturn_t adv7511_irq_handler(int irq, void *devid) @@ -503,7 +513,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid) int ret; ret = adv7511_irq_process(adv7511, true); - return ret < 0 ? IRQ_NONE : IRQ_HANDLED; + return ret < 0 ? IRQ_NONE : ret; } /* ----------------------------------------------------------------------------- @@ -946,6 +956,13 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge, struct adv7511 *adv = bridge_to_adv7511(bridge); int ret = 0; + if (adv->next_bridge) { + ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge, + flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + return ret; + } + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { ret = adv7511_connector_init(adv); if (ret < 0) @@ -1216,6 +1233,11 @@ static int adv7511_probe(struct i2c_client *i2c) memset(&link_config, 0, sizeof(link_config)); + ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, NULL, + &adv7511->next_bridge); + if (ret && ret != -ENODEV) + return ret; + if (adv7511->info->link_config) ret = adv7511_parse_dt(dev->of_node, &link_config); else @@ -1318,7 +1340,8 @@ static int adv7511_probe(struct i2c_client *i2c) ret = devm_request_threaded_irq(dev, i2c->irq, NULL, adv7511_irq_handler, - IRQF_ONESHOT, dev_name(dev), + IRQF_ONESHOT | IRQF_SHARED, + dev_name(dev), adv7511); if (ret) goto err_unregister_audio; diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig index 173dada218..4846b2e9be 100644 --- a/drivers/gpu/drm/bridge/analogix/Kconfig +++ b/drivers/gpu/drm/bridge/analogix/Kconfig @@ -37,7 +37,7 @@ config DRM_ANALOGIX_ANX7625 select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HDCP_HELPER select DRM_DISPLAY_HELPER - select DRM_DP_AUX_BUS + select DRM_DISPLAY_DP_AUX_BUS select DRM_MIPI_DSI help ANX7625 is an ultra-low power 4K mobile HD transmitter diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c index ff3284b6b1..9eecac457d 100644 --- a/drivers/gpu/drm/bridge/chipone-icn6211.c +++ b/drivers/gpu/drm/bridge/chipone-icn6211.c @@ -781,7 +781,6 @@ static struct mipi_dsi_driver chipone_dsi_driver = { .remove = 
chipone_dsi_remove, .driver = { .name = "chipone-icn6211", - .owner = THIS_MODULE, .of_match_table = chipone_of_match, }, }; diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c index f2a09c879e..073e64dc20 100644 --- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c @@ -173,15 +173,13 @@ static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev) return 0; } -static int imx8mp_hdmi_pvi_remove(struct platform_device *pdev) +static void imx8mp_hdmi_pvi_remove(struct platform_device *pdev) { struct imx8mp_hdmi_pvi *pvi = platform_get_drvdata(pdev); drm_bridge_remove(&pvi->bridge); pm_runtime_disable(&pdev->dev); - - return 0; } static const struct of_device_id imx8mp_hdmi_pvi_match[] = { @@ -195,7 +193,7 @@ MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pvi_match); static struct platform_driver imx8mp_hdmi_pvi_driver = { .probe = imx8mp_hdmi_pvi_probe, - .remove = imx8mp_hdmi_pvi_remove, + .remove_new = imx8mp_hdmi_pvi_remove, .driver = { .name = "imx-hdmi-pvi", .of_match_table = imx8mp_hdmi_pvi_match, diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c index 89fc432ac6..13bc570c54 100644 --- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c @@ -104,13 +104,11 @@ static int imx8mp_dw_hdmi_probe(struct platform_device *pdev) return 0; } -static int imx8mp_dw_hdmi_remove(struct platform_device *pdev) +static void imx8mp_dw_hdmi_remove(struct platform_device *pdev) { struct imx8mp_hdmi *hdmi = platform_get_drvdata(pdev); dw_hdmi_remove(hdmi->dw_hdmi); - - return 0; } static int __maybe_unused imx8mp_dw_hdmi_pm_suspend(struct device *dev) @@ -140,7 +138,7 @@ MODULE_DEVICE_TABLE(of, imx8mp_dw_hdmi_of_table); static struct platform_driver imx8mp_dw_hdmi_platform_driver = { .probe = imx8mp_dw_hdmi_probe, - .remove = imx8mp_dw_hdmi_remove, + .remove_new = imx8mp_dw_hdmi_remove, .driver = { .name = "imx8mp-dw-hdmi-tx", .of_match_table = imx8mp_dw_hdmi_of_table, diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index 27334173e9..cf59347d3d 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -3,6 +3,7 @@ * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
*/ #include +#include #include #include #include @@ -1306,9 +1307,15 @@ static void it6505_video_reset(struct it6505 *it6505) it6505_link_reset_step_train(it6505); it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, EN_VID_MUTE); it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_VID_CTRL_PKT, 0x00); - it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET); + + it6505_set_bits(it6505, REG_VID_BUS_CTRL1, TX_FIFO_RESET, TX_FIFO_RESET); + it6505_set_bits(it6505, REG_VID_BUS_CTRL1, TX_FIFO_RESET, 0x00); + it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, RST_501_FIFO); it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, 0x00); + + it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET); + usleep_range(1000, 2000); it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, 0x00); } @@ -2244,12 +2251,11 @@ static void it6505_link_training_work(struct work_struct *work) if (ret) { it6505->auto_train_retry = AUTO_TRAIN_RETRY; it6505_link_train_ok(it6505); - return; } else { it6505->auto_train_retry--; + it6505_dump(it6505); } - it6505_dump(it6505); } static void it6505_plugged_status_to_codec(struct it6505 *it6505) @@ -2470,31 +2476,53 @@ static void it6505_irq_link_train_fail(struct it6505 *it6505) schedule_work(&it6505->link_works); } -static void it6505_irq_video_fifo_error(struct it6505 *it6505) +static bool it6505_test_bit(unsigned int bit, const unsigned int *addr) { - struct device *dev = it6505->dev; - - DRM_DEV_DEBUG_DRIVER(dev, "video fifo overflow interrupt"); - it6505->auto_train_retry = AUTO_TRAIN_RETRY; - flush_work(&it6505->link_works); - it6505_stop_hdcp(it6505); - it6505_video_reset(it6505); + return 1 & (addr[bit / BITS_PER_BYTE] >> (bit % BITS_PER_BYTE)); } -static void it6505_irq_io_latch_fifo_overflow(struct it6505 *it6505) +static void it6505_irq_video_handler(struct it6505 *it6505, const int *int_status) { struct device *dev = it6505->dev; + int reg_0d, reg_int03; - DRM_DEV_DEBUG_DRIVER(dev, "IO latch fifo overflow interrupt"); - it6505->auto_train_retry = AUTO_TRAIN_RETRY; - flush_work(&it6505->link_works); - it6505_stop_hdcp(it6505); - it6505_video_reset(it6505); -} + /* + * When video SCDT change with video not stable, + * Or video FIFO error, need video reset + */ -static bool it6505_test_bit(unsigned int bit, const unsigned int *addr) -{ - return 1 & (addr[bit / BITS_PER_BYTE] >> (bit % BITS_PER_BYTE)); + if ((!it6505_get_video_status(it6505) && + (it6505_test_bit(INT_SCDT_CHANGE, (unsigned int *)int_status))) || + (it6505_test_bit(BIT_INT_IO_FIFO_OVERFLOW, + (unsigned int *)int_status)) || + (it6505_test_bit(BIT_INT_VID_FIFO_ERROR, + (unsigned int *)int_status))) { + it6505->auto_train_retry = AUTO_TRAIN_RETRY; + flush_work(&it6505->link_works); + it6505_stop_hdcp(it6505); + it6505_video_reset(it6505); + + usleep_range(10000, 11000); + + /* + * Clear FIFO error IRQ to prevent fifo error -> reset loop + * HW will trigger SCDT change IRQ again when video stable + */ + + reg_int03 = it6505_read(it6505, INT_STATUS_03); + reg_0d = it6505_read(it6505, REG_SYSTEM_STS); + + reg_int03 &= (BIT(INT_VID_FIFO_ERROR) | BIT(INT_IO_LATCH_FIFO_OVERFLOW)); + it6505_write(it6505, INT_STATUS_03, reg_int03); + + DRM_DEV_DEBUG_DRIVER(dev, "reg08 = 0x%02x", reg_int03); + DRM_DEV_DEBUG_DRIVER(dev, "reg0D = 0x%02x", reg_0d); + + return; + } + + if (it6505_test_bit(INT_SCDT_CHANGE, (unsigned int *)int_status)) + it6505_irq_scdt(it6505); } static irqreturn_t it6505_int_threaded_handler(int unused, void *data) @@ -2507,15 +2535,12 @@ static irqreturn_t 
it6505_int_threaded_handler(int unused, void *data) } irq_vec[] = { { BIT_INT_HPD, it6505_irq_hpd }, { BIT_INT_HPD_IRQ, it6505_irq_hpd_irq }, - { BIT_INT_SCDT, it6505_irq_scdt }, { BIT_INT_HDCP_FAIL, it6505_irq_hdcp_fail }, { BIT_INT_HDCP_DONE, it6505_irq_hdcp_done }, { BIT_INT_AUX_CMD_FAIL, it6505_irq_aux_cmd_fail }, { BIT_INT_HDCP_KSV_CHECK, it6505_irq_hdcp_ksv_check }, { BIT_INT_AUDIO_FIFO_ERROR, it6505_irq_audio_fifo_error }, { BIT_INT_LINK_TRAIN_FAIL, it6505_irq_link_train_fail }, - { BIT_INT_VID_FIFO_ERROR, it6505_irq_video_fifo_error }, - { BIT_INT_IO_FIFO_OVERFLOW, it6505_irq_io_latch_fifo_overflow }, }; int int_status[3], i; @@ -2545,6 +2570,7 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data) if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status)) irq_vec[i].handler(it6505); } + it6505_irq_video_handler(it6505, (unsigned int *)int_status); } pm_runtime_put_sync(dev); diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c index 1c3433b5e3..925e42f46c 100644 --- a/drivers/gpu/drm/bridge/ite-it66121.c +++ b/drivers/gpu/drm/bridge/ite-it66121.c @@ -1540,12 +1540,6 @@ static int it66121_probe(struct i2c_client *client) return -EINVAL; } - if (!of_device_is_available(ep)) { - of_node_put(ep); - dev_err(ctx->dev, "The remote device is disabled\n"); - return -ENODEV; - } - ctx->next_bridge = of_drm_find_bridge(ep); of_node_put(ep); if (!ctx->next_bridge) { @@ -1586,13 +1580,18 @@ static int it66121_probe(struct i2c_client *client) ctx->bridge.funcs = &it66121_bridge_funcs; ctx->bridge.of_node = dev->of_node; ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA; - ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; - - ret = devm_request_threaded_irq(dev, client->irq, NULL, it66121_irq_threaded_handler, - IRQF_ONESHOT, dev_name(dev), ctx); - if (ret < 0) { - dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret); - return ret; + ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID; + if (client->irq > 0) { + ctx->bridge.ops |= DRM_BRIDGE_OP_HPD; + + ret = devm_request_threaded_irq(dev, client->irq, NULL, + it66121_irq_threaded_handler, + IRQF_ONESHOT, dev_name(dev), + ctx); + if (ret < 0) { + dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret); + return ret; + } } it66121_audio_codec_init(ctx, dev); diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c new file mode 100644 index 0000000000..b8313dad60 --- /dev/null +++ b/drivers/gpu/drm/bridge/microchip-lvds.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023 Microchip Technology Inc. 
and its subsidiaries + * + * Author: Manikandan Muralidharan + * Author: Dharma Balasubiramani + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define LVDS_POLL_TIMEOUT_MS 1000 + +/* LVDSC register offsets */ +#define LVDSC_CR 0x00 +#define LVDSC_CFGR 0x04 +#define LVDSC_SR 0x0C +#define LVDSC_WPMR 0xE4 + +/* Bitfields in LVDSC_CR (Control Register) */ +#define LVDSC_CR_SER_EN BIT(0) + +/* Bitfields in LVDSC_CFGR (Configuration Register) */ +#define LVDSC_CFGR_PIXSIZE_24BITS 0 +#define LVDSC_CFGR_DEN_POL_HIGH 0 +#define LVDSC_CFGR_DC_UNBALANCED 0 +#define LVDSC_CFGR_MAPPING_JEIDA BIT(6) + +/*Bitfields in LVDSC_SR */ +#define LVDSC_SR_CS BIT(0) + +/* Bitfields in LVDSC_WPMR (Write Protection Mode Register) */ +#define LVDSC_WPMR_WPKEY_MASK GENMASK(31, 8) +#define LVDSC_WPMR_WPKEY_PSSWD 0x4C5644 + +struct mchp_lvds { + struct device *dev; + void __iomem *regs; + struct clk *pclk; + struct drm_panel *panel; + struct drm_bridge bridge; + struct drm_bridge *panel_bridge; +}; + +static inline struct mchp_lvds *bridge_to_lvds(struct drm_bridge *bridge) +{ + return container_of(bridge, struct mchp_lvds, bridge); +} + +static inline u32 lvds_readl(struct mchp_lvds *lvds, u32 offset) +{ + return readl_relaxed(lvds->regs + offset); +} + +static inline void lvds_writel(struct mchp_lvds *lvds, u32 offset, u32 val) +{ + writel_relaxed(val, lvds->regs + offset); +} + +static void lvds_serialiser_on(struct mchp_lvds *lvds) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(LVDS_POLL_TIMEOUT_MS); + + /* The LVDSC registers can only be written if WPEN is cleared */ + lvds_writel(lvds, LVDSC_WPMR, (LVDSC_WPMR_WPKEY_PSSWD & + LVDSC_WPMR_WPKEY_MASK)); + + /* Wait for the status of configuration registers to be changed */ + while (lvds_readl(lvds, LVDSC_SR) & LVDSC_SR_CS) { + if (time_after(jiffies, timeout)) { + dev_err(lvds->dev, "%s: timeout error\n", __func__); + return; + } + usleep_range(1000, 2000); + } + + /* Configure the LVDSC */ + lvds_writel(lvds, LVDSC_CFGR, (LVDSC_CFGR_MAPPING_JEIDA | + LVDSC_CFGR_DC_UNBALANCED | + LVDSC_CFGR_DEN_POL_HIGH | + LVDSC_CFGR_PIXSIZE_24BITS)); + + /* Enable the LVDS serializer */ + lvds_writel(lvds, LVDSC_CR, LVDSC_CR_SER_EN); +} + +static int mchp_lvds_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct mchp_lvds *lvds = bridge_to_lvds(bridge); + + return drm_bridge_attach(bridge->encoder, lvds->panel_bridge, + bridge, flags); +} + +static void mchp_lvds_enable(struct drm_bridge *bridge) +{ + struct mchp_lvds *lvds = bridge_to_lvds(bridge); + int ret; + + ret = clk_prepare_enable(lvds->pclk); + if (ret < 0) { + dev_err(lvds->dev, "failed to enable lvds pclk %d\n", ret); + return; + } + + ret = pm_runtime_get_sync(lvds->dev); + if (ret < 0) { + dev_err(lvds->dev, "failed to get pm runtime: %d\n", ret); + return; + } + + lvds_serialiser_on(lvds); +} + +static void mchp_lvds_disable(struct drm_bridge *bridge) +{ + struct mchp_lvds *lvds = bridge_to_lvds(bridge); + + pm_runtime_put(lvds->dev); + clk_disable_unprepare(lvds->pclk); +} + +static const struct drm_bridge_funcs mchp_lvds_bridge_funcs = { + .attach = mchp_lvds_attach, + .enable = mchp_lvds_enable, + .disable = mchp_lvds_disable, +}; + +static int mchp_lvds_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mchp_lvds *lvds; + struct device_node *port; + int ret; + + if (!dev->of_node) + 
return -ENODEV; + + lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL); + if (!lvds) + return -ENOMEM; + + lvds->dev = dev; + + lvds->regs = devm_ioremap_resource(lvds->dev, + platform_get_resource(pdev, IORESOURCE_MEM, 0)); + if (IS_ERR(lvds->regs)) + return PTR_ERR(lvds->regs); + + lvds->pclk = devm_clk_get(lvds->dev, "pclk"); + if (IS_ERR(lvds->pclk)) + return dev_err_probe(lvds->dev, PTR_ERR(lvds->pclk), + "could not get pclk_lvds\n"); + + port = of_graph_get_remote_node(dev->of_node, 1, 0); + if (!port) { + dev_err(dev, + "can't find port point, please init lvds panel port!\n"); + return -ENODEV; + } + + lvds->panel = of_drm_find_panel(port); + of_node_put(port); + + if (IS_ERR(lvds->panel)) + return -EPROBE_DEFER; + + lvds->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); + + if (IS_ERR(lvds->panel_bridge)) + return PTR_ERR(lvds->panel_bridge); + + lvds->bridge.of_node = dev->of_node; + lvds->bridge.type = DRM_MODE_CONNECTOR_LVDS; + lvds->bridge.funcs = &mchp_lvds_bridge_funcs; + + dev_set_drvdata(dev, lvds); + ret = devm_pm_runtime_enable(dev); + if (ret < 0) { + dev_err(lvds->dev, "failed to enable pm runtime: %d\n", ret); + return ret; + } + + drm_bridge_add(&lvds->bridge); + + return 0; +} + +static const struct of_device_id mchp_lvds_dt_ids[] = { + { + .compatible = "microchip,sam9x75-lvds", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, mchp_lvds_dt_ids); + +static struct platform_driver mchp_lvds_driver = { + .probe = mchp_lvds_probe, + .driver = { + .name = "microchip-lvds", + .of_match_table = mchp_lvds_dt_ids, + }, +}; +module_platform_driver(mchp_lvds_driver); + +MODULE_AUTHOR("Manikandan Muralidharan "); +MODULE_AUTHOR("Dharma Balasubiramani "); +MODULE_DESCRIPTION("Low Voltage Differential Signaling Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 3d6e8f096a..fe5fb08c9f 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -4,6 +4,8 @@ * Copyright (C) 2017 Broadcom */ +#include + #include #include #include diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c index 95fedc68b0..8476650c47 100644 --- a/drivers/gpu/drm/bridge/samsung-dsim.c +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -574,8 +574,8 @@ static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi, u16 _m, best_m; u8 _s, best_s; - p_min = DIV_ROUND_UP(fin, (12 * MHZ)); - p_max = fin / (6 * MHZ); + p_min = DIV_ROUND_UP(fin, (driver_data->pll_fin_max * MHZ)); + p_max = fin / (driver_data->pll_fin_min * MHZ); for (_p = p_min; _p <= p_max; ++_p) { for (_s = 0; _s <= 5; ++_s) { diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 8f84e98249..2fbeda9025 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -1092,7 +1092,7 @@ static int sii902x_init(struct sii902x *sii902x) } sii902x->i2cmux->priv = sii902x; - ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0); + ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0); if (ret) goto err_unreg_audio; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index cceb5aab6c..9f2bc932c3 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -3291,40 +3291,17 @@ static void dw_hdmi_init_hw(struct dw_hdmi *hdmi) static int dw_hdmi_parse_dt(struct dw_hdmi *hdmi) { - struct device_node *endpoint; struct device_node *remote; if 
(!hdmi->plat_data->output_port) return 0; - endpoint = of_graph_get_endpoint_by_regs(hdmi->dev->of_node, - hdmi->plat_data->output_port, - -1); - if (!endpoint) { - /* - * On platforms whose bindings don't make the output port - * mandatory (such as Rockchip) the plat_data->output_port - * field isn't set, so it's safe to make this a fatal error. - */ - dev_err(hdmi->dev, "Missing endpoint in port@%u\n", - hdmi->plat_data->output_port); - return -ENODEV; - } - remote = of_graph_get_remote_port_parent(endpoint); - of_node_put(endpoint); - if (!remote) { - dev_err(hdmi->dev, "Endpoint in port@%u unconnected\n", - hdmi->plat_data->output_port); + remote = of_graph_get_remote_node(hdmi->dev->of_node, + hdmi->plat_data->output_port, + -1); + if (!remote) return -ENODEV; - } - - if (!of_device_is_available(remote)) { - dev_err(hdmi->dev, "port@%u remote device is disabled\n", - hdmi->plat_data->output_port); - of_node_put(remote); - return -ENODEV; - } hdmi->next_bridge = of_drm_find_bridge(remote); of_node_put(remote); diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c index deccb39950..3d3d135b43 100644 --- a/drivers/gpu/drm/bridge/tc358764.c +++ b/drivers/gpu/drm/bridge/tc358764.c @@ -401,7 +401,6 @@ static struct mipi_dsi_driver tc358764_driver = { .remove = tc358764_remove, .driver = { .name = "tc358764", - .owner = THIS_MODULE, .of_match_table = tc358764_of_match, }, }; diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c index c737670631..3b7cc3be2c 100644 --- a/drivers/gpu/drm/bridge/tc358775.c +++ b/drivers/gpu/drm/bridge/tc358775.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -107,6 +108,7 @@ #define RDPKTLN 0x0404 /* Command Read Packet Length */ #define VPCTRL 0x0450 /* Video Path Control */ +#define EVTMODE BIT(5) /* Video event mode enable, tc35876x only */ #define HTIM1 0x0454 /* Horizontal Timing Control 1 */ #define HTIM2 0x0458 /* Horizontal Timing Control 2 */ #define VTIM1 0x045C /* Vertical Timing Control 1 */ @@ -254,6 +256,11 @@ enum tc358775_ports { TC358775_LVDS_OUT1, }; +enum tc3587x5_type { + TC358765 = 0x65, + TC358775 = 0x75, +}; + struct tc_data { struct i2c_client *i2c; struct device *dev; @@ -271,6 +278,8 @@ struct tc_data { struct gpio_desc *stby_gpio; u8 lvds_link; /* single-link or dual-link */ u8 bpc; + + enum tc3587x5_type type; }; static inline struct tc_data *bridge_to_tc(struct drm_bridge *b) @@ -424,10 +433,16 @@ static void tc_bridge_enable(struct drm_bridge *bridge) d2l_write(tc->i2c, PPI_STARTPPI, PPI_START_FUNCTION); d2l_write(tc->i2c, DSI_STARTDSI, DSI_RX_START); + /* Video event mode vs pulse mode bit, does not exist for tc358775 */ + if (tc->type == TC358765) + val = EVTMODE; + else + val = 0; + if (tc->bpc == 8) - val = TC358775_VPCTRL_OPXLFMT(1); + val |= TC358775_VPCTRL_OPXLFMT(1); else /* bpc = 6; */ - val = TC358775_VPCTRL_MSF(1); + val |= TC358775_VPCTRL_MSF(1); dsiclk = mode->crtc_clock * 3 * tc->bpc / tc->num_dsi_lanes / 1000; clkdiv = dsiclk / (tc->lvds_link == DUAL_LINK ? 
DIVIDE_BY_6 : DIVIDE_BY_3); @@ -525,27 +540,24 @@ tc_mode_valid(struct drm_bridge *bridge, static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc) { struct device_node *endpoint; - struct device_node *parent; struct device_node *remote; int dsi_lanes = -1; - /* - * To get the data-lanes of dsi, we need to access the dsi0_out of port1 - * of dsi0 endpoint from bridge port0 of d2l_in - */ endpoint = of_graph_get_endpoint_by_regs(tc->dev->of_node, TC358775_DSI_IN, -1); - if (endpoint) { - /* dsi0_out node */ - parent = of_graph_get_remote_port_parent(endpoint); - of_node_put(endpoint); - if (parent) { - /* dsi0 port 1 */ - dsi_lanes = drm_of_get_data_lanes_count_ep(parent, 1, -1, 1, 4); - of_node_put(parent); - } + dsi_lanes = drm_of_get_data_lanes_count(endpoint, 1, 4); + + /* Quirk old dtb: Use data lanes from the DSI host side instead of bridge */ + if (dsi_lanes == -EINVAL || dsi_lanes == -ENODEV) { + remote = of_graph_get_remote_endpoint(endpoint); + dsi_lanes = drm_of_get_data_lanes_count(remote, 1, 4); + of_node_put(remote); + if (dsi_lanes >= 1) + dev_warn(tc->dev, "no dsi-lanes for the bridge, using host lanes\n"); } + of_node_put(endpoint); + if (dsi_lanes < 0) return dsi_lanes; @@ -620,7 +632,21 @@ static int tc_attach_host(struct tc_data *tc) dsi->lanes = tc->num_dsi_lanes; dsi->format = MIPI_DSI_FMT_RGB888; - dsi->mode_flags = MIPI_DSI_MODE_VIDEO; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM; + + /* + * The hs_rate and lp_rate are data rate values. The HS mode is + * differential, while the LP mode is single ended. As the HS mode + * uses DDR, the DSI clock frequency is half the hs_rate. The 10 Mbs + * data rate for LP mode is not specified in the bridge data sheet, + * but seems to be part of the MIPI DSI spec. 
+ */ + if (tc->type == TC358765) + dsi->hs_rate = 800000000; + else + dsi->hs_rate = 1000000000; + dsi->lp_rate = 10000000; ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { @@ -643,6 +669,7 @@ static int tc_probe(struct i2c_client *client) tc->dev = dev; tc->i2c = client; + tc->type = (enum tc3587x5_type)(unsigned long)of_device_get_match_data(dev); tc->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, TC358775_LVDS_OUT0, 0); @@ -667,12 +694,9 @@ static int tc_probe(struct i2c_client *client) return ret; } - tc->stby_gpio = devm_gpiod_get(dev, "stby", GPIOD_OUT_HIGH); - if (IS_ERR(tc->stby_gpio)) { - ret = PTR_ERR(tc->stby_gpio); - dev_err(dev, "cannot get stby-gpio %d\n", ret); - return ret; - } + tc->stby_gpio = devm_gpiod_get_optional(dev, "stby", GPIOD_OUT_HIGH); + if (IS_ERR(tc->stby_gpio)) + return PTR_ERR(tc->stby_gpio); tc->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(tc->reset_gpio)) { @@ -683,6 +707,7 @@ static int tc_probe(struct i2c_client *client) tc->bridge.funcs = &tc_bridge_funcs; tc->bridge.of_node = dev->of_node; + tc->bridge.pre_enable_prev_first = true; drm_bridge_add(&tc->bridge); i2c_set_clientdata(client, tc); @@ -706,13 +731,15 @@ static void tc_remove(struct i2c_client *client) } static const struct i2c_device_id tc358775_i2c_ids[] = { - { "tc358775", 0 }, + { "tc358765", TC358765, }, + { "tc358775", TC358775, }, { } }; MODULE_DEVICE_TABLE(i2c, tc358775_i2c_ids); static const struct of_device_id tc358775_of_ids[] = { - { .compatible = "toshiba,tc358775", }, + { .compatible = "toshiba,tc358765", .data = (void *)TC358765, }, + { .compatible = "toshiba,tc358775", .data = (void *)TC358775, }, { } }; MODULE_DEVICE_TABLE(of, tc358775_of_ids); diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c index d4c1a601bb..674efc489e 100644 --- a/drivers/gpu/drm/bridge/thc63lvd1024.c +++ b/drivers/gpu/drm/bridge/thc63lvd1024.c @@ -123,26 +123,11 @@ static int thc63_parse_dt(struct thc63_dev *thc63) struct device_node *endpoint; struct device_node *remote; - endpoint = of_graph_get_endpoint_by_regs(thc63->dev->of_node, - THC63_RGB_OUT0, -1); - if (!endpoint) { - dev_err(thc63->dev, "Missing endpoint in port@%u\n", - THC63_RGB_OUT0); - return -ENODEV; - } - - remote = of_graph_get_remote_port_parent(endpoint); - of_node_put(endpoint); + remote = of_graph_get_remote_node(thc63->dev->of_node, + THC63_RGB_OUT0, -1); if (!remote) { - dev_err(thc63->dev, "Endpoint in port@%u unconnected\n", - THC63_RGB_OUT0); - return -ENODEV; - } - - if (!of_device_is_available(remote)) { - dev_err(thc63->dev, "port@%u remote endpoint is disabled\n", + dev_err(thc63->dev, "No remote endpoint for port@%u\n", THC63_RGB_OUT0); - of_node_put(remote); return -ENODEV; } diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config index 8dbce9919a..4140303d62 100644 --- a/drivers/gpu/drm/ci/arm64.config +++ b/drivers/gpu/drm/ci/arm64.config @@ -87,7 +87,7 @@ CONFIG_DRM_PARADE_PS8640=y CONFIG_DRM_LONTIUM_LT9611UXC=y CONFIG_PHY_QCOM_USB_HS=y CONFIG_QCOM_GPI_DMA=y -CONFIG_USB_ONBOARD_HUB=y +CONFIG_USB_ONBOARD_DEV=y CONFIG_NVMEM_QCOM_QFPROM=y CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=y @@ -97,7 +97,7 @@ CONFIG_USB_RTL8152=y # db820c ethernet CONFIG_ATL1C=y # Chromebooks ethernet -CONFIG_USB_ONBOARD_HUB=y +CONFIG_USB_ONBOARD_DEV=y # 888 HDK ethernet CONFIG_USB_LAN78XX=y diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig index c0f56888c3..864a6488bf 100644 --- a/drivers/gpu/drm/display/Kconfig +++ 
b/drivers/gpu/drm/display/Kconfig @@ -1,15 +1,36 @@ # SPDX-License-Identifier: MIT -config DRM_DP_AUX_BUS +config DRM_DISPLAY_HELPER tristate depends on DRM - depends on OF || COMPILE_TEST + help + DRM helpers for display adapters. -config DRM_DISPLAY_HELPER +config DRM_DISPLAY_DP_AUX_BUS tristate depends on DRM + depends on OF || COMPILE_TEST + +config DRM_DISPLAY_DP_AUX_CEC + bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support" + depends on DRM && DRM_DISPLAY_HELPER + select DRM_DISPLAY_DP_HELPER + select CEC_CORE help - DRM helpers for display adapters. + Choose this option if you want to enable HDMI CEC support for + DisplayPort/USB-C to HDMI adapters. + + Note: not all adapters support this feature, and even for those + that do support this they often do not hook up the CEC pin. + +config DRM_DISPLAY_DP_AUX_CHARDEV + bool "DRM DP AUX Interface" + depends on DRM && DRM_DISPLAY_HELPER + select DRM_DISPLAY_DP_HELPER + help + Choose this option to enable a /dev/drm_dp_auxN node that allows to + read and write values to arbitrary DPCD registers on the DP aux + channel. config DRM_DISPLAY_DP_HELPER bool @@ -25,7 +46,7 @@ config DRM_DISPLAY_DP_TUNNEL DP tunnel features like the Bandwidth Allocation mode to maximize the BW utilization for display streams on Thunderbolt links. -config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE +config DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG bool "Enable debugging the DP tunnel state" depends on REF_TRACKER depends on DRM_DISPLAY_DP_TUNNEL @@ -49,24 +70,3 @@ config DRM_DISPLAY_HDMI_HELPER depends on DRM_DISPLAY_HELPER help DRM display helpers for HDMI. - -config DRM_DP_AUX_CHARDEV - bool "DRM DP AUX Interface" - depends on DRM && DRM_DISPLAY_HELPER - select DRM_DISPLAY_DP_HELPER - help - Choose this option to enable a /dev/drm_dp_auxN node that allows to - read and write values to arbitrary DPCD registers on the DP aux - channel. - -config DRM_DP_CEC - bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support" - depends on DRM && DRM_DISPLAY_HELPER - select DRM_DISPLAY_DP_HELPER - select CEC_CORE - help - Choose this option if you want to enable HDMI CEC support for - DisplayPort/USB-C to HDMI adapters. - - Note: not all adapters support this feature, and even for those - that do support this they often do not hook up the CEC pin. 
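The display/Kconfig changes above are largely a rename: DRM_DP_AUX_BUS, DRM_DP_AUX_CHARDEV and DRM_DP_CEC move into the DRM_DISPLAY_DP_AUX_* namespace, and the remaining hunks in this patch (the bridge Kconfig selects, the display Makefile and drm_dp_helper_internal.h below) only switch to the new symbol names. As a reminder of what such a bool helper option gates, here is a minimal sketch of the ifdef/stub idiom; the foo_dp_aux_*() names are hypothetical, but the internal DP helper header shown further down uses this same shape with the renamed CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV guard.

/*
 * Sketch of the config-guard idiom; the foo_dp_aux_*() helpers are
 * hypothetical. With the option disabled, callers build against
 * no-op static inline stubs instead of the real implementation.
 */
#ifdef CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV
int foo_dp_aux_dev_init(void);
void foo_dp_aux_dev_exit(void);
#else
static inline int foo_dp_aux_dev_init(void)
{
	/* /dev/drm_dp_auxN support compiled out: nothing to register. */
	return 0;
}

static inline void foo_dp_aux_dev_exit(void)
{
}
#endif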
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile index 7ca61333c6..17d2cc73ff 100644 --- a/drivers/gpu/drm/display/Makefile +++ b/drivers/gpu/drm/display/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: MIT -obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o +obj-$(CONFIG_DRM_DISPLAY_DP_AUX_BUS) += drm_dp_aux_bus.o drm_display_helper-y := drm_display_helper_mod.o drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \ @@ -14,7 +14,7 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \ drm_hdmi_helper.o \ drm_scdc_helper.o -drm_display_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o -drm_display_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o +drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV) += drm_dp_aux_dev.o +drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_AUX_CEC) += drm_dp_cec.o obj-$(CONFIG_DRM_DISPLAY_HELPER) += drm_display_helper.o diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index f5d4be8978..79a615667a 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -2113,7 +2113,7 @@ EXPORT_SYMBOL(drm_dp_aux_init); * drm_dp_aux_register() in &drm_connector_funcs.late_register, and likewise to * call drm_dp_aux_unregister() in &drm_connector_funcs.early_unregister. * Functions which don't follow this will likely Oops when - * %CONFIG_DRM_DP_AUX_CHARDEV is enabled. + * %CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV is enabled. * * For devices where the AUX channel is a device that exists independently of * the &drm_device that uses it, such as SoCs and bridge devices, it is @@ -2281,6 +2281,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = { { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) }, /* Synaptics DP1.4 MST hubs require DSC for some modes on which it applies HBLANK expansion. */ { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) }, + /* MediaTek panels (at least in U3224KBA) require DSC for modes with a short HBLANK on UHBR links. 
*/ + { OUI(0x00, 0x0C, 0xE7), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) }, /* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */ { OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) }, }; @@ -2948,6 +2950,43 @@ void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc) } EXPORT_SYMBOL(drm_dp_vsc_sdp_log); +void drm_dp_as_sdp_log(struct drm_printer *p, const struct drm_dp_as_sdp *as_sdp) +{ + drm_printf(p, "DP SDP: AS_SDP, revision %u, length %u\n", + as_sdp->revision, as_sdp->length); + drm_printf(p, " vtotal: %d\n", as_sdp->vtotal); + drm_printf(p, " target_rr: %d\n", as_sdp->target_rr); + drm_printf(p, " duration_incr_ms: %d\n", as_sdp->duration_incr_ms); + drm_printf(p, " duration_decr_ms: %d\n", as_sdp->duration_decr_ms); + drm_printf(p, " operation_mode: %d\n", as_sdp->mode); +} +EXPORT_SYMBOL(drm_dp_as_sdp_log); + +/** + * drm_dp_as_sdp_supported() - check if adaptive sync sdp is supported + * @aux: DisplayPort AUX channel + * @dpcd: DisplayPort configuration data + * + * Returns true if adaptive sync sdp is supported, else returns false + */ +bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + u8 rx_feature; + + if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13) + return false; + + if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1, + &rx_feature) != 1) { + drm_dbg_dp(aux->drm_dev, + "Failed to read DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1\n"); + return false; + } + + return (rx_feature & DP_ADAPTIVE_SYNC_SDP_SUPPORTED); +} +EXPORT_SYMBOL(drm_dp_as_sdp_supported); + /** * drm_dp_vsc_sdp_supported() - check if vsc sdp is supported * @aux: DisplayPort AUX channel diff --git a/drivers/gpu/drm/display/drm_dp_helper_internal.h b/drivers/gpu/drm/display/drm_dp_helper_internal.h index 8917fc3af9..737949a282 100644 --- a/drivers/gpu/drm/display/drm_dp_helper_internal.h +++ b/drivers/gpu/drm/display/drm_dp_helper_internal.h @@ -5,7 +5,7 @@ struct drm_dp_aux; -#ifdef CONFIG_DRM_DP_AUX_CHARDEV +#ifdef CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV int drm_dp_aux_dev_init(void); void drm_dp_aux_dev_exit(void); int drm_dp_aux_register_devnode(struct drm_dp_aux *aux); diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 95fd18f24e..68831f4e50 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -2274,7 +2274,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, if (port->pdt != DP_PEER_DEVICE_NONE && drm_dp_mst_is_end_device(port->pdt, port->mcs) && - port->port_num >= DP_MST_LOGICAL_PORT_0) + drm_dp_mst_port_is_logical(port)) port->cached_edid = drm_edid_read_ddc(port->connector, &port->aux.ddc); @@ -2929,7 +2929,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, /* FIXME: Actually do some real error handling here */ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); - if (ret <= 0) { + if (ret < 0) { drm_err(mgr->dev, "Sending link address failed with %d\n", ret); goto out; } @@ -2981,7 +2981,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, mutex_unlock(&mgr->lock); out: - if (ret <= 0) + if (ret < 0) mstb->link_address_sent = false; kfree(txmsg); return ret < 0 ? 
ret : changed; @@ -3606,24 +3606,30 @@ fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, EXPORT_SYMBOL(drm_dp_get_vc_payload_bw); /** - * drm_dp_read_mst_cap() - check whether or not a sink supports MST + * drm_dp_read_mst_cap() - Read the sink's MST mode capability * @aux: The DP AUX channel to use * @dpcd: A cached copy of the DPCD capabilities for this sink * - * Returns: %True if the sink supports MST, %false otherwise + * Returns: enum drm_dp_mst_mode to indicate MST mode capability */ -bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, - const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { u8 mstm_cap; if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12) - return false; + return DRM_DP_SST; if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1) - return false; + return DRM_DP_SST; - return mstm_cap & DP_MST_CAP; + if (mstm_cap & DP_MST_CAP) + return DRM_DP_MST; + + if (mstm_cap & DP_SINGLE_STREAM_SIDEBAND_MSG) + return DRM_DP_SST_SIDEBAND_MSG; + + return DRM_DP_SST; } EXPORT_SYMBOL(drm_dp_read_mst_cap); @@ -4211,7 +4217,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector, case DP_PEER_DEVICE_SST_SINK: ret = connector_status_connected; /* for logical ports - cache the EDID */ - if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid) + if (drm_dp_mst_port_is_logical(port) && !port->cached_edid) port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc); break; case DP_PEER_DEVICE_DP_LEGACY_CONV: @@ -5975,7 +5981,7 @@ static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port) return false; /* Virtual DP Sink (Internal Display Panel) */ - if (port->port_num >= 8) + if (drm_dp_mst_port_is_logical(port)) return true; /* DP-to-HDMI Protocol Converter */ @@ -6002,6 +6008,22 @@ static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port) return false; } +/** + * drm_dp_mst_aux_for_parent() - Get the AUX device for an MST port's parent + * @port: MST port whose parent's AUX device is returned + * + * Return the AUX device for @port's parent or NULL if port's parent is the + * root port. + */ +struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port) +{ + if (!port->parent || !port->parent->port_parent) + return NULL; + + return &port->parent->port_parent->aux; +} +EXPORT_SYMBOL(drm_dp_mst_aux_for_parent); + /** * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC * @port: The port to check. A leaf of the MST tree with an attached display. 
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h b/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h index a785ccbfdd..f41c34e26b 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h +++ b/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h @@ -10,7 +10,9 @@ #ifndef _DRM_DP_MST_HELPER_INTERNAL_H_ #define _DRM_DP_MST_HELPER_INTERNAL_H_ -#include +struct drm_dp_sideband_msg_req_body; +struct drm_dp_sideband_msg_tx; +struct drm_printer; void drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req, diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c index 120e0de674..48b2df1200 100644 --- a/drivers/gpu/drm/display/drm_dp_tunnel.c +++ b/drivers/gpu/drm/display/drm_dp_tunnel.c @@ -191,7 +191,7 @@ struct drm_dp_tunnel_mgr { struct drm_dp_tunnel_group *groups; wait_queue_head_t bw_req_queue; -#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE +#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG struct ref_tracker_dir ref_tracker; #endif }; @@ -385,7 +385,7 @@ static void tunnel_put(struct drm_dp_tunnel *tunnel) kref_put(&tunnel->kref, free_tunnel); } -#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE +#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG static void track_tunnel_ref(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker) { @@ -436,8 +436,8 @@ EXPORT_SYMBOL(drm_dp_tunnel_get); /** * drm_dp_tunnel_put - Put a reference for a DP tunnel - * @tunnel - Tunnel object - * @tracker - Debug tracker for the reference + * @tunnel: Tunnel object + * @tracker: Debug tracker for the reference * * Put a reference for @tunnel along with its debug *@tracker, which * was obtained with drm_dp_tunnel_get(). @@ -1170,7 +1170,7 @@ int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw) EXPORT_SYMBOL(drm_dp_tunnel_alloc_bw); /** - * drm_dp_tunnel_atomic_get_allocated_bw - Get the BW allocated for a DP tunnel + * drm_dp_tunnel_get_allocated_bw - Get the BW allocated for a DP tunnel * @tunnel: Tunnel object * * Get the current BW allocated for @tunnel. After the tunnel is created / @@ -1603,7 +1603,7 @@ static void cleanup_group(struct drm_dp_tunnel_group *group) drm_atomic_private_obj_fini(&group->base); } -#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE +#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state) { const struct drm_dp_tunnel_state *tunnel_state; @@ -1881,7 +1881,7 @@ static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr) drm_WARN_ON(mgr->dev, !list_empty(&mgr->groups[i].tunnels)); } -#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE +#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG ref_tracker_dir_exit(&mgr->ref_tracker); #endif @@ -1892,6 +1892,7 @@ static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr) /** * drm_dp_tunnel_mgr_create - Create a DP tunnel manager * @dev: DRM device object + * @max_group_count: Maximum number of tunnel groups * * Creates a DP tunnel manager for @dev. 
* @@ -1918,7 +1919,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count) return NULL; } -#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE +#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun"); #endif diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 39ef0a6add..fb97b51b38 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -3016,6 +3017,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, bool stall) { int i, ret; + unsigned long flags; struct drm_connector *connector; struct drm_connector_state *old_conn_state, *new_conn_state; struct drm_crtc *crtc; @@ -3099,6 +3101,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, } } + drm_panic_lock(state->dev, flags); for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { WARN_ON(plane->state != old_plane_state); @@ -3108,6 +3111,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, state->planes[i].state = old_plane_state; plane->state = new_plane_state; } + drm_panic_unlock(state->dev, flags); for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) { WARN_ON(obj->state != old_obj_state); diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 29d4940188..fc16fddee5 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -145,10 +145,10 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, &state->mode, blob->data); if (ret) { drm_dbg_atomic(crtc->dev, - "[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n", + "[CRTC:%d:%s] invalid mode (%s, %pe): " DRM_MODE_FMT "\n", crtc->base.id, crtc->name, - ret, drm_get_mode_status_name(state->mode.status)); - drm_mode_debug_printmodeline(&state->mode); + drm_get_mode_status_name(state->mode.status), + ERR_PTR(ret), DRM_MODE_ARG(&state->mode)); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 17ed94885d..28abe9aa99 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -657,6 +657,13 @@ static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge, * bridge will be called before the previous one to reverse the @pre_enable * calling direction. * + * Example: + * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E + * + * With pre_enable_prev_first flag enable in Bridge B, D, E then the resulting + * @post_disable order would be, + * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C. + * * Note: the bridge passed should be the one closest to the encoder */ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge, @@ -753,6 +760,13 @@ static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge, * If a bridge sets @pre_enable_prev_first, then the pre_enable for the * prev bridge will be called before pre_enable of this bridge. * + * Example: + * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E + * + * With pre_enable_prev_first flag enable in Bridge B, D, E then the resulting + * @pre_enable order would be, + * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B. 
+ * * Note: the bridge passed should be the one closest to the encoder */ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge, diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index 5ebdd6f8f3..6a8e45e9d0 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -57,6 +57,16 @@ static void list_insert_sorted(struct drm_buddy *mm, __list_add(&block->link, node->link.prev, &node->link); } +static void clear_reset(struct drm_buddy_block *block) +{ + block->header &= ~DRM_BUDDY_HEADER_CLEAR; +} + +static void mark_cleared(struct drm_buddy_block *block) +{ + block->header |= DRM_BUDDY_HEADER_CLEAR; +} + static void mark_allocated(struct drm_buddy_block *block) { block->header &= ~DRM_BUDDY_HEADER_STATE; @@ -82,6 +92,133 @@ static void mark_split(struct drm_buddy_block *block) list_del(&block->link); } +static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2) +{ + return s1 <= e2 && e1 >= s2; +} + +static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2) +{ + return s1 <= s2 && e1 >= e2; +} + +static struct drm_buddy_block * +__get_buddy(struct drm_buddy_block *block) +{ + struct drm_buddy_block *parent; + + parent = block->parent; + if (!parent) + return NULL; + + if (parent->left == block) + return parent->right; + + return parent->left; +} + +static unsigned int __drm_buddy_free(struct drm_buddy *mm, + struct drm_buddy_block *block, + bool force_merge) +{ + struct drm_buddy_block *parent; + unsigned int order; + + while ((parent = block->parent)) { + struct drm_buddy_block *buddy; + + buddy = __get_buddy(block); + + if (!drm_buddy_block_is_free(buddy)) + break; + + if (!force_merge) { + /* + * Check the block and its buddy clear state and exit + * the loop if they both have the dissimilar state. + */ + if (drm_buddy_block_is_clear(block) != + drm_buddy_block_is_clear(buddy)) + break; + + if (drm_buddy_block_is_clear(block)) + mark_cleared(parent); + } + + list_del(&buddy->link); + if (force_merge && drm_buddy_block_is_clear(buddy)) + mm->clear_avail -= drm_buddy_block_size(mm, buddy); + + drm_block_free(mm, block); + drm_block_free(mm, buddy); + + block = parent; + } + + order = drm_buddy_block_order(block); + mark_free(mm, block); + + return order; +} + +static int __force_merge(struct drm_buddy *mm, + u64 start, + u64 end, + unsigned int min_order) +{ + unsigned int order; + int i; + + if (!min_order) + return -ENOMEM; + + if (min_order > mm->max_order) + return -EINVAL; + + for (i = min_order - 1; i >= 0; i--) { + struct drm_buddy_block *block, *prev; + + list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) { + struct drm_buddy_block *buddy; + u64 block_start, block_end; + + if (!block->parent) + continue; + + block_start = drm_buddy_block_offset(block); + block_end = block_start + drm_buddy_block_size(mm, block) - 1; + + if (!contains(start, end, block_start, block_end)) + continue; + + buddy = __get_buddy(block); + if (!drm_buddy_block_is_free(buddy)) + continue; + + WARN_ON(drm_buddy_block_is_clear(block) == + drm_buddy_block_is_clear(buddy)); + + /* + * If the prev block is same as buddy, don't access the + * block in the next iteration as we would free the + * buddy block as part of the free function. 
+ */ + if (prev == buddy) + prev = list_prev_entry(prev, link); + + list_del(&block->link); + if (drm_buddy_block_is_clear(block)) + mm->clear_avail -= drm_buddy_block_size(mm, block); + + order = __drm_buddy_free(mm, block, true); + if (order >= min_order) + return 0; + } + } + + return -ENOMEM; +} + /** * drm_buddy_init - init memory manager * @@ -102,7 +239,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) if (size < chunk_size) return -EINVAL; - if (chunk_size < PAGE_SIZE) + if (chunk_size < SZ_4K) return -EINVAL; if (!is_power_of_2(chunk_size)) @@ -112,6 +249,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) mm->size = size; mm->avail = size; + mm->clear_avail = 0; mm->chunk_size = chunk_size; mm->max_order = ilog2(size) - ilog2(chunk_size); @@ -186,11 +324,21 @@ EXPORT_SYMBOL(drm_buddy_init); */ void drm_buddy_fini(struct drm_buddy *mm) { + u64 root_size, size; + unsigned int order; int i; + size = mm->size; + for (i = 0; i < mm->n_roots; ++i) { + order = ilog2(size) - ilog2(mm->chunk_size); + __force_merge(mm, 0, size, order); + WARN_ON(!drm_buddy_block_is_free(mm->roots[i])); drm_block_free(mm, mm->roots[i]); + + root_size = mm->chunk_size << order; + size -= root_size; } WARN_ON(mm->avail != mm->size); @@ -223,26 +371,17 @@ static int split_block(struct drm_buddy *mm, mark_free(mm, block->left); mark_free(mm, block->right); + if (drm_buddy_block_is_clear(block)) { + mark_cleared(block->left); + mark_cleared(block->right); + clear_reset(block); + } + mark_split(block); return 0; } -static struct drm_buddy_block * -__get_buddy(struct drm_buddy_block *block) -{ - struct drm_buddy_block *parent; - - parent = block->parent; - if (!parent) - return NULL; - - if (parent->left == block) - return parent->right; - - return parent->left; -} - /** * drm_get_buddy - get buddy address * @@ -260,30 +399,6 @@ drm_get_buddy(struct drm_buddy_block *block) } EXPORT_SYMBOL(drm_get_buddy); -static void __drm_buddy_free(struct drm_buddy *mm, - struct drm_buddy_block *block) -{ - struct drm_buddy_block *parent; - - while ((parent = block->parent)) { - struct drm_buddy_block *buddy; - - buddy = __get_buddy(block); - - if (!drm_buddy_block_is_free(buddy)) - break; - - list_del(&buddy->link); - - drm_block_free(mm, block); - drm_block_free(mm, buddy); - - block = parent; - } - - mark_free(mm, block); -} - /** * drm_buddy_free_block - free a block * @@ -295,42 +410,74 @@ void drm_buddy_free_block(struct drm_buddy *mm, { BUG_ON(!drm_buddy_block_is_allocated(block)); mm->avail += drm_buddy_block_size(mm, block); - __drm_buddy_free(mm, block); + if (drm_buddy_block_is_clear(block)) + mm->clear_avail += drm_buddy_block_size(mm, block); + + __drm_buddy_free(mm, block, false); } EXPORT_SYMBOL(drm_buddy_free_block); -/** - * drm_buddy_free_list - free blocks - * - * @mm: DRM buddy manager - * @objects: input list head to free blocks - */ -void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects) +static void __drm_buddy_free_list(struct drm_buddy *mm, + struct list_head *objects, + bool mark_clear, + bool mark_dirty) { struct drm_buddy_block *block, *on; + WARN_ON(mark_dirty && mark_clear); + list_for_each_entry_safe(block, on, objects, link) { + if (mark_clear) + mark_cleared(block); + else if (mark_dirty) + clear_reset(block); drm_buddy_free_block(mm, block); cond_resched(); } INIT_LIST_HEAD(objects); } -EXPORT_SYMBOL(drm_buddy_free_list); -static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2) +static void drm_buddy_free_list_internal(struct 
drm_buddy *mm, + struct list_head *objects) { - return s1 <= e2 && e1 >= s2; + /* + * Don't touch the clear/dirty bit, since allocation is still internal + * at this point. For example we might have just failed part of the + * allocation. + */ + __drm_buddy_free_list(mm, objects, false, false); } -static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2) +/** + * drm_buddy_free_list - free blocks + * + * @mm: DRM buddy manager + * @objects: input list head to free blocks + * @flags: optional flags like DRM_BUDDY_CLEARED + */ +void drm_buddy_free_list(struct drm_buddy *mm, + struct list_head *objects, + unsigned int flags) { - return s1 <= s2 && e1 >= e2; + bool mark_clear = flags & DRM_BUDDY_CLEARED; + + __drm_buddy_free_list(mm, objects, mark_clear, !mark_clear); +} +EXPORT_SYMBOL(drm_buddy_free_list); + +static bool block_incompatible(struct drm_buddy_block *block, unsigned int flags) +{ + bool needs_clear = flags & DRM_BUDDY_CLEAR_ALLOCATION; + + return needs_clear != drm_buddy_block_is_clear(block); } static struct drm_buddy_block * -alloc_range_bias(struct drm_buddy *mm, - u64 start, u64 end, - unsigned int order) +__alloc_range_bias(struct drm_buddy *mm, + u64 start, u64 end, + unsigned int order, + unsigned long flags, + bool fallback) { u64 req_size = mm->chunk_size << order; struct drm_buddy_block *block; @@ -377,6 +524,9 @@ alloc_range_bias(struct drm_buddy *mm, continue; } + if (!fallback && block_incompatible(block, flags)) + continue; + if (contains(start, end, block_start, block_end) && order == drm_buddy_block_order(block)) { /* @@ -410,30 +560,57 @@ err_undo: if (buddy && (drm_buddy_block_is_free(block) && drm_buddy_block_is_free(buddy))) - __drm_buddy_free(mm, block); + __drm_buddy_free(mm, block, false); return ERR_PTR(err); } static struct drm_buddy_block * -get_maxblock(struct drm_buddy *mm, unsigned int order) +__drm_buddy_alloc_range_bias(struct drm_buddy *mm, + u64 start, u64 end, + unsigned int order, + unsigned long flags) +{ + struct drm_buddy_block *block; + bool fallback = false; + + block = __alloc_range_bias(mm, start, end, order, + flags, fallback); + if (IS_ERR(block)) + return __alloc_range_bias(mm, start, end, order, + flags, !fallback); + + return block; +} + +static struct drm_buddy_block * +get_maxblock(struct drm_buddy *mm, unsigned int order, + unsigned long flags) { - struct drm_buddy_block *max_block = NULL, *node; + struct drm_buddy_block *max_block = NULL, *block = NULL; unsigned int i; for (i = order; i <= mm->max_order; ++i) { - if (!list_empty(&mm->free_list[i])) { - node = list_last_entry(&mm->free_list[i], - struct drm_buddy_block, - link); - if (!max_block) { - max_block = node; + struct drm_buddy_block *tmp_block; + + list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) { + if (block_incompatible(tmp_block, flags)) continue; - } - if (drm_buddy_block_offset(node) > - drm_buddy_block_offset(max_block)) { - max_block = node; - } + block = tmp_block; + break; + } + + if (!block) + continue; + + if (!max_block) { + max_block = block; + continue; + } + + if (drm_buddy_block_offset(block) > + drm_buddy_block_offset(max_block)) { + max_block = block; } } @@ -450,11 +627,29 @@ alloc_from_freelist(struct drm_buddy *mm, int err; if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) { - block = get_maxblock(mm, order); + block = get_maxblock(mm, order, flags); if (block) /* Store the obtained block order */ tmp = drm_buddy_block_order(block); } else { + for (tmp = order; tmp <= mm->max_order; ++tmp) { + struct drm_buddy_block *tmp_block; + + 
list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) { + if (block_incompatible(tmp_block, flags)) + continue; + + block = tmp_block; + break; + } + + if (block) + break; + } + } + + if (!block) { + /* Fallback method */ for (tmp = order; tmp <= mm->max_order; ++tmp) { if (!list_empty(&mm->free_list[tmp])) { block = list_last_entry(&mm->free_list[tmp], @@ -464,10 +659,10 @@ alloc_from_freelist(struct drm_buddy *mm, break; } } - } - if (!block) - return ERR_PTR(-ENOSPC); + if (!block) + return ERR_PTR(-ENOSPC); + } BUG_ON(!drm_buddy_block_is_free(block)); @@ -483,7 +678,7 @@ alloc_from_freelist(struct drm_buddy *mm, err_undo: if (tmp != order) - __drm_buddy_free(mm, block); + __drm_buddy_free(mm, block, false); return ERR_PTR(err); } @@ -526,16 +721,18 @@ static int __alloc_range(struct drm_buddy *mm, } if (contains(start, end, block_start, block_end)) { - if (!drm_buddy_block_is_free(block)) { + if (drm_buddy_block_is_free(block)) { + mark_allocated(block); + total_allocated += drm_buddy_block_size(mm, block); + mm->avail -= drm_buddy_block_size(mm, block); + if (drm_buddy_block_is_clear(block)) + mm->clear_avail -= drm_buddy_block_size(mm, block); + list_add_tail(&block->link, &allocated); + continue; + } else if (!mm->clear_avail) { err = -ENOSPC; goto err_free; } - - mark_allocated(block); - total_allocated += drm_buddy_block_size(mm, block); - mm->avail -= drm_buddy_block_size(mm, block); - list_add_tail(&block->link, &allocated); - continue; } if (!drm_buddy_block_is_split(block)) { @@ -567,14 +764,14 @@ err_undo: if (buddy && (drm_buddy_block_is_free(block) && drm_buddy_block_is_free(buddy))) - __drm_buddy_free(mm, block); + __drm_buddy_free(mm, block, false); err_free: if (err == -ENOSPC && total_allocated_on_err) { list_splice_tail(&allocated, blocks); *total_allocated_on_err = total_allocated; } else { - drm_buddy_free_list(mm, &allocated); + drm_buddy_free_list_internal(mm, &allocated); } return err; @@ -640,11 +837,11 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, list_splice(&blocks_lhs, blocks); return 0; } else if (err != -ENOSPC) { - drm_buddy_free_list(mm, blocks); + drm_buddy_free_list_internal(mm, blocks); return err; } /* Free blocks for the next iteration */ - drm_buddy_free_list(mm, blocks); + drm_buddy_free_list_internal(mm, blocks); } return -ENOSPC; @@ -700,6 +897,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm, list_del(&block->link); mark_free(mm, block); mm->avail += drm_buddy_block_size(mm, block); + if (drm_buddy_block_is_clear(block)) + mm->clear_avail += drm_buddy_block_size(mm, block); /* Prevent recursively freeing this node */ parent = block->parent; @@ -711,6 +910,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm, if (err) { mark_allocated(block); mm->avail -= drm_buddy_block_size(mm, block); + if (drm_buddy_block_is_clear(block)) + mm->clear_avail -= drm_buddy_block_size(mm, block); list_add(&block->link, blocks); } @@ -719,13 +920,28 @@ int drm_buddy_block_trim(struct drm_buddy *mm, } EXPORT_SYMBOL(drm_buddy_block_trim); +static struct drm_buddy_block * +__drm_buddy_alloc_blocks(struct drm_buddy *mm, + u64 start, u64 end, + unsigned int order, + unsigned long flags) +{ + if (flags & DRM_BUDDY_RANGE_ALLOCATION) + /* Allocate traversing within the range */ + return __drm_buddy_alloc_range_bias(mm, start, end, + order, flags); + else + /* Allocate from freelist */ + return alloc_from_freelist(mm, order, flags); +} + /** * drm_buddy_alloc_blocks - allocate power-of-two blocks * * @mm: DRM buddy manager to allocate from * 
@start: start of the allowed range for this block * @end: end of the allowed range for this block - * @size: size of the allocation + * @size: size of the allocation in bytes * @min_block_size: alignment of the allocation * @blocks: output list head to add allocated blocks * @flags: DRM_BUDDY_*_ALLOCATION flags @@ -800,23 +1016,33 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, BUG_ON(order < min_order); do { - if (flags & DRM_BUDDY_RANGE_ALLOCATION) - /* Allocate traversing within the range */ - block = alloc_range_bias(mm, start, end, order); - else - /* Allocate from freelist */ - block = alloc_from_freelist(mm, order, flags); - + block = __drm_buddy_alloc_blocks(mm, start, + end, + order, + flags); if (!IS_ERR(block)) break; if (order-- == min_order) { + /* Try allocation through force merge method */ + if (mm->clear_avail && + !__force_merge(mm, start, end, min_order)) { + block = __drm_buddy_alloc_blocks(mm, start, + end, + min_order, + flags); + if (!IS_ERR(block)) { + order = min_order; + break; + } + } + + /* + * Try contiguous block allocation through + * try harder method. + */ if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION && !(flags & DRM_BUDDY_RANGE_ALLOCATION)) - /* - * Try contiguous block allocation through - * try harder method - */ return __alloc_contig_try_harder(mm, original_size, original_min_size, @@ -828,6 +1054,8 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, mark_allocated(block); mm->avail -= drm_buddy_block_size(mm, block); + if (drm_buddy_block_is_clear(block)) + mm->clear_avail -= drm_buddy_block_size(mm, block); kmemleak_update_trace(block); list_add_tail(&block->link, &allocated); @@ -866,7 +1094,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, return 0; err_free: - drm_buddy_free_list(mm, &allocated); + drm_buddy_free_list_internal(mm, &allocated); return err; } EXPORT_SYMBOL(drm_buddy_alloc_blocks); @@ -899,8 +1127,8 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p) { int order; - drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n", - mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20); + drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n", + mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20); for (order = mm->max_order; order >= 0; order--) { struct drm_buddy_block *block; diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 9403b3f576..2803ac111b 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -172,6 +172,18 @@ void drm_client_release(struct drm_client_dev *client) } EXPORT_SYMBOL(drm_client_release); +/** + * drm_client_dev_unregister - Unregister clients + * @dev: DRM device + * + * This function releases all clients by calling each client's + * &drm_client_funcs.unregister callback. The callback function + * is responsible for releasing all resources including the client + * itself. + * + * The helper drm_dev_unregister() calls this function. Drivers + * that use it don't need to call this function themselves.
+ */ void drm_client_dev_unregister(struct drm_device *dev) { struct drm_client_dev *client, *tmp; @@ -191,6 +203,7 @@ void drm_client_dev_unregister(struct drm_device *dev) } mutex_unlock(&dev->clientlist_mutex); } +EXPORT_SYMBOL(drm_client_dev_unregister); /** * drm_client_dev_hotplug - Send hotplug event to clients @@ -304,6 +317,66 @@ err_delete: return ERR_PTR(ret); } +/** + * drm_client_buffer_vmap_local - Map DRM client buffer into address space + * @buffer: DRM client buffer + * @map_copy: Returns the mapped memory's address + * + * This function maps a client buffer into kernel address space. If the + * buffer is already mapped, it returns the existing mapping's address. + * + * Client buffer mappings are not ref'counted. Each call to + * drm_client_buffer_vmap_local() should be closely followed by a call to + * drm_client_buffer_vunmap_local(). See drm_client_buffer_vmap() for + * long-term mappings. + * + * The returned address is a copy of the internal value. In contrast to + * other vmap interfaces, you don't need it for the client's vunmap + * function. So you can modify it at will during blit and draw operations. + * + * Returns: + * 0 on success, or a negative errno code otherwise. + */ +int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer, + struct iosys_map *map_copy) +{ + struct drm_gem_object *gem = buffer->gem; + struct iosys_map *map = &buffer->map; + int ret; + + drm_gem_lock(gem); + + ret = drm_gem_vmap(gem, map); + if (ret) + goto err_drm_gem_vmap_unlocked; + *map_copy = *map; + + return 0; + +err_drm_gem_vmap_unlocked: + drm_gem_unlock(gem); + return ret; +} +EXPORT_SYMBOL(drm_client_buffer_vmap_local); + +/** + * drm_client_buffer_vunmap_local - Unmap DRM client buffer + * @buffer: DRM client buffer + * + * This function removes a client buffer's memory mapping established + * with drm_client_buffer_vmap_local(). Calling this function is only + * required by clients that manage their buffer mappings by themselves. + */ +void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer) +{ + struct drm_gem_object *gem = buffer->gem; + struct iosys_map *map = &buffer->map; + + drm_gem_vunmap(gem, map); + drm_gem_unlock(gem); +} +EXPORT_SYMBOL(drm_client_buffer_vunmap_local); + /** * drm_client_buffer_vmap - Map DRM client buffer into address space * @buffer: DRM client buffer @@ -328,24 +401,30 @@ int drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct iosys_map *map_copy) { + struct drm_gem_object *gem = buffer->gem; struct iosys_map *map = &buffer->map; int ret; - /* - * FIXME: The dependency on GEM here isn't required, we could - * convert the driver handle to a dma-buf instead and use the - * backend-agnostic dma-buf vmap support instead. This would - * require that the handle2fd prime ioctl is reworked to pull the - * fd_install step out of the driver backend hooks, to make that - * final step optional for internal users.
- */ - ret = drm_gem_vmap_unlocked(buffer->gem, map); + drm_gem_lock(gem); + + ret = drm_gem_pin_locked(gem); if (ret) - return ret; + goto err_drm_gem_pin_locked; + ret = drm_gem_vmap(gem, map); + if (ret) + goto err_drm_gem_vmap; + + drm_gem_unlock(gem); *map_copy = *map; return 0; + +err_drm_gem_vmap: + drm_gem_unpin_locked(buffer->gem); +err_drm_gem_pin_locked: + drm_gem_unlock(gem); + return ret; } EXPORT_SYMBOL(drm_client_buffer_vmap); @@ -359,9 +438,13 @@ EXPORT_SYMBOL(drm_client_buffer_vmap); */ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) { + struct drm_gem_object *gem = buffer->gem; struct iosys_map *map = &buffer->map; - drm_gem_vunmap_unlocked(buffer->gem, map); + drm_gem_lock(gem); + drm_gem_vunmap(gem, map); + drm_gem_unpin_locked(gem); + drm_gem_unlock(gem); } EXPORT_SYMBOL(drm_client_buffer_vunmap); diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 0683a129b3..31af5cf37a 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -242,8 +242,10 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors, for (i = 0; i < connector_count; i++) { connector = connectors[i]; enabled[i] = drm_connector_enabled(connector, true); - DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id, - connector->display_info.non_desktop ? "non desktop" : str_yes_no(enabled[i])); + drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] enabled? %s\n", + connector->base.id, connector->name, + connector->display_info.non_desktop ? + "non desktop" : str_yes_no(enabled[i])); any_enabled |= enabled[i]; } @@ -303,7 +305,7 @@ static bool drm_client_target_cloned(struct drm_device *dev, } if (can_clone) { - DRM_DEBUG_KMS("can clone using command line\n"); + drm_dbg_kms(dev, "can clone using command line\n"); return true; } @@ -332,15 +334,16 @@ static bool drm_client_target_cloned(struct drm_device *dev, kfree(dmt_mode); if (can_clone) { - DRM_DEBUG_KMS("can clone using 1024x768\n"); + drm_dbg_kms(dev, "can clone using 1024x768\n"); return true; } fail: - DRM_INFO("kms: can't enable cloning when we probably wanted to.\n"); + drm_info(dev, "kms: can't enable cloning when we probably wanted to.\n"); return false; } -static int drm_client_get_tile_offsets(struct drm_connector **connectors, +static int drm_client_get_tile_offsets(struct drm_device *dev, + struct drm_connector **connectors, unsigned int connector_count, struct drm_display_mode **modes, struct drm_client_offset *offsets, @@ -357,8 +360,9 @@ static int drm_client_get_tile_offsets(struct drm_connector **connectors, continue; if (!modes[i] && (h_idx || v_idx)) { - DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i, - connector->base.id); + drm_dbg_kms(dev, + "[CONNECTOR:%d:%s] no modes for connector tiled %d\n", + connector->base.id, connector->name, i); continue; } if (connector->tile_h_loc < h_idx) @@ -369,11 +373,12 @@ static int drm_client_get_tile_offsets(struct drm_connector **connectors, } offsets[idx].x = hoffset; offsets[idx].y = voffset; - DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx); + drm_dbg_kms(dev, "returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx); return 0; } -static bool drm_client_target_preferred(struct drm_connector **connectors, +static bool drm_client_target_preferred(struct drm_device *dev, + struct drm_connector **connectors, unsigned int connector_count, struct drm_display_mode **modes, struct drm_client_offset *offsets, @@ -423,17 +428,19 @@ retry: * find 
the tile offsets for this pass - need to find * all tiles left and above */ - drm_client_get_tile_offsets(connectors, connector_count, modes, offsets, i, + drm_client_get_tile_offsets(dev, connectors, connector_count, + modes, offsets, i, connector->tile_h_loc, connector->tile_v_loc); } - DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", - connector->base.id); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n", + connector->base.id, connector->name); /* got for command line mode first */ modes[i] = drm_connector_pick_cmdline_mode(connector); if (!modes[i]) { - DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n", - connector->base.id, connector->tile_group ? connector->tile_group->id : 0); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n", + connector->base.id, connector->name, + connector->tile_group ? connector->tile_group->id : 0); modes[i] = drm_connector_has_preferred_mode(connector, width, height); } /* No preferred modes, pick one off the list */ @@ -455,16 +462,18 @@ retry: (connector->tile_h_loc == 0 && connector->tile_v_loc == 0 && !drm_connector_get_tiled_mode(connector))) { - DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n", - connector->base.id); + drm_dbg_kms(dev, + "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n", + connector->base.id, connector->name); modes[i] = drm_connector_fallback_non_tiled_mode(connector); } else { modes[i] = drm_connector_get_tiled_mode(connector); } } - DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name : - "none"); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Found mode %s\n", + connector->base.id, connector->name, + modes[i] ? modes[i]->name : "none"); conn_configured |= BIT_ULL(i); } @@ -585,7 +594,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client, if (!drm_drv_uses_atomic_modeset(dev)) return false; - if (WARN_ON(count <= 0)) + if (drm_WARN_ON(dev, count <= 0)) return false; save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL); @@ -624,26 +633,26 @@ retry: num_connectors_detected++; if (!enabled[i]) { - DRM_DEBUG_KMS("connector %s not enabled, skipping\n", - connector->name); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] not enabled, skipping\n", + connector->base.id, connector->name); conn_configured |= BIT(i); continue; } if (connector->force == DRM_FORCE_OFF) { - DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n", - connector->name); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] disabled by user, skipping\n", + connector->base.id, connector->name); enabled[i] = false; continue; } encoder = connector->state->best_encoder; - if (!encoder || WARN_ON(!connector->state->crtc)) { + if (!encoder || drm_WARN_ON(dev, !connector->state->crtc)) { if (connector->force > DRM_FORCE_OFF) goto bail; - DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", - connector->name); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] has no encoder or crtc, skipping\n", + connector->base.id, connector->name); enabled[i] = false; conn_configured |= BIT(i); continue; @@ -660,28 +669,30 @@ retry: */ for (j = 0; j < count; j++) { if (crtcs[j] == new_crtc) { - DRM_DEBUG_KMS("fallback: cloned configuration\n"); + drm_dbg_kms(dev, "fallback: cloned configuration\n"); goto bail; } } - DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n", - connector->name); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n", + connector->base.id, connector->name); /* go for command line mode first */ modes[i] = drm_connector_pick_cmdline_mode(connector); /* try for 
preferred next */ if (!modes[i]) { - DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n", - connector->name, connector->has_tile); + drm_dbg_kms(dev, + "[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n", + connector->base.id, connector->name, + str_yes_no(connector->has_tile)); modes[i] = drm_connector_has_preferred_mode(connector, width, height); } /* No preferred mode marked by the EDID? Are there any modes? */ if (!modes[i] && !list_empty(&connector->modes)) { - DRM_DEBUG_KMS("using first mode listed on connector %s\n", - connector->name); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n", + connector->base.id, connector->name); modes[i] = list_first_entry(&connector->modes, struct drm_display_mode, head); @@ -700,8 +711,8 @@ retry: * This is crtc->mode and not crtc->state->mode for the * fastboot check to work correctly. */ - DRM_DEBUG_KMS("looking for current mode on connector %s\n", - connector->name); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for current mode\n", + connector->base.id, connector->name); modes[i] = &connector->state->crtc->mode; } /* @@ -710,18 +721,18 @@ retry: */ if (connector->has_tile && num_tiled_conns < connector->num_h_tile * connector->num_v_tile) { - DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n", - connector->base.id); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n", + connector->base.id, connector->name); modes[i] = drm_connector_fallback_non_tiled_mode(connector); } crtcs[i] = new_crtc; - DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n", - connector->name, - connector->state->crtc->base.id, - connector->state->crtc->name, - modes[i]->hdisplay, modes[i]->vdisplay, - modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : ""); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] on [CRTC:%d:%s]: %dx%d%s\n", + connector->base.id, connector->name, + connector->state->crtc->base.id, + connector->state->crtc->name, + modes[i]->hdisplay, modes[i]->vdisplay, + modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? 
"i" : ""); fallback = false; conn_configured |= BIT(i); @@ -737,15 +748,15 @@ retry: */ if (num_connectors_enabled != num_connectors_detected && num_connectors_enabled < dev->mode_config.num_crtc) { - DRM_DEBUG_KMS("fallback: Not all outputs enabled\n"); - DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled, - num_connectors_detected); + drm_dbg_kms(dev, "fallback: Not all outputs enabled\n"); + drm_dbg_kms(dev, "Enabled: %i, detected: %i\n", + num_connectors_enabled, num_connectors_detected); fallback = true; } if (fallback) { bail: - DRM_DEBUG_KMS("Not using firmware configuration\n"); + drm_dbg_kms(dev, "Not using firmware configuration\n"); memcpy(enabled, save_enabled, count); ret = false; } @@ -783,7 +794,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, int i, ret = 0; bool *enabled; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(dev, "\n"); if (!width) width = dev->mode_config.max_width; @@ -814,7 +825,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, offsets = kcalloc(connector_count, sizeof(*offsets), GFP_KERNEL); enabled = kcalloc(connector_count, sizeof(bool), GFP_KERNEL); if (!crtcs || !modes || !enabled || !offsets) { - DRM_ERROR("Memory allocation failed\n"); ret = -ENOMEM; goto out; } @@ -825,7 +835,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, for (i = 0; i < connector_count; i++) total_modes_count += connectors[i]->funcs->fill_modes(connectors[i], width, height); if (!total_modes_count) - DRM_DEBUG_KMS("No connectors reported connected with modes\n"); + drm_dbg_kms(dev, "No connectors reported connected with modes\n"); drm_client_connectors_enabled(connectors, connector_count, enabled); if (!drm_client_firmware_config(client, connectors, connector_count, crtcs, @@ -836,12 +846,12 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, if (!drm_client_target_cloned(dev, connectors, connector_count, modes, offsets, enabled, width, height) && - !drm_client_target_preferred(connectors, connector_count, modes, + !drm_client_target_preferred(dev, connectors, connector_count, modes, offsets, enabled, width, height)) - DRM_ERROR("Unable to find initial modes\n"); + drm_err(dev, "Unable to find initial modes\n"); - DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", - width, height); + drm_dbg_kms(dev, "picking CRTCs for %dx%d config\n", + width, height); drm_client_pick_crtcs(client, connectors, connector_count, crtcs, modes, 0, width, height); @@ -858,11 +868,12 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, struct drm_mode_set *modeset = drm_client_find_modeset(client, crtc); struct drm_connector *connector = connectors[i]; - DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n", - mode->name, crtc->base.id, offset->x, offset->y); + drm_dbg_kms(dev, "[CRTC:%d:%s] desired mode %s set (%d,%d)\n", + crtc->base.id, crtc->name, + mode->name, offset->x, offset->y); - if (WARN_ON_ONCE(modeset->num_connectors == DRM_CLIENT_MAX_CLONED_CONNECTORS || - (dev->mode_config.num_crtc > 1 && modeset->num_connectors == 1))) { + if (drm_WARN_ON_ONCE(dev, modeset->num_connectors == DRM_CLIENT_MAX_CLONED_CONNECTORS || + (dev->mode_config.num_crtc > 1 && modeset->num_connectors == 1))) { ret = -EINVAL; break; } diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 82c665d3e7..483969b84a 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -716,10 +716,10 @@ int 
drm_mode_setcrtc(struct drm_device *dev, void *data, crtc = drm_crtc_find(dev, file_priv, crtc_req->crtc_id); if (!crtc) { - DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id); + drm_dbg_kms(dev, "Unknown CRTC ID %d\n", crtc_req->crtc_id); return -ENOENT; } - DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); + drm_dbg_kms(dev, "[CRTC:%d:%s]\n", crtc->base.id, crtc->name); plane = crtc->primary; @@ -742,7 +742,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, old_fb = plane->fb; if (!old_fb) { - DRM_DEBUG_KMS("CRTC doesn't have current FB\n"); + drm_dbg_kms(dev, "CRTC doesn't have current FB\n"); ret = -EINVAL; goto out; } @@ -753,8 +753,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, } else { fb = drm_framebuffer_lookup(dev, file_priv, crtc_req->fb_id); if (!fb) { - DRM_DEBUG_KMS("Unknown FB ID%d\n", - crtc_req->fb_id); + drm_dbg_kms(dev, "Unknown FB ID%d\n", + crtc_req->fb_id); ret = -ENOENT; goto out; } @@ -767,7 +767,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, } if (!file_priv->aspect_ratio_allowed && (crtc_req->mode.flags & DRM_MODE_FLAG_PIC_AR_MASK) != DRM_MODE_FLAG_PIC_AR_NONE) { - DRM_DEBUG_KMS("Unexpected aspect-ratio flag bits\n"); + drm_dbg_kms(dev, "Unexpected aspect-ratio flag bits\n"); ret = -EINVAL; goto out; } @@ -775,9 +775,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode); if (ret) { - DRM_DEBUG_KMS("Invalid mode (ret=%d, status=%s)\n", - ret, drm_get_mode_status_name(mode->status)); - drm_mode_debug_printmodeline(mode); + drm_dbg_kms(dev, "Invalid mode (%s, %pe): " DRM_MODE_FMT "\n", + drm_get_mode_status_name(mode->status), + ERR_PTR(ret), DRM_MODE_ARG(mode)); goto out; } @@ -793,9 +793,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, fb->format->format, fb->modifier); if (ret) { - DRM_DEBUG_KMS("Invalid pixel format %p4cc, modifier 0x%llx\n", - &fb->format->format, - fb->modifier); + drm_dbg_kms(dev, "Invalid pixel format %p4cc, modifier 0x%llx\n", + &fb->format->format, fb->modifier); goto out; } } @@ -808,14 +807,14 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, } if (crtc_req->count_connectors == 0 && mode) { - DRM_DEBUG_KMS("Count connectors is 0 but mode set\n"); + drm_dbg_kms(dev, "Count connectors is 0 but mode set\n"); ret = -EINVAL; goto out; } if (crtc_req->count_connectors > 0 && (!mode || !fb)) { - DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n", - crtc_req->count_connectors); + drm_dbg_kms(dev, "Count connectors is %d but no mode or fb set\n", + crtc_req->count_connectors); ret = -EINVAL; goto out; } @@ -847,14 +846,13 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, connector = drm_connector_lookup(dev, file_priv, out_id); if (!connector) { - DRM_DEBUG_KMS("Connector id %d unknown\n", - out_id); + drm_dbg_kms(dev, "Connector id %d unknown\n", + out_id); ret = -ENOENT; goto out; } - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); connector_set[i] = connector; num_connectors++; diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 2dafc39a27..0955f1c385 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -110,15 +110,15 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder) struct drm_connector_list_iter conn_iter; struct drm_device *dev = encoder->dev; - 
WARN_ON(drm_drv_uses_atomic_modeset(dev)); + drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev)); /* * We can expect this mutex to be locked if we are not panicking. * Locking is currently fubar in the panic handler. */ if (!oops_in_progress) { - WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex)); + drm_WARN_ON(dev, !drm_modeset_is_locked(&dev->mode_config.connection_mutex)); } @@ -150,14 +150,14 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc) struct drm_encoder *encoder; struct drm_device *dev = crtc->dev; - WARN_ON(drm_drv_uses_atomic_modeset(dev)); + drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev)); /* * We can expect this mutex to be locked if we are not panicking. * Locking is currently fubar in the panic handler. */ if (!oops_in_progress) - WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex)); drm_for_each_encoder(encoder, dev) if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) @@ -230,7 +230,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev) */ void drm_helper_disable_unused_functions(struct drm_device *dev) { - WARN_ON(drm_drv_uses_atomic_modeset(dev)); + drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev)); drm_modeset_lock_all(dev); __drm_helper_disable_unused_functions(dev); @@ -294,7 +294,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_encoder *encoder; bool ret = true; - WARN_ON(drm_drv_uses_atomic_modeset(dev)); + drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev)); drm_warn_on_modeset_not_all_locked(dev); @@ -338,7 +338,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (encoder_funcs->mode_fixup) { if (!(ret = encoder_funcs->mode_fixup(encoder, mode, adjusted_mode))) { - DRM_DEBUG_KMS("Encoder fixup failed\n"); + drm_dbg_kms(dev, "[ENCODER:%d:%s] mode fixup failed\n", + encoder->base.id, encoder->name); goto done; } } @@ -347,11 +348,12 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (crtc_funcs->mode_fixup) { if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) { - DRM_DEBUG_KMS("CRTC fixup failed\n"); + drm_dbg_kms(dev, "[CRTC:%d:%s] mode fixup failed\n", + crtc->base.id, crtc->name); goto done; } } - DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); + drm_dbg_kms(dev, "[CRTC:%d:%s]\n", crtc->base.id, crtc->name); drm_mode_copy(&crtc->hwmode, adjusted_mode); @@ -390,8 +392,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (!encoder_funcs) continue; - DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%s]\n", - encoder->base.id, encoder->name, mode->name); + drm_dbg_kms(dev, "[ENCODER:%d:%s] set [MODE:%s]\n", + encoder->base.id, encoder->name, mode->name); if (encoder_funcs->mode_set) encoder_funcs->mode_set(encoder, mode, adjusted_mode); } @@ -503,7 +505,7 @@ drm_connector_get_single_encoder(struct drm_connector *connector) { struct drm_encoder *encoder; - WARN_ON(hweight32(connector->possible_encoders) > 1); + drm_WARN_ON(connector->dev, hweight32(connector->possible_encoders) > 1); drm_connector_for_each_possible_encoder(connector, encoder) return encoder; @@ -564,8 +566,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set, int ret; int i; - DRM_DEBUG_KMS("\n"); - BUG_ON(!set); BUG_ON(!set->crtc); BUG_ON(!set->crtc->helper_private); @@ -577,19 +577,22 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set, crtc_funcs = set->crtc->helper_private; dev = 
set->crtc->dev; - WARN_ON(drm_drv_uses_atomic_modeset(dev)); + + drm_dbg_kms(dev, "\n"); + + drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev)); if (!set->mode) set->fb = NULL; if (set->fb) { - DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n", - set->crtc->base.id, set->crtc->name, - set->fb->base.id, - (int)set->num_connectors, set->x, set->y); + drm_dbg_kms(dev, "[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n", + set->crtc->base.id, set->crtc->name, + set->fb->base.id, + (int)set->num_connectors, set->x, set->y); } else { - DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n", - set->crtc->base.id, set->crtc->name); + drm_dbg_kms(dev, "[CRTC:%d:%s] [NOFB]\n", + set->crtc->base.id, set->crtc->name); drm_crtc_helper_disable(set->crtc); return 0; } @@ -639,7 +642,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set, if (set->crtc->primary->fb != set->fb) { /* If we have no fb then treat it as a full mode set */ if (set->crtc->primary->fb == NULL) { - DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); + drm_dbg_kms(dev, "[CRTC:%d:%s] no fb, full mode set\n", + set->crtc->base.id, set->crtc->name); mode_changed = true; } else if (set->fb->format != set->crtc->primary->fb->format) { mode_changed = true; @@ -651,9 +655,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set, fb_changed = true; if (!drm_mode_equal(set->mode, &set->crtc->mode)) { - DRM_DEBUG_KMS("modes are different, full mode set\n"); - drm_mode_debug_printmodeline(&set->crtc->mode); - drm_mode_debug_printmodeline(set->mode); + drm_dbg_kms(dev, "[CRTC:%d:%s] modes are different, full mode set:\n", + set->crtc->base.id, set->crtc->name); + drm_dbg_kms(dev, DRM_MODE_FMT "\n", DRM_MODE_ARG(&set->crtc->mode)); + drm_dbg_kms(dev, DRM_MODE_FMT "\n", DRM_MODE_ARG(set->mode)); mode_changed = true; } @@ -687,7 +692,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set, fail = 1; if (connector->dpms != DRM_MODE_DPMS_ON) { - DRM_DEBUG_KMS("connector dpms not on, full mode switch\n"); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] DPMS not on, full mode switch\n", + connector->base.id, connector->name); mode_changed = true; } @@ -696,7 +702,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set, } if (new_encoder != connector->encoder) { - DRM_DEBUG_KMS("encoder changed, full mode switch\n"); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] encoder changed, full mode switch\n", + connector->base.id, connector->name); mode_changed = true; /* If the encoder is reused for another connector, then * the appropriate crtc will be set later. 
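Nearly every hunk in drm_crtc_helper.c (and in drm_client_modeset.c above) applies the same two mechanical conversions: WARN_ON() becomes drm_WARN_ON(dev, ...), and the device-less DRM_DEBUG_KMS()/DRM_ERROR() macros become drm_dbg_kms(dev, ...)/drm_err(dev, ...), with messages prefixed by the canonical [CONNECTOR:%d:%s] or [CRTC:%d:%s] identifier. A before/after sketch at a hypothetical call site (fragment only, not taken from the patch):

/* Before: no device association, object identified by name alone. */
DRM_DEBUG_KMS("connector %s has no encoder\n", connector->name);
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

/* After: output is tied to the drm_device and uses the standard form. */
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] has no encoder\n",
	    connector->base.id, connector->name);
drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));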
@@ -737,17 +744,18 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set, goto fail; } if (new_crtc != connector->encoder->crtc) { - DRM_DEBUG_KMS("crtc changed, full mode switch\n"); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] CRTC changed, full mode switch\n", + connector->base.id, connector->name); mode_changed = true; connector->encoder->crtc = new_crtc; } if (new_crtc) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n", - connector->base.id, connector->name, - new_crtc->base.id, new_crtc->name); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n", + connector->base.id, connector->name, + new_crtc->base.id, new_crtc->name); } else { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", - connector->base.id, connector->name); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] to [NOCRTC]\n", + connector->base.id, connector->name); } } drm_connector_list_iter_end(&conn_iter); @@ -758,23 +766,23 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set, if (mode_changed) { if (drm_helper_crtc_in_use(set->crtc)) { - DRM_DEBUG_KMS("attempting to set mode from" - " userspace\n"); - drm_mode_debug_printmodeline(set->mode); + drm_dbg_kms(dev, "[CRTC:%d:%s] attempting to set mode from userspace: " DRM_MODE_FMT "\n", + set->crtc->base.id, set->crtc->name, DRM_MODE_ARG(set->mode)); set->crtc->primary->fb = set->fb; if (!drm_crtc_helper_set_mode(set->crtc, set->mode, set->x, set->y, save_set.fb)) { - DRM_ERROR("failed to set mode on [CRTC:%d:%s]\n", - set->crtc->base.id, set->crtc->name); + drm_err(dev, "[CRTC:%d:%s] failed to set mode\n", + set->crtc->base.id, set->crtc->name); set->crtc->primary->fb = save_set.fb; ret = -EINVAL; goto fail; } - DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); + drm_dbg_kms(dev, "[CRTC:%d:%s] Setting connector DPMS state to on\n", + set->crtc->base.id, set->crtc->name); for (i = 0; i < set->num_connectors; i++) { - DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, - set->connectors[i]->name); + drm_dbg_kms(dev, "\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, + set->connectors[i]->name); set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); } } @@ -823,7 +831,7 @@ fail: if (mode_changed && !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x, save_set.y, save_set.fb)) - DRM_ERROR("failed to restore config after modeset failure\n"); + drm_err(dev, "failed to restore config after modeset failure\n"); kfree(save_connector_encoders); kfree(save_encoder_crtcs); @@ -905,7 +913,7 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode) struct drm_crtc *crtc = encoder ? encoder->crtc : NULL; int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF; - WARN_ON(drm_drv_uses_atomic_modeset(connector->dev)); + drm_WARN_ON(connector->dev, drm_drv_uses_atomic_modeset(connector->dev)); if (mode == connector->dpms) return 0; @@ -980,7 +988,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev) int encoder_dpms; bool ret; - WARN_ON(drm_drv_uses_atomic_modeset(dev)); + drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev)); drm_modeset_lock_all(dev); drm_for_each_crtc(crtc, dev) { @@ -993,7 +1001,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev) /* Restoring the old config should never fail! 
*/ if (ret == false) - DRM_ERROR("failed to set mode on crtc %p\n", crtc); + drm_err(dev, "failed to set mode on crtc %p\n", crtc); /* Turn off outputs that were already powered off */ if (drm_helper_choose_crtc_dpms(crtc)) { diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h index 28e04e7501..8059f65c5d 100644 --- a/drivers/gpu/drm/drm_crtc_helper_internal.h +++ b/drivers/gpu/drm/drm_crtc_helper_internal.h @@ -26,10 +26,15 @@ * implementation details and are not exported to drivers. */ -#include -#include -#include -#include +#ifndef __DRM_CRTC_HELPER_INTERNAL_H__ +#define __DRM_CRTC_HELPER_INTERNAL_H__ + +enum drm_mode_status; +struct drm_connector; +struct drm_crtc; +struct drm_display_mode; +struct drm_encoder; +struct drm_modeset_acquire_ctx; /* drm_probe_helper.c */ enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc, @@ -44,3 +49,5 @@ drm_connector_mode_valid(struct drm_connector *connector, struct drm_encoder * drm_connector_get_single_encoder(struct drm_connector *connector); + +#endif /* __DRM_CRTC_HELPER_INTERNAL_H__ */ diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index a514d5207e..25aaae937c 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -32,6 +32,10 @@ * and are not exported to drivers. */ +#ifndef __DRM_CRTC_INTERNAL_H__ +#define __DRM_CRTC_INTERNAL_H__ + +#include #include enum drm_color_encoding; @@ -39,12 +43,14 @@ enum drm_color_range; enum drm_connector_force; enum drm_mode_status; +struct cea_sad; struct drm_atomic_state; struct drm_bridge; struct drm_connector; struct drm_crtc; struct drm_device; struct drm_display_mode; +struct drm_edid; struct drm_file; struct drm_framebuffer; struct drm_mode_create_dumb; @@ -54,6 +60,7 @@ struct drm_mode_object; struct drm_mode_set; struct drm_plane; struct drm_plane_state; +struct drm_printer; struct drm_property; struct edid; struct fwnode_handle; @@ -292,6 +299,10 @@ void drm_mode_fixup_1366x768(struct drm_display_mode *mode); int drm_edid_override_show(struct drm_connector *connector, struct seq_file *m); int drm_edid_override_set(struct drm_connector *connector, const void *edid, size_t size); int drm_edid_override_reset(struct drm_connector *connector); +const u8 *drm_edid_find_extension(const struct drm_edid *drm_edid, + int ext_id, int *ext_index); +void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad); +void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad); /* drm_edid_load.c */ #ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE @@ -303,3 +314,5 @@ drm_edid_load_firmware(struct drm_connector *connector) return ERR_PTR(-ENOENT); } #endif + +#endif /* __DRM_CRTC_INTERNAL_H__ */ diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c index 9edc111be7..9d01d76280 100644 --- a/drivers/gpu/drm/drm_displayid.c +++ b/drivers/gpu/drm/drm_displayid.c @@ -3,10 +3,12 @@ * Copyright © 2021 Intel Corporation */ -#include #include #include +#include "drm_crtc_internal.h" +#include "drm_displayid_internal.h" + static const struct displayid_header * displayid_get_header(const u8 *displayid, int length, int index) { @@ -53,9 +55,10 @@ static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid, int *length, int *idx, int *ext_index) { - const u8 *displayid = drm_find_edid_extension(drm_edid, DISPLAYID_EXT, ext_index); const struct displayid_header *base; + const u8 *displayid; + displayid = drm_edid_find_extension(drm_edid, DISPLAYID_EXT, 
ext_index); if (!displayid) return NULL; diff --git a/drivers/gpu/drm/drm_displayid_internal.h b/drivers/gpu/drm/drm_displayid_internal.h new file mode 100644 index 0000000000..aee1b86a73 --- /dev/null +++ b/drivers/gpu/drm/drm_displayid_internal.h @@ -0,0 +1,170 @@ +/* + * Copyright © 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DRM_DISPLAYID_INTERNAL_H__ +#define __DRM_DISPLAYID_INTERNAL_H__ + +#include +#include + +struct drm_edid; + +#define VESA_IEEE_OUI 0x3a0292 + +/* DisplayID Structure versions */ +#define DISPLAY_ID_STRUCTURE_VER_20 0x20 + +/* DisplayID Structure v1r2 Data Blocks */ +#define DATA_BLOCK_PRODUCT_ID 0x00 +#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01 +#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02 +#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03 +#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04 +#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05 +#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06 +#define DATA_BLOCK_VESA_TIMING 0x07 +#define DATA_BLOCK_CEA_TIMING 0x08 +#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09 +#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a +#define DATA_BLOCK_GP_ASCII_STRING 0x0b +#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c +#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d +#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e +#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f +#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10 +#define DATA_BLOCK_TILED_DISPLAY 0x12 +#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f +#define DATA_BLOCK_CTA 0x81 + +/* DisplayID Structure v2r0 Data Blocks */ +#define DATA_BLOCK_2_PRODUCT_ID 0x20 +#define DATA_BLOCK_2_DISPLAY_PARAMETERS 0x21 +#define DATA_BLOCK_2_TYPE_7_DETAILED_TIMING 0x22 +#define DATA_BLOCK_2_TYPE_8_ENUMERATED_TIMING 0x23 +#define DATA_BLOCK_2_TYPE_9_FORMULA_TIMING 0x24 +#define DATA_BLOCK_2_DYNAMIC_VIDEO_TIMING 0x25 +#define DATA_BLOCK_2_DISPLAY_INTERFACE_FEATURES 0x26 +#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27 +#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28 +#define DATA_BLOCK_2_CONTAINER_ID 0x29 +#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e +#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81 + +/* DisplayID Structure v1r2 Product Type */ +#define PRODUCT_TYPE_EXTENSION 0 +#define PRODUCT_TYPE_TEST 1 +#define PRODUCT_TYPE_PANEL 2 +#define PRODUCT_TYPE_MONITOR 3 +#define PRODUCT_TYPE_TV 4 +#define PRODUCT_TYPE_REPEATER 5 +#define PRODUCT_TYPE_DIRECT_DRIVE 6 + +/* DisplayID Structure v2r0 Display Product Primary Use Case (~Product Type) */ +#define PRIMARY_USE_EXTENSION 0 
+#define PRIMARY_USE_TEST 1 +#define PRIMARY_USE_GENERIC 2 +#define PRIMARY_USE_TV 3 +#define PRIMARY_USE_DESKTOP_PRODUCTIVITY 4 +#define PRIMARY_USE_DESKTOP_GAMING 5 +#define PRIMARY_USE_PRESENTATION 6 +#define PRIMARY_USE_HEAD_MOUNTED_VR 7 +#define PRIMARY_USE_HEAD_MOUNTED_AR 8 + +struct displayid_header { + u8 rev; + u8 bytes; + u8 prod_id; + u8 ext_count; +} __packed; + +struct displayid_block { + u8 tag; + u8 rev; + u8 num_bytes; +} __packed; + +struct displayid_tiled_block { + struct displayid_block base; + u8 tile_cap; + u8 topo[3]; + u8 tile_size[4]; + u8 tile_pixel_bezel[5]; + u8 topology_id[8]; +} __packed; + +struct displayid_detailed_timings_1 { + u8 pixel_clock[3]; + u8 flags; + u8 hactive[2]; + u8 hblank[2]; + u8 hsync[2]; + u8 hsw[2]; + u8 vactive[2]; + u8 vblank[2]; + u8 vsync[2]; + u8 vsw[2]; +} __packed; + +struct displayid_detailed_timing_block { + struct displayid_block base; + struct displayid_detailed_timings_1 timings[]; +}; + +#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0) +#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5) + +struct displayid_vesa_vendor_specific_block { + struct displayid_block base; + u8 oui[3]; + u8 data_structure_type; + u8 mso; +} __packed; + +/* + * DisplayID iteration. + * + * Do not access directly, this is private. + */ +struct displayid_iter { + const struct drm_edid *drm_edid; + + const u8 *section; + int length; + int idx; + int ext_index; + + u8 version; + u8 primary_use; +}; + +void displayid_iter_edid_begin(const struct drm_edid *drm_edid, + struct displayid_iter *iter); +const struct displayid_block * +__displayid_iter_next(struct displayid_iter *iter); +#define displayid_iter_for_each(__block, __iter) \ + while (((__block) = __displayid_iter_next(__iter))) +void displayid_iter_end(struct displayid_iter *iter); + +u8 displayid_version(const struct displayid_iter *iter); +u8 displayid_primary_use(const struct displayid_iter *iter); + +#endif diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 243cacb357..535b624d4c 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -638,6 +639,7 @@ static int drm_dev_init(struct drm_device *dev, mutex_init(&dev->filelist_mutex); mutex_init(&dev->clientlist_mutex); mutex_init(&dev->master_mutex); + raw_spin_lock_init(&dev->mode_config.panic_lock); ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL); if (ret) @@ -943,6 +945,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) if (ret) goto err_unload; } + drm_panic_register(dev); DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", driver->name, driver->major, driver->minor, @@ -987,6 +990,8 @@ void drm_dev_unregister(struct drm_device *dev) { dev->registered = false; + drm_panic_unregister(dev); + drm_client_dev_unregister(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 9064cdeb13..4f54c91b31 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -29,16 +29,17 @@ */ #include +#include #include #include #include #include #include #include +#include #include #include -#include #include #include #include @@ -46,6 +47,7 @@ #include #include "drm_crtc_internal.h" +#include "drm_displayid_internal.h" #include "drm_internal.h" static int oui(u8 first, u8 second, u8 third) @@ -102,6 +104,11 @@ struct detailed_mode_closure { int modes; }; +struct drm_edid_match_closure { + const struct drm_edid_ident *ident; + 
bool matched; +}; + #define LEVEL_DMT 0 #define LEVEL_GTF 1 #define LEVEL_GTF2 2 @@ -109,13 +116,15 @@ struct detailed_mode_closure { #define EDID_QUIRK(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _quirks) \ { \ - .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \ - product_id), \ + .ident = { \ + .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, \ + vend_chr_2, product_id), \ + }, \ .quirks = _quirks \ } static const struct edid_quirk { - u32 panel_id; + const struct drm_edid_ident ident; u32 quirks; } edid_quirk_list[] = { /* Acer AL1706 */ @@ -1811,36 +1820,25 @@ static bool edid_block_is_zero(const void *edid) return !memchr_inv(edid, 0, EDID_LENGTH); } -/** - * drm_edid_are_equal - compare two edid blobs. - * @edid1: pointer to first blob - * @edid2: pointer to second blob - * This helper can be used during probing to determine if - * edid had changed. - */ -bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2) +static bool drm_edid_eq(const struct drm_edid *drm_edid, + const void *raw_edid, size_t raw_edid_size) { - int edid1_len, edid2_len; - bool edid1_present = edid1 != NULL; - bool edid2_present = edid2 != NULL; + bool edid1_present = drm_edid && drm_edid->edid && drm_edid->size; + bool edid2_present = raw_edid && raw_edid_size; if (edid1_present != edid2_present) return false; - if (edid1) { - edid1_len = edid_size(edid1); - edid2_len = edid_size(edid2); - - if (edid1_len != edid2_len) + if (edid1_present) { + if (drm_edid->size != raw_edid_size) return false; - if (memcmp(edid1, edid2, edid1_len)) + if (memcmp(drm_edid->edid, raw_edid, drm_edid->size)) return false; } return true; } -EXPORT_SYMBOL(drm_edid_are_equal); enum edid_block_status { EDID_BLOCK_OK = 0, @@ -2749,8 +2747,84 @@ const struct drm_edid *drm_edid_read(struct drm_connector *connector) } EXPORT_SYMBOL(drm_edid_read); -static u32 edid_extract_panel_id(const struct edid *edid) +/** + * drm_edid_get_product_id - Get the vendor and product identification + * @drm_edid: EDID + * @id: Where to place the product id + */ +void drm_edid_get_product_id(const struct drm_edid *drm_edid, + struct drm_edid_product_id *id) +{ + if (drm_edid && drm_edid->edid && drm_edid->size >= EDID_LENGTH) + memcpy(id, &drm_edid->edid->product_id, sizeof(*id)); + else + memset(id, 0, sizeof(*id)); +} +EXPORT_SYMBOL(drm_edid_get_product_id); + +static void decode_date(struct seq_buf *s, const struct drm_edid_product_id *id) +{ + int week = id->week_of_manufacture; + int year = id->year_of_manufacture + 1990; + + if (week == 0xff) + seq_buf_printf(s, "model year: %d", year); + else if (!week) + seq_buf_printf(s, "year of manufacture: %d", year); + else + seq_buf_printf(s, "week/year of manufacture: %d/%d", week, year); +} + +/** + * drm_edid_print_product_id - Print decoded product id to printer + * @p: drm printer + * @id: EDID product id + * @raw: If true, also print the raw hex + * + * See VESA E-EDID 1.4 section 3.4. 
+ */ +void drm_edid_print_product_id(struct drm_printer *p, + const struct drm_edid_product_id *id, bool raw) +{ + DECLARE_SEQ_BUF(date, 40); + char vend[4]; + + drm_edid_decode_mfg_id(be16_to_cpu(id->manufacturer_name), vend); + + decode_date(&date, id); + + drm_printf(p, "manufacturer name: %s, product code: %u, serial number: %u, %s\n", + vend, le16_to_cpu(id->product_code), + le32_to_cpu(id->serial_number), seq_buf_str(&date)); + + if (raw) + drm_printf(p, "raw product id: %*ph\n", (int)sizeof(*id), id); + + WARN_ON(seq_buf_has_overflowed(&date)); +} +EXPORT_SYMBOL(drm_edid_print_product_id); + +/** + * drm_edid_get_panel_id - Get a panel's ID from EDID + * @drm_edid: EDID that contains panel ID. + * + * This function uses the first block of the EDID of a panel and (assuming + * that the EDID is valid) extracts the ID out of it. The ID is a 32-bit value + * (16 bits of manufacturer ID and 16 bits of per-manufacturer ID) that's + * supposed to be different for each different modem of panel. + * + * Return: A 32-bit ID that should be different for each make/model of panel. + * See the functions drm_edid_encode_panel_id() and + * drm_edid_decode_panel_id() for some details on the structure of this + * ID. Return 0 if the EDID size is less than a base block. + */ +u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid) { + const struct edid *edid = drm_edid->edid; + + if (drm_edid->size < EDID_LENGTH) + return 0; + /* * We represent the ID as a 32-bit number so it can easily be compared * with "==". @@ -2768,60 +2842,54 @@ static u32 edid_extract_panel_id(const struct edid *edid) (u32)edid->mfg_id[1] << 16 | (u32)EDID_PRODUCT_ID(edid); } +EXPORT_SYMBOL(drm_edid_get_panel_id); /** - * drm_edid_get_panel_id - Get a panel's ID through DDC + * drm_edid_read_base_block - Get a panel's EDID base block * @adapter: I2C adapter to use for DDC * - * This function reads the first block of the EDID of a panel and (assuming - * that the EDID is valid) extracts the ID out of it. The ID is a 32-bit value - * (16 bits of manufacturer ID and 16 bits of per-manufacturer ID) that's - * supposed to be different for each different modem of panel. + * This function returns the drm_edid containing the first block of the EDID of + * a panel. * * This function is intended to be used during early probing on devices where * more than one panel might be present. Because of its intended use it must - * assume that the EDID of the panel is correct, at least as far as the ID - * is concerned (in other words, we don't process any overrides here). + * assume that the EDID of the panel is correct, at least as far as the base + * block is concerned (in other words, we don't process any overrides here). + * + * Caller should call drm_edid_free() after use. * * NOTE: it's expected that this function and drm_do_get_edid() will both * be read the EDID, but there is no caching between them. Since we're only * reading the first block, hopefully this extra overhead won't be too big. * - * Return: A 32-bit ID that should be different for each make/model of panel. - * See the functions drm_edid_encode_panel_id() and - * drm_edid_decode_panel_id() for some details on the structure of this - * ID. + * WARNING: Only use this function when the connector is unknown. For example, + * during the early probe of panel. The EDID read from the function is temporary + * and should be replaced by the full EDID returned from other drm_edid_read. + * + * Return: Pointer to allocated EDID base block, or NULL on any failure. 
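To make the intended calling convention concrete, here is a hedged sketch of the early-probe flow described above; example_probe_panel_id() and the local names are hypothetical, and the temporary EDID is expected to be replaced later by a full drm_edid_read():

    #include <linux/i2c.h>
    #include <drm/drm_edid.h>

    static u32 example_probe_panel_id(struct i2c_adapter *ddc)
    {
            const struct drm_edid *base_block;
            u32 panel_id;

            base_block = drm_edid_read_base_block(ddc);
            if (!base_block)
                    return 0;       /* no valid manufacturer ID is ever 0 */

            panel_id = drm_edid_get_panel_id(base_block);
            drm_edid_free(base_block);

            return panel_id;
    }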
*/ - -u32 drm_edid_get_panel_id(struct i2c_adapter *adapter) +const struct drm_edid *drm_edid_read_base_block(struct i2c_adapter *adapter) { enum edid_block_status status; void *base_block; - u32 panel_id = 0; - - /* - * There are no manufacturer IDs of 0, so if there is a problem reading - * the EDID then we'll just return 0. - */ base_block = kzalloc(EDID_LENGTH, GFP_KERNEL); if (!base_block) - return 0; + return NULL; status = edid_block_read(base_block, 0, drm_do_probe_ddc_edid, adapter); edid_block_status_print(status, base_block, 0); - if (edid_block_status_valid(status, edid_block_tag(base_block))) - panel_id = edid_extract_panel_id(base_block); - else + if (!edid_block_status_valid(status, edid_block_tag(base_block))) { edid_block_dump(KERN_NOTICE, base_block, 0); + kfree(base_block); + return NULL; + } - kfree(base_block); - - return panel_id; + return _drm_edid_alloc(base_block, EDID_LENGTH); } -EXPORT_SYMBOL(drm_edid_get_panel_id); +EXPORT_SYMBOL(drm_edid_read_base_block); /** * drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output @@ -2903,16 +2971,17 @@ EXPORT_SYMBOL(drm_edid_duplicate); * @drm_edid: EDID to process * * This tells subsequent routines what fixes they need to apply. + * + * Return: A u32 represents the quirks to apply. */ static u32 edid_get_quirks(const struct drm_edid *drm_edid) { - u32 panel_id = edid_extract_panel_id(drm_edid->edid); const struct edid_quirk *quirk; int i; for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) { quirk = &edid_quirk_list[i]; - if (quirk->panel_id == panel_id) + if (drm_edid_match(drm_edid, &quirk->ident)) return quirk->quirks; } @@ -4120,7 +4189,7 @@ static int add_detailed_modes(struct drm_connector *connector, * * FIXME: Prefer not returning pointers to raw EDID data. */ -const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid, +const u8 *drm_edid_find_extension(const struct drm_edid *drm_edid, int ext_id, int *ext_index) { const u8 *edid_ext = NULL; @@ -4150,11 +4219,21 @@ static bool drm_edid_has_cta_extension(const struct drm_edid *drm_edid) { const struct displayid_block *block; struct displayid_iter iter; - int ext_index = 0; + struct drm_edid_iter edid_iter; + const u8 *ext; bool found = false; /* Look for a top level CEA extension block */ - if (drm_find_edid_extension(drm_edid, CEA_EXT, &ext_index)) + drm_edid_iter_begin(drm_edid, &edid_iter); + drm_edid_iter_for_each(ext, &edid_iter) { + if (ext[0] == CEA_EXT) { + found = true; + break; + } + } + drm_edid_iter_end(&edid_iter); + + if (found) return true; /* CEA blocks can also be found embedded in a DisplayID block */ @@ -5442,6 +5521,66 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db) connector->audio_latency[0], connector->audio_latency[1]); } +static void +match_identity(const struct detailed_timing *timing, void *data) +{ + struct drm_edid_match_closure *closure = data; + unsigned int i; + const char *name = closure->ident->name; + unsigned int name_len = strlen(name); + const char *desc = timing->data.other_data.data.str.str; + unsigned int desc_len = ARRAY_SIZE(timing->data.other_data.data.str.str); + + if (name_len > desc_len || + !(is_display_descriptor(timing, EDID_DETAIL_MONITOR_NAME) || + is_display_descriptor(timing, EDID_DETAIL_MONITOR_STRING))) + return; + + if (strncmp(name, desc, name_len)) + return; + + for (i = name_len; i < desc_len; i++) { + if (desc[i] == '\n') + break; + /* Allow white space before EDID string terminator. 
*/ + if (!isspace(desc[i])) + return; + } + + closure->matched = true; +} + +/** + * drm_edid_match - match drm_edid with given identity + * @drm_edid: EDID + * @ident: the EDID identity to match with + * + * Check if the EDID matches with the given identity. + * + * Return: True if the given identity matched with EDID, false otherwise. + */ +bool drm_edid_match(const struct drm_edid *drm_edid, + const struct drm_edid_ident *ident) +{ + if (!drm_edid || drm_edid_get_panel_id(drm_edid) != ident->panel_id) + return false; + + /* Match with name only if it's not NULL. */ + if (ident->name) { + struct drm_edid_match_closure closure = { + .ident = ident, + .matched = false, + }; + + drm_for_each_detailed_block(drm_edid, match_identity, &closure); + + return closure.matched; + } + + return true; +} +EXPORT_SYMBOL(drm_edid_match); + static void monitor_name(const struct detailed_timing *timing, void *data) { @@ -6787,15 +6926,14 @@ static int _drm_edid_connector_property_update(struct drm_connector *connector, int ret; if (connector->edid_blob_ptr) { - const struct edid *old_edid = connector->edid_blob_ptr->data; - - if (old_edid) { - if (!drm_edid_are_equal(drm_edid ? drm_edid->edid : NULL, old_edid)) { - connector->epoch_counter++; - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] EDID changed, epoch counter %llu\n", - connector->base.id, connector->name, - connector->epoch_counter); - } + const void *old_edid = connector->edid_blob_ptr->data; + size_t old_edid_size = connector->edid_blob_ptr->length; + + if (old_edid && !drm_edid_eq(drm_edid, old_edid, old_edid_size)) { + connector->epoch_counter++; + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] EDID changed, epoch counter %llu\n", + connector->base.id, connector->name, + connector->epoch_counter); } } diff --git a/drivers/gpu/drm/drm_eld.c b/drivers/gpu/drm/drm_eld.c index 5177991aa2..c0428d07de 100644 --- a/drivers/gpu/drm/drm_eld.c +++ b/drivers/gpu/drm/drm_eld.c @@ -3,10 +3,12 @@ * Copyright © 2023 Intel Corporation */ +#include + #include #include -#include "drm_internal.h" +#include "drm_crtc_internal.h" /** * drm_eld_sad_get - get SAD from ELD to struct cea_sad diff --git a/drivers/gpu/drm/drm_fb_dma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c index 3b535ad1b0..e1d61a6521 100644 --- a/drivers/gpu/drm/drm_fb_dma_helper.c +++ b/drivers/gpu/drm/drm_fb_dma_helper.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -148,3 +149,47 @@ void drm_fb_dma_sync_non_coherent(struct drm_device *drm, } } EXPORT_SYMBOL_GPL(drm_fb_dma_sync_non_coherent); + +/** + * drm_fb_dma_get_scanout_buffer - Provide a scanout buffer in case of panic + * @plane: DRM primary plane + * @sb: scanout buffer for the panic handler + * Returns: 0 or negative error code + * + * Generic get_scanout_buffer() implementation, for drivers that uses the + * drm_fb_dma_helper. It won't call vmap in the panic context, so the driver + * should make sure the primary plane is vmapped, otherwise the panic screen + * won't get displayed. 
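As a hedged illustration of the wiring this helper enables, a driver built on the DMA GEM helpers could plug it straight into its primary plane's helper funcs; the struct name below is hypothetical and the usual atomic callbacks are omitted:

    #include <drm/drm_fb_dma_helper.h>
    #include <drm/drm_modeset_helper_vtables.h>

    static const struct drm_plane_helper_funcs example_primary_plane_helper_funcs = {
            /* .atomic_check, .atomic_update, ... as before */
            .get_scanout_buffer = drm_fb_dma_get_scanout_buffer,
    };

With that callback in place, the plane is picked up automatically by drm_panic_register() when the device is registered.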
+ */ +int drm_fb_dma_get_scanout_buffer(struct drm_plane *plane, + struct drm_scanout_buffer *sb) +{ + struct drm_gem_dma_object *dma_obj; + struct drm_framebuffer *fb; + + if (!plane->state || !plane->state->fb) + return -EINVAL; + + fb = plane->state->fb; + /* Only support linear modifier */ + if (fb->modifier != DRM_FORMAT_MOD_LINEAR) + return -ENODEV; + + dma_obj = drm_fb_dma_get_gem_obj(fb, 0); + + /* Buffer should be accessible from the CPU */ + if (dma_obj->base.import_attach) + return -ENODEV; + + /* Buffer should be already mapped to CPU */ + if (!dma_obj->vaddr) + return -ENODEV; + + iosys_map_set_vaddr(&sb->map[0], dma_obj->vaddr); + sb->format = fb->format; + sb->height = fb->height; + sb->width = fb->width; + sb->pitch[0] = fb->pitches[0]; + return 0; +} +EXPORT_SYMBOL(drm_fb_dma_get_scanout_buffer); diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index 13cd754af3..77695339e4 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -90,7 +90,8 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper, sizes->surface_width, sizes->surface_height, sizes->surface_bpp); - format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); + format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, + sizes->surface_depth); buffer = drm_client_framebuffer_create(client, sizes->surface_width, sizes->surface_height, format); if (IS_ERR(buffer)) diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c index cbb7418b78..1e200d815e 100644 --- a/drivers/gpu/drm/drm_fbdev_generic.c +++ b/drivers/gpu/drm/drm_fbdev_generic.c @@ -197,14 +197,14 @@ static int drm_fbdev_generic_damage_blit(struct drm_fb_helper *fb_helper, */ mutex_lock(&fb_helper->lock); - ret = drm_client_buffer_vmap(buffer, &map); + ret = drm_client_buffer_vmap_local(buffer, &map); if (ret) goto out; dst = map; drm_fbdev_generic_damage_blit_real(fb_helper, clip, &dst); - drm_client_buffer_vunmap(buffer); + drm_client_buffer_vunmap_local(buffer); out: mutex_unlock(&fb_helper->lock); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 44a948b80e..d4bbc5d109 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1161,7 +1161,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent, obj->funcs->print_info(p, indent, obj); } -int drm_gem_pin(struct drm_gem_object *obj) +int drm_gem_pin_locked(struct drm_gem_object *obj) { if (obj->funcs->pin) return obj->funcs->pin(obj); @@ -1169,12 +1169,30 @@ int drm_gem_pin(struct drm_gem_object *obj) return 0; } -void drm_gem_unpin(struct drm_gem_object *obj) +void drm_gem_unpin_locked(struct drm_gem_object *obj) { if (obj->funcs->unpin) obj->funcs->unpin(obj); } +int drm_gem_pin(struct drm_gem_object *obj) +{ + int ret; + + dma_resv_lock(obj->resv, NULL); + ret = drm_gem_pin_locked(obj); + dma_resv_unlock(obj->resv); + + return ret; +} + +void drm_gem_unpin(struct drm_gem_object *obj) +{ + dma_resv_lock(obj->resv, NULL); + drm_gem_unpin_locked(obj); + dma_resv_unlock(obj->resv); +} + int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) { int ret; @@ -1209,6 +1227,18 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) } EXPORT_SYMBOL(drm_gem_vunmap); +void drm_gem_lock(struct drm_gem_object *obj) +{ + dma_resv_lock(obj->resv, NULL); +} +EXPORT_SYMBOL(drm_gem_lock); + +void drm_gem_unlock(struct drm_gem_object *obj) +{ + dma_resv_unlock(obj->resv); +} +EXPORT_SYMBOL(drm_gem_unlock); 
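A hedged sketch of how these wrappers pair with the *_locked() helpers exported elsewhere in this series, assuming the declarations live in <drm/drm_gem.h> and <drm/drm_gem_shmem_helper.h>; the function name is hypothetical:

    #include <drm/drm_gem.h>
    #include <drm/drm_gem_shmem_helper.h>

    static int example_pin_shmem(struct drm_gem_shmem_object *shmem)
    {
            int ret;

            drm_gem_lock(&shmem->base);     /* takes the object's dma-resv lock */
            ret = drm_gem_shmem_pin_locked(shmem);
            drm_gem_unlock(&shmem->base);

            return ret;
    }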
+ int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map) { int ret; diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 1ff0678be7..53c003983a 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -10,7 +10,6 @@ #include #include #include -#include #ifdef CONFIG_X86 #include @@ -228,23 +227,27 @@ void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) } EXPORT_SYMBOL(drm_gem_shmem_put_pages); -static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem) +int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem) { int ret; dma_resv_assert_held(shmem->base.resv); + drm_WARN_ON(shmem->base.dev, shmem->base.import_attach); + ret = drm_gem_shmem_get_pages(shmem); return ret; } +EXPORT_SYMBOL(drm_gem_shmem_pin_locked); -static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem) { dma_resv_assert_held(shmem->base.resv); drm_gem_shmem_put_pages(shmem); } +EXPORT_SYMBOL(drm_gem_shmem_unpin_locked); /** * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 1ac284a9e8..6027584406 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -282,6 +282,8 @@ static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo, struct ttm_operation_ctx ctx = { false, false }; int ret; + dma_resv_assert_held(gbo->bo.base.resv); + if (gbo->bo.pin_count) goto out; @@ -337,6 +339,8 @@ EXPORT_SYMBOL(drm_gem_vram_pin); static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo) { + dma_resv_assert_held(gbo->bo.base.resv); + ttm_bo_unpin(&gbo->bo); } @@ -363,11 +367,28 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo) } EXPORT_SYMBOL(drm_gem_vram_unpin); -static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, - struct iosys_map *map) +/** + * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address + * space + * @gbo: The GEM VRAM object to map + * @map: Returns the kernel virtual address of the VRAM GEM object's backing + * store. + * + * The vmap function pins a GEM VRAM object to its current location, either + * system or video memory, and maps its buffer into kernel address space. + * As pinned object cannot be relocated, you should avoid pinning objects + * permanently. Call drm_gem_vram_vunmap() with the returned address to + * unmap and unpin the GEM VRAM object. + * + * Returns: + * 0 on success, or a negative error code otherwise. + */ +int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map) { int ret; + dma_resv_assert_held(gbo->bo.base.resv); + if (gbo->vmap_use_count > 0) goto out; @@ -388,12 +409,23 @@ out: return 0; } +EXPORT_SYMBOL(drm_gem_vram_vmap); -static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo, - struct iosys_map *map) +/** + * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object + * @gbo: The GEM VRAM object to unmap + * @map: Kernel virtual address where the VRAM GEM object was mapped + * + * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See + * the documentation for drm_gem_vram_vmap() for more information. 
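A hedged sketch of the calling convention after this change, where the caller now holds the buffer's reservation lock across the map/unmap pair; the function and variable names are illustrative:

    #include <linux/dma-resv.h>
    #include <linux/iosys-map.h>
    #include <drm/drm_gem_vram_helper.h>

    static int example_vram_cpu_access(struct drm_gem_vram_object *gbo)
    {
            struct iosys_map map;
            int ret;

            ret = dma_resv_lock(gbo->bo.base.resv, NULL);
            if (ret)
                    return ret;

            ret = drm_gem_vram_vmap(gbo, &map);
            if (!ret) {
                    /* ... access the buffer through &map ... */
                    drm_gem_vram_vunmap(gbo, &map);
            }

            dma_resv_unlock(gbo->bo.base.resv);
            return ret;
    }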
+ */ +void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, + struct iosys_map *map) { struct drm_device *dev = gbo->bo.base.dev; + dma_resv_assert_held(gbo->bo.base.resv); + if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count)) return; @@ -410,60 +442,6 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo, * from memory. See drm_gem_vram_bo_driver_move_notify(). */ } - -/** - * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address - * space - * @gbo: The GEM VRAM object to map - * @map: Returns the kernel virtual address of the VRAM GEM object's backing - * store. - * - * The vmap function pins a GEM VRAM object to its current location, either - * system or video memory, and maps its buffer into kernel address space. - * As pinned object cannot be relocated, you should avoid pinning objects - * permanently. Call drm_gem_vram_vunmap() with the returned address to - * unmap and unpin the GEM VRAM object. - * - * Returns: - * 0 on success, or a negative error code otherwise. - */ -int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map) -{ - int ret; - - dma_resv_assert_held(gbo->bo.base.resv); - - ret = drm_gem_vram_pin_locked(gbo, 0); - if (ret) - return ret; - ret = drm_gem_vram_kmap_locked(gbo, map); - if (ret) - goto err_drm_gem_vram_unpin_locked; - - return 0; - -err_drm_gem_vram_unpin_locked: - drm_gem_vram_unpin_locked(gbo); - return ret; -} -EXPORT_SYMBOL(drm_gem_vram_vmap); - -/** - * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object - * @gbo: The GEM VRAM object to unmap - * @map: Kernel virtual address where the VRAM GEM object was mapped - * - * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See - * the documentation for drm_gem_vram_vmap() for more information. - */ -void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, - struct iosys_map *map) -{ - dma_resv_assert_held(gbo->bo.base.resv); - - drm_gem_vram_kunmap_locked(gbo, map); - drm_gem_vram_unpin_locked(gbo); -} EXPORT_SYMBOL(drm_gem_vram_vunmap); /** @@ -768,7 +746,8 @@ static int drm_gem_vram_object_pin(struct drm_gem_object *gem) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); - /* Fbdev console emulation is the use case of these PRIME + /* + * Fbdev console emulation is the use case of these PRIME * helpers. This may involve updating a hardware buffer from * a shadow FB. We pin the buffer to it's current location * (either video RAM or system memory) to prevent it from @@ -776,7 +755,7 @@ static int drm_gem_vram_object_pin(struct drm_gem_object *gem) * the buffer to be pinned to VRAM, implement a callback that * sets the flags accordingly. */ - return drm_gem_vram_pin(gbo, 0); + return drm_gem_vram_pin_locked(gbo, 0); } /** @@ -787,7 +766,7 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); - drm_gem_vram_unpin(gbo); + drm_gem_vram_unpin_locked(gbo); } /** diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 8e4faf0a28..690505a1f7 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -21,6 +21,9 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ +#ifndef __DRM_INTERNAL_H__ +#define __DRM_INTERNAL_H__ + #include #include @@ -32,7 +35,6 @@ #define DRM_IF_VERSION(maj, min) (maj << 16 | min) -struct cea_sad; struct dentry; struct dma_buf; struct iosys_map; @@ -170,6 +172,8 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); void drm_gem_print_info(struct drm_printer *p, unsigned int indent, const struct drm_gem_object *obj); +int drm_gem_pin_locked(struct drm_gem_object *obj); +void drm_gem_unpin_locked(struct drm_gem_object *obj); int drm_gem_pin(struct drm_gem_object *obj); void drm_gem_unpin(struct drm_gem_object *obj); int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map); @@ -273,6 +277,4 @@ void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent, const struct drm_framebuffer *fb); void drm_framebuffer_debugfs_init(struct drm_device *dev); -/* drm_edid.c */ -void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad); -void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad); +#endif /* __DRM_INTERNAL_H__ */ diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 9874ff6d47..795001bb7f 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -645,29 +645,56 @@ int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi, EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size); /** - * mipi_dsi_compression_mode() - enable/disable DSC on the peripheral + * mipi_dsi_compression_mode_ext() - enable/disable DSC on the peripheral * @dsi: DSI peripheral device * @enable: Whether to enable or disable the DSC + * @algo: Selected compression algorithm + * @pps_selector: Select PPS from the table of pre-stored or uploaded PPS entries * - * Enable or disable Display Stream Compression on the peripheral using the - * default Picture Parameter Set and VESA DSC 1.1 algorithm. + * Enable or disable Display Stream Compression on the peripheral. * * Return: 0 on success or a negative error code on failure. */ -int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable) +int mipi_dsi_compression_mode_ext(struct mipi_dsi_device *dsi, bool enable, + enum mipi_dsi_compression_algo algo, + unsigned int pps_selector) { - /* Note: Needs updating for non-default PPS or algorithm */ - u8 tx[2] = { enable << 0, 0 }; + u8 tx[2] = { }; struct mipi_dsi_msg msg = { .channel = dsi->channel, .type = MIPI_DSI_COMPRESSION_MODE, .tx_len = sizeof(tx), .tx_buf = tx, }; - int ret = mipi_dsi_device_transfer(dsi, &msg); + int ret; + + if (algo > 3 || pps_selector > 3) + return -EINVAL; + + tx[0] = (enable << 0) | + (algo << 1) | + (pps_selector << 4); + + ret = mipi_dsi_device_transfer(dsi, &msg); return (ret < 0) ? ret : 0; } +EXPORT_SYMBOL(mipi_dsi_compression_mode_ext); + +/** + * mipi_dsi_compression_mode() - enable/disable DSC on the peripheral + * @dsi: DSI peripheral device + * @enable: Whether to enable or disable the DSC + * + * Enable or disable Display Stream Compression on the peripheral using the + * default Picture Parameter Set and VESA DSC 1.1 algorithm. + * + * Return: 0 on success or a negative error code on failure. 
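For instance, a panel driver that needs a non-default PPS entry could now do the following (a hedged sketch; the function name is hypothetical):

    #include <drm/drm_mipi_dsi.h>

    static int example_enable_dsc(struct mipi_dsi_device *dsi)
    {
            /* VESA DSC algorithm, PPS table entry 1 */
            return mipi_dsi_compression_mode_ext(dsi, true,
                                                 MIPI_DSI_COMPRESSION_DSC, 1);
    }

Existing callers keep the old behaviour through the mipi_dsi_compression_mode() wrapper below.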
+ */ +int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable) +{ + return mipi_dsi_compression_mode_ext(dsi, enable, MIPI_DSI_COMPRESSION_DSC, 0); +} EXPORT_SYMBOL(mipi_dsi_compression_mode); /** diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 48fd2d67f3..5689722582 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -372,6 +372,13 @@ static int drm_mode_create_standard_properties(struct drm_device *dev) return -ENOMEM; dev->mode_config.modifiers_property = prop; + prop = drm_property_create(dev, + DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB, + "SIZE_HINTS", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.size_hints_property = prop; + return 0; } diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index c4f88c3a93..2d8b037161 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -373,8 +373,8 @@ static int fill_analog_mode(struct drm_device *dev, if (!bt601 && (hact_duration_ns < params->hact_ns.min || hact_duration_ns > params->hact_ns.max)) { - DRM_ERROR("Invalid horizontal active area duration: %uns (min: %u, max %u)\n", - hact_duration_ns, params->hact_ns.min, params->hact_ns.max); + drm_err(dev, "Invalid horizontal active area duration: %uns (min: %u, max %u)\n", + hact_duration_ns, params->hact_ns.min, params->hact_ns.max); return -EINVAL; } @@ -385,8 +385,8 @@ static int fill_analog_mode(struct drm_device *dev, if (!bt601 && (hblk_duration_ns < params->hblk_ns.min || hblk_duration_ns > params->hblk_ns.max)) { - DRM_ERROR("Invalid horizontal blanking duration: %uns (min: %u, max %u)\n", - hblk_duration_ns, params->hblk_ns.min, params->hblk_ns.max); + drm_err(dev, "Invalid horizontal blanking duration: %uns (min: %u, max %u)\n", + hblk_duration_ns, params->hblk_ns.min, params->hblk_ns.max); return -EINVAL; } @@ -397,8 +397,8 @@ static int fill_analog_mode(struct drm_device *dev, if (!bt601 && (hslen_duration_ns < params->hslen_ns.min || hslen_duration_ns > params->hslen_ns.max)) { - DRM_ERROR("Invalid horizontal sync duration: %uns (min: %u, max %u)\n", - hslen_duration_ns, params->hslen_ns.min, params->hslen_ns.max); + drm_err(dev, "Invalid horizontal sync duration: %uns (min: %u, max %u)\n", + hslen_duration_ns, params->hslen_ns.min, params->hslen_ns.max); return -EINVAL; } @@ -409,7 +409,8 @@ static int fill_analog_mode(struct drm_device *dev, if (!bt601 && (porches_duration_ns > (params->hfp_ns.max + params->hbp_ns.max) || porches_duration_ns < (params->hfp_ns.min + params->hbp_ns.min))) { - DRM_ERROR("Invalid horizontal porches duration: %uns\n", porches_duration_ns); + drm_err(dev, "Invalid horizontal porches duration: %uns\n", + porches_duration_ns); return -EINVAL; } @@ -431,8 +432,8 @@ static int fill_analog_mode(struct drm_device *dev, if (!bt601 && (hfp_duration_ns < params->hfp_ns.min || hfp_duration_ns > params->hfp_ns.max)) { - DRM_ERROR("Invalid horizontal front porch duration: %uns (min: %u, max %u)\n", - hfp_duration_ns, params->hfp_ns.min, params->hfp_ns.max); + drm_err(dev, "Invalid horizontal front porch duration: %uns (min: %u, max %u)\n", + hfp_duration_ns, params->hfp_ns.min, params->hfp_ns.max); return -EINVAL; } @@ -443,8 +444,8 @@ static int fill_analog_mode(struct drm_device *dev, if (!bt601 && (hbp_duration_ns < params->hbp_ns.min || hbp_duration_ns > params->hbp_ns.max)) { - DRM_ERROR("Invalid horizontal back porch duration: %uns (min: %u, max %u)\n", - hbp_duration_ns, params->hbp_ns.min, params->hbp_ns.max); + 
drm_err(dev, "Invalid horizontal back porch duration: %uns (min: %u, max %u)\n", + hbp_duration_ns, params->hbp_ns.min, params->hbp_ns.max); return -EINVAL; } @@ -495,8 +496,8 @@ static int fill_analog_mode(struct drm_device *dev, vtotal = vactive + vfp + vslen + vbp; if (params->num_lines != vtotal) { - DRM_ERROR("Invalid vertical total: %upx (expected %upx)\n", - vtotal, params->num_lines); + drm_err(dev, "Invalid vertical total: %upx (expected %upx)\n", + vtotal, params->num_lines); return -EINVAL; } @@ -1200,9 +1201,8 @@ int of_get_drm_display_mode(struct device_node *np, if (bus_flags) drm_bus_flags_from_videomode(&vm, bus_flags); - pr_debug("%pOF: got %dx%d display mode\n", - np, vm.hactive, vm.vactive); - drm_mode_debug_printmodeline(dmode); + pr_debug("%pOF: got %dx%d display mode: " DRM_MODE_FMT "\n", + np, vm.hactive, vm.vactive, DRM_MODE_ARG(dmode)); return 0; } @@ -1250,7 +1250,7 @@ int of_get_drm_panel_display_mode(struct device_node *np, dmode->width_mm = width_mm; dmode->height_mm = height_mm; - drm_mode_debug_printmodeline(dmode); + pr_debug(DRM_MODE_FMT "\n", DRM_MODE_ARG(dmode)); return 0; } @@ -1812,10 +1812,8 @@ void drm_mode_prune_invalid(struct drm_device *dev, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); } if (verbose) { - drm_mode_debug_printmodeline(mode); - DRM_DEBUG_KMS("Not using %s mode: %s\n", - mode->name, - drm_get_mode_status_name(mode->status)); + drm_dbg_kms(dev, "Rejected mode: " DRM_MODE_FMT " (%s)\n", + DRM_MODE_ARG(mode), drm_get_mode_status_name(mode->status)); } drm_mode_destroy(dev, mode); } diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 5db52d6c5c..3860a8ce1e 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -420,14 +420,14 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"), }, .driver_data = (void *)&lcd1280x1920_rightside_up, - }, { /* Valve Steam Deck */ + }, { /* Valve Steam Deck (Jupiter) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"), DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"), }, .driver_data = (void *)&lcd800x1280_rightside_up, - }, { /* Valve Steam Deck */ + }, { /* Valve Steam Deck (Galileo) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"), diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c new file mode 100644 index 0000000000..831b214975 --- /dev/null +++ b/drivers/gpu/drm/drm_panic.c @@ -0,0 +1,579 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT +/* + * Copyright (c) 2023 Red Hat. + * Author: Jocelyn Falempe + * inspired by the drm_log driver from David Herrmann + * Tux Ascii art taken from cowsay written by Tony Monroe + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Jocelyn Falempe"); +MODULE_DESCRIPTION("DRM panic handler"); +MODULE_LICENSE("GPL"); + +/** + * DOC: overview + * + * To enable DRM panic for a driver, the primary plane must implement a + * &drm_plane_helper_funcs.get_scanout_buffer helper function. It is then + * automatically registered to the drm panic handler. + * When a panic occurs, the &drm_plane_helper_funcs.get_scanout_buffer will be + * called, and the driver can provide a framebuffer so the panic handler can + * draw the panic screen on it. 
Currently only linear buffer and a few color + * formats are supported. + * Optionally the driver can also provide a &drm_plane_helper_funcs.panic_flush + * callback, that will be called after that, to send additional commands to the + * hardware to make the scanout buffer visible. + */ + +/* + * This module displays a user friendly message on screen when a kernel panic + * occurs. This is conflicting with fbcon, so you can only enable it when fbcon + * is disabled. + * It's intended for end-user, so have minimal technical/debug information. + * + * Implementation details: + * + * It is a panic handler, so it can't take lock, allocate memory, run tasks/irq, + * or attempt to sleep. It's a best effort, and it may not be able to display + * the message in all situations (like if the panic occurs in the middle of a + * modesetting). + * It will display only one static frame, so performance optimizations are low + * priority as the machine is already in an unusable state. + */ + +struct drm_panic_line { + u32 len; + const char *txt; +}; + +#define PANIC_LINE(s) {.len = sizeof(s) - 1, .txt = s} + +static struct drm_panic_line panic_msg[] = { + PANIC_LINE("KERNEL PANIC !"), + PANIC_LINE(""), + PANIC_LINE("Please reboot your computer."), +}; + +static const struct drm_panic_line logo[] = { + PANIC_LINE(" .--. _"), + PANIC_LINE(" |o_o | | |"), + PANIC_LINE(" |:_/ | | |"), + PANIC_LINE(" // \\ \\ |_|"), + PANIC_LINE(" (| | ) _"), + PANIC_LINE(" /'\\_ _/`\\ (_)"), + PANIC_LINE(" \\___)=(___/"), +}; + +/* + * Color conversion + */ + +static u16 convert_xrgb8888_to_rgb565(u32 pix) +{ + return ((pix & 0x00F80000) >> 8) | + ((pix & 0x0000FC00) >> 5) | + ((pix & 0x000000F8) >> 3); +} + +static u16 convert_xrgb8888_to_rgba5551(u32 pix) +{ + return ((pix & 0x00f80000) >> 8) | + ((pix & 0x0000f800) >> 5) | + ((pix & 0x000000f8) >> 2) | + BIT(0); /* set alpha bit */ +} + +static u16 convert_xrgb8888_to_xrgb1555(u32 pix) +{ + return ((pix & 0x00f80000) >> 9) | + ((pix & 0x0000f800) >> 6) | + ((pix & 0x000000f8) >> 3); +} + +static u16 convert_xrgb8888_to_argb1555(u32 pix) +{ + return BIT(15) | /* set alpha bit */ + ((pix & 0x00f80000) >> 9) | + ((pix & 0x0000f800) >> 6) | + ((pix & 0x000000f8) >> 3); +} + +static u32 convert_xrgb8888_to_argb8888(u32 pix) +{ + return pix | GENMASK(31, 24); /* fill alpha bits */ +} + +static u32 convert_xrgb8888_to_xbgr8888(u32 pix) +{ + return ((pix & 0x00ff0000) >> 16) << 0 | + ((pix & 0x0000ff00) >> 8) << 8 | + ((pix & 0x000000ff) >> 0) << 16 | + ((pix & 0xff000000) >> 24) << 24; +} + +static u32 convert_xrgb8888_to_abgr8888(u32 pix) +{ + return ((pix & 0x00ff0000) >> 16) << 0 | + ((pix & 0x0000ff00) >> 8) << 8 | + ((pix & 0x000000ff) >> 0) << 16 | + GENMASK(31, 24); /* fill alpha bits */ +} + +static u32 convert_xrgb8888_to_xrgb2101010(u32 pix) +{ + pix = ((pix & 0x000000FF) << 2) | + ((pix & 0x0000FF00) << 4) | + ((pix & 0x00FF0000) << 6); + return pix | ((pix >> 8) & 0x00300C03); +} + +static u32 convert_xrgb8888_to_argb2101010(u32 pix) +{ + pix = ((pix & 0x000000FF) << 2) | + ((pix & 0x0000FF00) << 4) | + ((pix & 0x00FF0000) << 6); + return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03); +} + +/* + * convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format + * @color: input color, in xrgb8888 format + * @format: output format + * + * Returns: + * Color in the format specified, casted to u32. + * Or 0 if the format is not supported. 
+ */ +static u32 convert_from_xrgb8888(u32 color, u32 format) +{ + switch (format) { + case DRM_FORMAT_RGB565: + return convert_xrgb8888_to_rgb565(color); + case DRM_FORMAT_RGBA5551: + return convert_xrgb8888_to_rgba5551(color); + case DRM_FORMAT_XRGB1555: + return convert_xrgb8888_to_xrgb1555(color); + case DRM_FORMAT_ARGB1555: + return convert_xrgb8888_to_argb1555(color); + case DRM_FORMAT_RGB888: + case DRM_FORMAT_XRGB8888: + return color; + case DRM_FORMAT_ARGB8888: + return convert_xrgb8888_to_argb8888(color); + case DRM_FORMAT_XBGR8888: + return convert_xrgb8888_to_xbgr8888(color); + case DRM_FORMAT_ABGR8888: + return convert_xrgb8888_to_abgr8888(color); + case DRM_FORMAT_XRGB2101010: + return convert_xrgb8888_to_xrgb2101010(color); + case DRM_FORMAT_ARGB2101010: + return convert_xrgb8888_to_argb2101010(color); + default: + WARN_ONCE(1, "Can't convert to %p4cc\n", &format); + return 0; + } +} + +/* + * Blit & Fill + */ +/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */ +static bool drm_panic_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y) +{ + return (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) != 0; +} + +static void drm_panic_blit16(struct iosys_map *dmap, unsigned int dpitch, + const u8 *sbuf8, unsigned int spitch, + unsigned int height, unsigned int width, + u16 fg16) +{ + unsigned int y, x; + + for (y = 0; y < height; y++) + for (x = 0; x < width; x++) + if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y)) + iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16); +} + +static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch, + const u8 *sbuf8, unsigned int spitch, + unsigned int height, unsigned int width, + u32 fg32) +{ + unsigned int y, x; + + for (y = 0; y < height; y++) { + for (x = 0; x < width; x++) { + u32 off = y * dpitch + x * 3; + + if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y)) { + /* write blue-green-red to output in little endianness */ + iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0); + iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8); + iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16); + } + } + } +} + +static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch, + const u8 *sbuf8, unsigned int spitch, + unsigned int height, unsigned int width, + u32 fg32) +{ + unsigned int y, x; + + for (y = 0; y < height; y++) + for (x = 0; x < width; x++) + if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y)) + iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32); +} + +/* + * drm_panic_blit - convert a monochrome image to a linear framebuffer + * @dmap: destination iosys_map + * @dpitch: destination pitch in bytes + * @sbuf8: source buffer, in monochrome format, 8 pixels per byte. + * @spitch: source pitch in bytes + * @height: height of the image to copy, in pixels + * @width: width of the image to copy, in pixels + * @fg_color: foreground color, in destination format + * @pixel_width: pixel width in bytes. + * + * This can be used to draw a font character, which is a monochrome image, to a + * framebuffer in other supported format. 
+ */ +static void drm_panic_blit(struct iosys_map *dmap, unsigned int dpitch, + const u8 *sbuf8, unsigned int spitch, + unsigned int height, unsigned int width, + u32 fg_color, unsigned int pixel_width) +{ + switch (pixel_width) { + case 2: + drm_panic_blit16(dmap, dpitch, sbuf8, spitch, + height, width, fg_color); + break; + case 3: + drm_panic_blit24(dmap, dpitch, sbuf8, spitch, + height, width, fg_color); + break; + case 4: + drm_panic_blit32(dmap, dpitch, sbuf8, spitch, + height, width, fg_color); + break; + default: + WARN_ONCE(1, "Can't blit with pixel width %d\n", pixel_width); + } +} + +static void drm_panic_fill16(struct iosys_map *dmap, unsigned int dpitch, + unsigned int height, unsigned int width, + u16 color) +{ + unsigned int y, x; + + for (y = 0; y < height; y++) + for (x = 0; x < width; x++) + iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color); +} + +static void drm_panic_fill24(struct iosys_map *dmap, unsigned int dpitch, + unsigned int height, unsigned int width, + u32 color) +{ + unsigned int y, x; + + for (y = 0; y < height; y++) { + for (x = 0; x < width; x++) { + unsigned int off = y * dpitch + x * 3; + + /* write blue-green-red to output in little endianness */ + iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0); + iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8); + iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16); + } + } +} + +static void drm_panic_fill32(struct iosys_map *dmap, unsigned int dpitch, + unsigned int height, unsigned int width, + u32 color) +{ + unsigned int y, x; + + for (y = 0; y < height; y++) + for (x = 0; x < width; x++) + iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color); +} + +/* + * drm_panic_fill - Fill a rectangle with a color + * @dmap: destination iosys_map, pointing to the top left corner of the rectangle + * @dpitch: destination pitch in bytes + * @height: height of the rectangle, in pixels + * @width: width of the rectangle, in pixels + * @color: color to fill the rectangle. + * @pixel_width: pixel width in bytes + * + * Fill a rectangle with a color, in a linear framebuffer. + */ +static void drm_panic_fill(struct iosys_map *dmap, unsigned int dpitch, + unsigned int height, unsigned int width, + u32 color, unsigned int pixel_width) +{ + switch (pixel_width) { + case 2: + drm_panic_fill16(dmap, dpitch, height, width, color); + break; + case 3: + drm_panic_fill24(dmap, dpitch, height, width, color); + break; + case 4: + drm_panic_fill32(dmap, dpitch, height, width, color); + break; + default: + WARN_ONCE(1, "Can't fill with pixel width %d\n", pixel_width); + } +} + +static const u8 *get_char_bitmap(const struct font_desc *font, char c, size_t font_pitch) +{ + return font->data + (c * font->height) * font_pitch; +} + +static unsigned int get_max_line_len(const struct drm_panic_line *lines, int len) +{ + int i; + unsigned int max = 0; + + for (i = 0; i < len; i++) + max = max(lines[i].len, max); + return max; +} + +/* + * Draw a text in a rectangle on a framebuffer. 
The text is truncated if it overflows the rectangle + */ +static void draw_txt_rectangle(struct drm_scanout_buffer *sb, + const struct font_desc *font, + const struct drm_panic_line *msg, + unsigned int msg_lines, + bool centered, + struct drm_rect *clip, + u32 color) +{ + int i, j; + const u8 *src; + size_t font_pitch = DIV_ROUND_UP(font->width, 8); + struct iosys_map dst; + unsigned int px_width = sb->format->cpp[0]; + int left = 0; + + msg_lines = min(msg_lines, drm_rect_height(clip) / font->height); + for (i = 0; i < msg_lines; i++) { + size_t line_len = min(msg[i].len, drm_rect_width(clip) / font->width); + + if (centered) + left = (drm_rect_width(clip) - (line_len * font->width)) / 2; + + dst = sb->map[0]; + iosys_map_incr(&dst, (clip->y1 + i * font->height) * sb->pitch[0] + + (clip->x1 + left) * px_width); + for (j = 0; j < line_len; j++) { + src = get_char_bitmap(font, msg[i].txt[j], font_pitch); + drm_panic_blit(&dst, sb->pitch[0], src, font_pitch, + font->height, font->width, color, px_width); + iosys_map_incr(&dst, font->width * px_width); + } + } +} + +/* + * Draw the panic message at the center of the screen + */ +static void draw_panic_static(struct drm_scanout_buffer *sb) +{ + size_t msg_lines = ARRAY_SIZE(panic_msg); + size_t logo_lines = ARRAY_SIZE(logo); + u32 fg_color = CONFIG_DRM_PANIC_FOREGROUND_COLOR; + u32 bg_color = CONFIG_DRM_PANIC_BACKGROUND_COLOR; + const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL); + struct drm_rect r_logo, r_msg; + + if (!font) + return; + + fg_color = convert_from_xrgb8888(fg_color, sb->format->format); + bg_color = convert_from_xrgb8888(bg_color, sb->format->format); + + r_logo = DRM_RECT_INIT(0, 0, + get_max_line_len(logo, logo_lines) * font->width, + logo_lines * font->height); + r_msg = DRM_RECT_INIT(0, 0, + min(get_max_line_len(panic_msg, msg_lines) * font->width, sb->width), + min(msg_lines * font->height, sb->height)); + + /* Center the panic message */ + drm_rect_translate(&r_msg, (sb->width - r_msg.x2) / 2, (sb->height - r_msg.y2) / 2); + + /* Fill with the background color, and draw text on top */ + drm_panic_fill(&sb->map[0], sb->pitch[0], sb->height, sb->width, + bg_color, sb->format->cpp[0]); + + if ((r_msg.x1 >= drm_rect_width(&r_logo) || r_msg.y1 >= drm_rect_height(&r_logo)) && + drm_rect_width(&r_logo) <= sb->width && drm_rect_height(&r_logo) <= sb->height) { + draw_txt_rectangle(sb, font, logo, logo_lines, false, &r_logo, fg_color); + } + draw_txt_rectangle(sb, font, panic_msg, msg_lines, true, &r_msg, fg_color); +} + +/* + * drm_panic_is_format_supported() + * @format: a fourcc color code + * Returns: true if supported, false otherwise. + * + * Check if drm_panic will be able to use this color format. 
+ */ +static bool drm_panic_is_format_supported(const struct drm_format_info *format) +{ + if (format->num_planes != 1) + return false; + return convert_from_xrgb8888(0xffffff, format->format) != 0; +} + +static void draw_panic_plane(struct drm_plane *plane) +{ + struct drm_scanout_buffer sb; + int ret; + unsigned long flags; + + if (!drm_panic_trylock(plane->dev, flags)) + return; + + ret = plane->helper_private->get_scanout_buffer(plane, &sb); + + if (!ret && drm_panic_is_format_supported(sb.format)) { + draw_panic_static(&sb); + if (plane->helper_private->panic_flush) + plane->helper_private->panic_flush(plane); + } + drm_panic_unlock(plane->dev, flags); +} + +static struct drm_plane *to_drm_plane(struct kmsg_dumper *kd) +{ + return container_of(kd, struct drm_plane, kmsg_panic); +} + +static void drm_panic(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason) +{ + struct drm_plane *plane = to_drm_plane(dumper); + + if (reason == KMSG_DUMP_PANIC) + draw_panic_plane(plane); +} + + +/* + * DEBUG FS, This is currently unsafe. + * Create one file per plane, so it's possible to debug one plane at a time. + * TODO: It would be better to emulate an NMI context. + */ +#ifdef CONFIG_DRM_PANIC_DEBUG +#include + +static ssize_t debugfs_trigger_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + bool run; + + if (kstrtobool_from_user(user_buf, count, &run) == 0 && run) { + struct drm_plane *plane = file->private_data; + + draw_panic_plane(plane); + } + return count; +} + +static const struct file_operations dbg_drm_panic_ops = { + .owner = THIS_MODULE, + .write = debugfs_trigger_write, + .open = simple_open, +}; + +static void debugfs_register_plane(struct drm_plane *plane, int index) +{ + char fname[32]; + + snprintf(fname, 32, "drm_panic_plane_%d", index); + debugfs_create_file(fname, 0200, plane->dev->debugfs_root, + plane, &dbg_drm_panic_ops); +} +#else +static void debugfs_register_plane(struct drm_plane *plane, int index) {} +#endif /* CONFIG_DRM_PANIC_DEBUG */ + +/** + * drm_panic_register() - Initialize DRM panic for a device + * @dev: the drm device on which the panic screen will be displayed. + */ +void drm_panic_register(struct drm_device *dev) +{ + struct drm_plane *plane; + int registered_plane = 0; + + if (!dev->mode_config.num_total_plane) + return; + + drm_for_each_plane(plane, dev) { + if (!plane->helper_private || !plane->helper_private->get_scanout_buffer) + continue; + plane->kmsg_panic.dump = drm_panic; + plane->kmsg_panic.max_reason = KMSG_DUMP_PANIC; + if (kmsg_dump_register(&plane->kmsg_panic)) + drm_warn(dev, "Failed to register panic handler\n"); + else { + debugfs_register_plane(plane, registered_plane); + registered_plane++; + } + } + if (registered_plane) + drm_info(dev, "Registered %d planes with drm panic\n", registered_plane); +} +EXPORT_SYMBOL(drm_panic_register); + +/** + * drm_panic_unregister() + * @dev: the drm device previously registered. + */ +void drm_panic_unregister(struct drm_device *dev) +{ + struct drm_plane *plane; + + if (!dev->mode_config.num_total_plane) + return; + + drm_for_each_plane(plane, dev) { + if (!plane->helper_private || !plane->helper_private->get_scanout_buffer) + continue; + kmsg_dump_unregister(&plane->kmsg_panic); + } +} +EXPORT_SYMBOL(drm_panic_unregister); diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 672c655c7a..57662a1fd3 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -140,6 +140,25 @@ * DRM_FORMAT_MOD_LINEAR. 
Before linux kernel release v5.1 there have been * various bugs in this area with inconsistencies between the capability * flag and per-plane properties. + * + * SIZE_HINTS: + * Blob property which contains the set of recommended plane size + * which can used for simple "cursor like" use cases (eg. no scaling). + * Using these hints frees userspace from extensive probing of + * supported plane sizes through atomic/setcursor ioctls. + * + * The blob contains an array of struct drm_plane_size_hint, in + * order of preference. For optimal usage userspace should pick + * the first size that satisfies its own requirements. + * + * Drivers should only attach this property to planes that + * support a very limited set of sizes. + * + * Note that property value 0 (ie. no blob) is reserved for potential + * future use. Current userspace is expected to ignore the property + * if the value is 0, and fall back to some other means (eg. + * &DRM_CAP_CURSOR_WIDTH and &DRM_CAP_CURSOR_HEIGHT) to determine + * the appropriate plane size to use. */ static unsigned int drm_num_planes(struct drm_device *dev) @@ -1729,3 +1748,40 @@ int drm_plane_create_scaling_filter_property(struct drm_plane *plane, return 0; } EXPORT_SYMBOL(drm_plane_create_scaling_filter_property); + +/** + * drm_plane_add_size_hints_property - create a size hints property + * + * @plane: drm plane + * @hints: size hints + * @num_hints: number of size hints + * + * Create a size hints property for the plane. + * + * RETURNS: + * Zero for success or -errno + */ +int drm_plane_add_size_hints_property(struct drm_plane *plane, + const struct drm_plane_size_hint *hints, + int num_hints) +{ + struct drm_device *dev = plane->dev; + struct drm_mode_config *config = &dev->mode_config; + struct drm_property_blob *blob; + + /* extending to other plane types needs actual thought */ + if (drm_WARN_ON(dev, plane->type != DRM_PLANE_TYPE_CURSOR)) + return -EINVAL; + + blob = drm_property_create_blob(dev, + array_size(sizeof(hints[0]), num_hints), + hints); + if (IS_ERR(blob)) + return PTR_ERR(blob); + + drm_object_attach_property(&plane->base, config->size_hints_property, + blob->base.id); + + return 0; +} +EXPORT_SYMBOL(drm_plane_add_size_hints_property); diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c index 699b7dbffd..cf2efb4472 100644 --- a/drivers/gpu/drm/drm_print.c +++ b/drivers/gpu/drm/drm_print.c @@ -23,13 +23,13 @@ * Rob Clark */ -#include - +#include +#include #include #include #include #include -#include +#include #include #include diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index bf2dd1f46b..4f75a1cfd8 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -566,8 +567,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, drm_modeset_acquire_init(&ctx, 0); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, - connector->name); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s]\n", connector->base.id, + connector->name); retry: ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); @@ -610,11 +611,10 @@ retry: * check here, and if anything changed start the hotplug code. 
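Coming back to drm_plane_add_size_hints_property() and the SIZE_HINTS property above, a hypothetical cursor-plane setup could advertise its supported sizes as follows; struct drm_plane_size_hint is assumed to come from the DRM uAPI, and the names here are illustrative:

    #include <linux/array_size.h>
    #include <drm/drm_plane.h>

    static const struct drm_plane_size_hint example_cursor_size_hints[] = {
            { .width =  64, .height =  64 },
            { .width = 128, .height = 128 },
            { .width = 256, .height = 256 },
    };

    static int example_add_cursor_hints(struct drm_plane *cursor)
    {
            return drm_plane_add_size_hints_property(cursor,
                                                     example_cursor_size_hints,
                                                     ARRAY_SIZE(example_cursor_size_hints));
    }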
*/ if (old_status != connector->status) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", - connector->base.id, - connector->name, - drm_get_connector_status_name(old_status), - drm_get_connector_status_name(connector->status)); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, connector->name, + drm_get_connector_status_name(old_status), + drm_get_connector_status_name(connector->status)); /* * The hotplug event code might call into the fb @@ -637,8 +637,8 @@ retry: drm_kms_helper_poll_enable(dev); if (connector->status == connector_status_disconnected) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", - connector->base.id, connector->name); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] disconnected\n", + connector->base.id, connector->name); drm_connector_update_edid_property(connector, NULL); drm_mode_prune_invalid(dev, &connector->modes, false); goto exit; @@ -696,11 +696,13 @@ exit: drm_mode_sort(&connector->modes); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id, - connector->name); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] probed modes:\n", + connector->base.id, connector->name); + list_for_each_entry(mode, &connector->modes, head) { drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); - drm_mode_debug_printmodeline(mode); + drm_dbg_kms(dev, "Probed mode: " DRM_MODE_FMT "\n", + DRM_MODE_ARG(mode)); } return count; @@ -833,14 +835,12 @@ static void output_poll_execute(struct work_struct *work) old = drm_get_connector_status_name(old_status); new = drm_get_connector_status_name(connector->status); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] " - "status updated from %s to %s\n", - connector->base.id, - connector->name, - old, new); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n", - connector->base.id, connector->name, - old_epoch_counter, connector->epoch_counter); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, connector->name, + old, new); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n", + connector->base.id, connector->name, + old_epoch_counter, connector->epoch_counter); changed = true; } @@ -951,6 +951,32 @@ void drm_kms_helper_poll_fini(struct drm_device *dev) } EXPORT_SYMBOL(drm_kms_helper_poll_fini); +static void drm_kms_helper_poll_init_release(struct drm_device *dev, void *res) +{ + drm_kms_helper_poll_fini(dev); +} + +/** + * drmm_kms_helper_poll_init - initialize and enable output polling + * @dev: drm_device + * + * This function initializes and then also enables output polling support for + * @dev similar to drm_kms_helper_poll_init(). Polling will automatically be + * cleaned up when the DRM device goes away. + * + * See drm_kms_helper_poll_init() for more information. + * + * Returns: + * 0 on success, or a negative errno code otherwise. + */ +int drmm_kms_helper_poll_init(struct drm_device *dev) +{ + drm_kms_helper_poll_init(dev); + + return drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev); +} +EXPORT_SYMBOL(drmm_kms_helper_poll_init); + static bool check_connector_changed(struct drm_connector *connector) { struct drm_device *dev = connector->dev; @@ -1279,3 +1305,32 @@ int drm_connector_helper_tv_get_modes(struct drm_connector *connector) return i; } EXPORT_SYMBOL(drm_connector_helper_tv_get_modes); + +/** + * drm_connector_helper_detect_from_ddc - Read EDID and detect connector status. 
+ * @connector: The connector + * @ctx: Acquire context + * @force: Perform screen-destructive operations, if necessary + * + * Detects the connector status by reading the EDID using drm_probe_ddc(), + * which requires connector->ddc to be set. Returns connector_status_connected + * on success or connector_status_disconnected on failure. + * + * Returns: + * The connector status as defined by enum drm_connector_status. + */ +int drm_connector_helper_detect_from_ddc(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct i2c_adapter *ddc = connector->ddc; + + if (!ddc) + return connector_status_unknown; + + if (drm_probe_ddc(ddc)) + return connector_status_connected; + + return connector_status_disconnected; +} +EXPORT_SYMBOL(drm_connector_helper_detect_from_ddc); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index a953f69a34..bd9b8ab4f8 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -209,10 +209,9 @@ static ssize_t status_store(struct device *device, ret = -EINVAL; if (old_force != connector->force || !connector->force) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force updated from %d to %d or reprobing\n", - connector->base.id, - connector->name, - old_force, connector->force); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] force updated from %d to %d or reprobing\n", + connector->base.id, connector->name, + old_force, connector->force); connector->funcs->fill_modes(connector, dev->mode_config.max_width, @@ -383,8 +382,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector) if (r) goto err_free; - DRM_DEBUG("adding \"%s\" to sysfs\n", - connector->name); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] adding connector to sysfs\n", + connector->base.id, connector->name); r = device_add(kdev); if (r) { @@ -430,8 +429,9 @@ void drm_sysfs_connector_remove(struct drm_connector *connector) if (dev_fwnode(connector->kdev)) component_del(connector->kdev, &typec_connector_ops); - DRM_DEBUG("removing \"%s\" from sysfs\n", - connector->name); + drm_dbg_kms(connector->dev, + "[CONNECTOR:%d:%s] removing connector from sysfs\n", + connector->base.id, connector->name); device_unregister(connector->kdev); connector->kdev = NULL; @@ -442,7 +442,7 @@ void drm_sysfs_lease_event(struct drm_device *dev) char *event_string = "LEASE=1"; char *envp[] = { event_string, NULL }; - DRM_DEBUG("generating lease event\n"); + drm_dbg_lease(dev, "generating lease event\n"); kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); } @@ -463,7 +463,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev) char *event_string = "HOTPLUG=1"; char *envp[] = { event_string, NULL }; - DRM_DEBUG("generating hotplug event\n"); + drm_dbg_kms(dev, "generating hotplug event\n"); kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); } diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 702a12bc93..cc3571e25a 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -166,11 +166,24 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600) MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)"); MODULE_PARM_DESC(timestamp_precision_usec, "Max. 
error on timestamps [usecs]"); +static struct drm_vblank_crtc * +drm_vblank_crtc(struct drm_device *dev, unsigned int pipe) +{ + return &dev->vblank[pipe]; +} + +struct drm_vblank_crtc * +drm_crtc_vblank_crtc(struct drm_crtc *crtc) +{ + return drm_vblank_crtc(crtc->dev, drm_crtc_index(crtc)); +} +EXPORT_SYMBOL(drm_crtc_vblank_crtc); + static void store_vblank(struct drm_device *dev, unsigned int pipe, u32 vblank_count_inc, ktime_t t_vblank, u32 last) { - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); assert_spin_locked(&dev->vblank_time_lock); @@ -184,7 +197,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe, static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); return vblank->max_vblank_count ?: dev->max_vblank_count; } @@ -273,7 +286,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, bool in_vblank_irq) { - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); u32 cur_vblank, diff; bool rc; ktime_t t_vblank; @@ -364,7 +377,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); u64 count; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) @@ -438,7 +451,7 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe) */ void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); unsigned long irqflags; assert_spin_locked(&dev->vbl_lock); @@ -600,7 +613,7 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; unsigned int pipe = drm_crtc_index(crtc); - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); int linedur_ns = 0, framedur_ns = 0; int dotclock = mode->crtc_clock; @@ -930,7 +943,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_count); static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, ktime_t *vblanktime) { - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); u64 vblank_count; unsigned int seq; @@ -985,7 +998,6 @@ EXPORT_SYMBOL(drm_crtc_vblank_count_and_time); */ int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime) { - unsigned int pipe = drm_crtc_index(crtc); struct drm_vblank_crtc *vblank; struct drm_display_mode *mode; u64 vblank_start; @@ -993,7 +1005,7 @@ int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime) if (!drm_dev_has_vblank(crtc->dev)) return -EINVAL; - vblank = &crtc->dev->vblank[pipe]; + vblank = drm_crtc_vblank_crtc(crtc); mode = &vblank->hwmode; if (!vblank->framedur_ns || !vblank->linedur_ns) @@ -1147,7 +1159,7 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe) static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); int ret = 0; 
assert_spin_locked(&dev->vbl_lock); @@ -1185,7 +1197,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe) int drm_vblank_get(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); unsigned long irqflags; int ret = 0; @@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get); void drm_vblank_put(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return; @@ -1274,7 +1286,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_put); */ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); int ret; u64 last; @@ -1327,7 +1339,7 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; unsigned int pipe = drm_crtc_index(crtc); - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); struct drm_pending_vblank_event *e, *t; ktime_t now; u64 seq; @@ -1405,8 +1417,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off); void drm_crtc_vblank_reset(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - unsigned int pipe = drm_crtc_index(crtc); - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); spin_lock_irq(&dev->vbl_lock); /* @@ -1445,8 +1456,7 @@ void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc, u32 max_vblank_count) { struct drm_device *dev = crtc->dev; - unsigned int pipe = drm_crtc_index(crtc); - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); drm_WARN_ON(dev, dev->max_vblank_count); drm_WARN_ON(dev, !READ_ONCE(vblank->inmodeset)); @@ -1469,7 +1479,7 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; unsigned int pipe = drm_crtc_index(crtc); - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return; @@ -1512,7 +1522,7 @@ static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe) assert_spin_locked(&dev->vbl_lock); assert_spin_locked(&dev->vblank_time_lock); - vblank = &dev->vblank[pipe]; + vblank = drm_vblank_crtc(dev, pipe); drm_WARN_ONCE(dev, drm_debug_enabled(DRM_UT_VBL) && !vblank->framedur_ns, "Cannot compute missed vblanks without frame duration\n"); @@ -1564,7 +1574,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, union drm_wait_vblank *vblwait, struct drm_file *file_priv) { - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); struct drm_pending_vblank_event *e; ktime_t now; u64 seq; @@ -1872,7 +1882,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe) */ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); unsigned long irqflags; bool disable_irq; @@ -1981,7 +1991,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, pipe = drm_crtc_index(crtc); - vblank = &dev->vblank[pipe]; + vblank = drm_crtc_vblank_crtc(crtc); vblank_enabled = 
dev->vblank_disable_immediate && READ_ONCE(vblank->enabled); if (!vblank_enabled) { @@ -2046,7 +2056,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data, pipe = drm_crtc_index(crtc); - vblank = &dev->vblank[pipe]; + vblank = drm_crtc_vblank_crtc(crtc); e = kzalloc(sizeof(*e), GFP_KERNEL); if (e == NULL) diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c index 43cd5c0f4f..4fe9b1d3b0 100644 --- a/drivers/gpu/drm/drm_vblank_work.c +++ b/drivers/gpu/drm/drm_vblank_work.c @@ -245,7 +245,7 @@ void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc, { kthread_init_work(&work->base, func); INIT_LIST_HEAD(&work->node); - work->vblank = &crtc->dev->vblank[drm_crtc_index(crtc)]; + work->vblank = drm_crtc_vblank_crtc(crtc); } EXPORT_SYMBOL(drm_vblank_work_init); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 71a6d2b1c8..5c0c9d4e3b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -355,9 +355,11 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj) static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op) { - if (op & ETNA_PREP_READ) + op &= ETNA_PREP_READ | ETNA_PREP_WRITE; + + if (op == ETNA_PREP_READ) return DMA_FROM_DEVICE; - else if (op & ETNA_PREP_WRITE) + else if (op == ETNA_PREP_WRITE) return DMA_TO_DEVICE; else return DMA_BIDIRECTIONAL; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index c4b04b0dee..62dcfdc789 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -38,9 +38,6 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job u32 dma_addr; int change; - /* block scheduler */ - drm_sched_stop(&gpu->sched, sched_job); - /* * If the GPU managed to complete this jobs fence, the timout is * spurious. Bail out. 
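For clarity, the etnaviv_op_to_dma_dir() change above results in the following mapping (a summary of the new code, not an addition to the patch); before the change an ETNA_PREP_READ | ETNA_PREP_WRITE prep fell through to DMA_FROM_DEVICE:

	op (masked to the READ/WRITE bits)      returned direction
	ETNA_PREP_READ                          DMA_FROM_DEVICE
	ETNA_PREP_WRITE                         DMA_TO_DEVICE
	ETNA_PREP_READ | ETNA_PREP_WRITE        DMA_BIDIRECTIONAL (was DMA_FROM_DEVICE)
	neither bit set                         DMA_BIDIRECTIONAL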
@@ -63,6 +60,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job goto out_no_timeout; } + /* block scheduler */ + drm_sched_stop(&gpu->sched, sched_job); + if(sched_job) drm_sched_increase_karma(sched_job); @@ -76,8 +76,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job return DRM_GPU_SCHED_STAT_NOMINAL; out_no_timeout: - /* restart scheduler after GPU is usable again */ - drm_sched_start(&gpu->sched, true); + list_add(&sched_job->list, &sched_job->sched->pending_list); return DRM_GPU_SCHED_STAT_NOMINAL; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 2fe0e5f3f6..bf16deaae6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -184,7 +184,6 @@ struct platform_driver dsi_driver = { .remove_new = samsung_dsim_remove, .driver = { .name = "exynos-dsi", - .owner = THIS_MODULE, .pm = &samsung_dsim_pm_ops, .of_match_table = exynos_dsi_of_match, }, diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index e81a576de3..142184c8c3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1412,7 +1412,6 @@ struct platform_driver fimc_driver = { .driver = { .of_match_table = fimc_of_match, .name = "exynos-drm-fimc", - .owner = THIS_MODULE, .pm = pm_ptr(&fimc_pm_ops), }, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index f2145227a1..f57df8c481 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -1326,7 +1326,6 @@ struct platform_driver fimd_driver = { .remove_new = fimd_remove, .driver = { .name = "exynos4-fb", - .owner = THIS_MODULE, .pm = pm_ptr(&exynos_fimd_pm_ops), .of_match_table = fimd_driver_dt_match, }, diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index f313842361..3a3b2c00e4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1610,7 +1610,6 @@ struct platform_driver g2d_driver = { .remove_new = g2d_remove, .driver = { .name = "exynos-drm-g2d", - .owner = THIS_MODULE, .pm = pm_ptr(&g2d_pm_ops), .of_match_table = exynos_g2d_match, }, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 180507a477..1b111e2c33 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1423,7 +1423,6 @@ struct platform_driver gsc_driver = { .remove_new = gsc_remove, .driver = { .name = "exynos-drm-gsc", - .owner = THIS_MODULE, .pm = &gsc_pm_ops, .of_match_table = exynos_drm_gsc_of_match, }, diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index e292096018..d61ec45180 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -464,7 +464,6 @@ struct platform_driver mic_driver = { .driver = { .name = "exynos-mic", .pm = pm_ptr(&exynos_mic_pm_ops), - .owner = THIS_MODULE, .of_match_table = exynos_mic_of_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 5f7516655b..2eb0b70167 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -454,7 +454,6 @@ struct platform_driver rotator_driver = { .remove_new = rotator_remove, .driver = { .name = "exynos-rotator", - 
.owner = THIS_MODULE, .pm = pm_ptr(&rotator_pm_ops), .of_match_table = exynos_rotator_match, }, diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 392f721f13..a9d4698968 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -722,7 +722,6 @@ struct platform_driver scaler_driver = { .remove_new = scaler_remove, .driver = { .name = "exynos-scaler", - .owner = THIS_MODULE, .pm = pm_ptr(&scaler_pm_ops), .of_match_table = exynos_scaler_match, }, diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index b1e9a70217..11a720fef3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -484,7 +484,6 @@ struct platform_driver vidi_driver = { .remove_new = vidi_remove, .driver = { .name = "exynos-drm-vidi", - .owner = THIS_MODULE, .dev_groups = vidi_groups, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 6385202cd2..1e26cd4f83 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1922,10 +1922,9 @@ static int hdmi_get_ddc_adapter(struct hdmi_context *hdata) static int hdmi_get_phy_io(struct hdmi_context *hdata) { const char *compatible_str = "samsung,exynos4212-hdmiphy"; - struct device_node *np; - int ret = 0; + struct device_node *np __free(device_node) = + of_find_compatible_node(NULL, NULL, compatible_str); - np = of_find_compatible_node(NULL, NULL, compatible_str); if (!np) { np = of_parse_phandle(hdata->dev->of_node, "phy", 0); if (!np) { @@ -1940,21 +1939,17 @@ static int hdmi_get_phy_io(struct hdmi_context *hdata) if (!hdata->regs_hdmiphy) { DRM_DEV_ERROR(hdata->dev, "failed to ioremap hdmi phy\n"); - ret = -ENOMEM; - goto out; + return -ENOMEM; } } else { hdata->hdmiphy_port = of_find_i2c_device_by_node(np); if (!hdata->hdmiphy_port) { DRM_INFO("Failed to get hdmi phy i2c client\n"); - ret = -EPROBE_DEFER; - goto out; + return -EPROBE_DEFER; } } -out: - of_node_put(np); - return ret; + return 0; } static int hdmi_probe(struct platform_device *pdev) @@ -2129,7 +2124,6 @@ struct platform_driver hdmi_driver = { .remove_new = hdmi_remove, .driver = { .name = "exynos-hdmi", - .owner = THIS_MODULE, .pm = &exynos_hdmi_pm_ops, .of_match_table = hdmi_match_types, }, diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 6822333fd0..1db955f000 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1331,7 +1331,6 @@ static const struct dev_pm_ops exynos_mixer_pm_ops = { struct platform_driver mixer_driver = { .driver = { .name = "exynos-mixer", - .owner = THIS_MODULE, .pm = &exynos_mixer_pm_ops, .of_match_table = mixer_match_types, }, diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index f08a6803dc..3adc2c9ab7 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -311,6 +311,9 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector) if (mode_dev->panel_fixed_mode != NULL) { struct drm_display_mode *mode = drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); + if (!mode) + return 0; + drm_mode_probed_add(connector, mode); return 1; } diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c index a70b01ccdf..4d78b33eaa 100644 --- a/drivers/gpu/drm/gma500/mmu.c +++ b/drivers/gpu/drm/gma500/mmu.c @@ -5,6 
+5,7 @@ **************************************************************************/ #include +#include #include "mmu.h" #include "psb_drv.h" diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index d974d0c60d..72191d6f0d 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -11,8 +11,6 @@ #include #include -#include - #include #include #include diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 8486de230e..8d1be94a44 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -504,6 +504,9 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector) if (mode_dev->panel_fixed_mode != NULL) { struct drm_display_mode *mode = drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); + if (!mode) + return 0; + drm_mode_probed_add(connector, mode); return 1; } diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c index 034e78360d..0f07d77c5d 100644 --- a/drivers/gpu/drm/gud/gud_connector.c +++ b/drivers/gpu/drm/gud/gud_connector.c @@ -221,7 +221,7 @@ static int gud_connector_get_modes(struct drm_connector *connector) struct gud_display_mode_req *reqmodes = NULL; struct gud_connector_get_edid_ctx edid_ctx; unsigned int i, num_modes = 0; - struct edid *edid = NULL; + const struct drm_edid *drm_edid = NULL; int idx, ret; if (!drm_dev_enter(connector->dev, &idx)) @@ -238,13 +238,13 @@ static int gud_connector_get_modes(struct drm_connector *connector) gud_conn_err(connector, "Invalid EDID size", ret); } else if (ret > 0) { edid_ctx.len = ret; - edid = drm_do_get_edid(connector, gud_connector_get_edid_block, &edid_ctx); + drm_edid = drm_edid_read_custom(connector, gud_connector_get_edid_block, &edid_ctx); } kfree(edid_ctx.buf); - drm_connector_update_edid_property(connector, edid); + drm_edid_connector_update(connector, drm_edid); - if (edid && edid_ctx.edid_override) + if (drm_edid && edid_ctx.edid_override) goto out; reqmodes = kmalloc_array(GUD_CONNECTOR_MAX_NUM_MODES, sizeof(*reqmodes), GFP_KERNEL); @@ -276,10 +276,10 @@ static int gud_connector_get_modes(struct drm_connector *connector) } out: if (!num_modes) - num_modes = drm_add_edid_modes(connector, edid); + num_modes = drm_edid_connector_add_modes(connector); kfree(reqmodes); - kfree(edid); + drm_edid_free(drm_edid); drm_dev_exit(idx); return num_modes; diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index bc18e2d9ea..d8397065c3 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -27,8 +27,8 @@ config DRM_I915_DEBUG select REF_TRACKER select STACKDEPOT select STACKTRACE - select DRM_DP_AUX_CHARDEV - select DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE if DRM_I915_DP_TUNNEL + select DRM_DISPLAY_DP_AUX_CHARDEV + select DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG if DRM_I915_DP_TUNNEL select X86_MSR # used by igt/pm_rpm select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) select DRM_DEBUG_MM if DRM=y diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index fba73c38e2..c8c8b31da4 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -32,16 +32,11 @@ endif # Enable -Werror in CI and development subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror -# Fine grained warnings disable -CFLAGS_i915_pci.o = -Wno-override-init -CFLAGS_display/intel_display_device.o = -Wno-override-init -CFLAGS_display/intel_fbdev.o = 
-Wno-override-init - # Support compiling the display code separately for both i915 and xe # drivers. Define I915 when building i915. subdir-ccflags-y += -DI915 -subdir-ccflags-y += -I$(srctree)/$(src) +subdir-ccflags-y += -I$(src) # Please keep these build lists sorted! @@ -271,6 +266,7 @@ i915-y += \ display/intel_display_rps.o \ display/intel_display_wa.o \ display/intel_dmc.o \ + display/intel_dmc_wl.o \ display/intel_dpio_phy.o \ display/intel_dpll.o \ display/intel_dpll_mgr.o \ @@ -434,7 +430,7 @@ no-header-test := \ always-$(CONFIG_DRM_I915_WERROR) += \ $(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \ - $(shell cd $(srctree)/$(src) && find * -name '*.h'))) + $(shell cd $(src) && find * -name '*.h'))) quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@) cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; \ diff --git a/drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h b/drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h new file mode 100644 index 0000000000..275f4d9c3f --- /dev/null +++ b/drivers/gpu/drm/i915/display/bxt_dpio_phy_regs.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __BXT_DPIO_PHY_REGS_H__ +#define __BXT_DPIO_PHY_REGS_H__ + +#include "intel_display_reg_defs.h" + +/* BXT PHY registers */ +#define _BXT_PHY0_BASE 0x6C000 +#define _BXT_PHY1_BASE 0x162000 +#define _BXT_PHY2_BASE 0x163000 +#define BXT_PHY_BASE(phy) \ + _PICK_EVEN_2RANGES(phy, 1, \ + _BXT_PHY0_BASE, _BXT_PHY0_BASE, \ + _BXT_PHY1_BASE, _BXT_PHY2_BASE) + +#define _BXT_PHY(phy, reg) \ + _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg)) + +#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \ + (BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE, \ + (reg_ch1) - _BXT_PHY0_BASE)) +#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \ + _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1)) +#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \ + ((lane) & 1) * 0x80) +#define _MMIO_BXT_PHY_CH_LN(phy, ch, lane, reg_ch0, reg_ch1) \ + _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) + _BXT_LANE_OFFSET(lane)) + +/* BXT PHY PLL registers */ +#define _PORT_PLL_A 0x46074 +#define _PORT_PLL_B 0x46078 +#define _PORT_PLL_C 0x4607c +#define PORT_PLL_ENABLE REG_BIT(31) +#define PORT_PLL_LOCK REG_BIT(30) +#define PORT_PLL_REF_SEL REG_BIT(27) +#define PORT_PLL_POWER_ENABLE REG_BIT(26) +#define PORT_PLL_POWER_STATE REG_BIT(25) +#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B) + +#define _PORT_PLL_EBB_0_A 0x162034 +#define _PORT_PLL_EBB_0_B 0x6C034 +#define _PORT_PLL_EBB_0_C 0x6C340 +#define PORT_PLL_P1_MASK REG_GENMASK(15, 13) +#define PORT_PLL_P1(p1) REG_FIELD_PREP(PORT_PLL_P1_MASK, (p1)) +#define PORT_PLL_P2_MASK REG_GENMASK(12, 8) +#define PORT_PLL_P2(p2) REG_FIELD_PREP(PORT_PLL_P2_MASK, (p2)) +#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ + _PORT_PLL_EBB_0_B, \ + _PORT_PLL_EBB_0_C) + +#define _PORT_PLL_EBB_4_A 0x162038 +#define _PORT_PLL_EBB_4_B 0x6C038 +#define _PORT_PLL_EBB_4_C 0x6C344 +#define PORT_PLL_RECALIBRATE REG_BIT(14) +#define PORT_PLL_10BIT_CLK_ENABLE REG_BIT(13) +#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ + _PORT_PLL_EBB_4_B, \ + _PORT_PLL_EBB_4_C) + +#define _PORT_PLL_0_A 0x162100 +#define _PORT_PLL_0_B 0x6C100 +#define _PORT_PLL_0_C 0x6C380 +/* PORT_PLL_0_A */ +#define PORT_PLL_M2_INT_MASK REG_GENMASK(7, 0) +#define PORT_PLL_M2_INT(m2_int) REG_FIELD_PREP(PORT_PLL_M2_INT_MASK, (m2_int)) +/* PORT_PLL_1_A */ +#define 
PORT_PLL_N_MASK REG_GENMASK(11, 8) +#define PORT_PLL_N(n) REG_FIELD_PREP(PORT_PLL_N_MASK, (n)) +/* PORT_PLL_2_A */ +#define PORT_PLL_M2_FRAC_MASK REG_GENMASK(21, 0) +#define PORT_PLL_M2_FRAC(m2_frac) REG_FIELD_PREP(PORT_PLL_M2_FRAC_MASK, (m2_frac)) +/* PORT_PLL_3_A */ +#define PORT_PLL_M2_FRAC_ENABLE REG_BIT(16) +/* PORT_PLL_6_A */ +#define PORT_PLL_GAIN_CTL_MASK REG_GENMASK(18, 16) +#define PORT_PLL_GAIN_CTL(x) REG_FIELD_PREP(PORT_PLL_GAIN_CTL_MASK, (x)) +#define PORT_PLL_INT_COEFF_MASK REG_GENMASK(12, 8) +#define PORT_PLL_INT_COEFF(x) REG_FIELD_PREP(PORT_PLL_INT_COEFF_MASK, (x)) +#define PORT_PLL_PROP_COEFF_MASK REG_GENMASK(3, 0) +#define PORT_PLL_PROP_COEFF(x) REG_FIELD_PREP(PORT_PLL_PROP_COEFF_MASK, (x)) +/* PORT_PLL_8_A */ +#define PORT_PLL_TARGET_CNT_MASK REG_GENMASK(9, 0) +#define PORT_PLL_TARGET_CNT(x) REG_FIELD_PREP(PORT_PLL_TARGET_CNT_MASK, (x)) +/* PORT_PLL_9_A */ +#define PORT_PLL_LOCK_THRESHOLD_MASK REG_GENMASK(3, 1) +#define PORT_PLL_LOCK_THRESHOLD(x) REG_FIELD_PREP(PORT_PLL_LOCK_THRESHOLD_MASK, (x)) +/* PORT_PLL_10_A */ +#define PORT_PLL_DCO_AMP_OVR_EN_H REG_BIT(27) +#define PORT_PLL_DCO_AMP_MASK REG_GENMASK(13, 10) +#define PORT_PLL_DCO_AMP(x) REG_FIELD_PREP(PORT_PLL_DCO_AMP_MASK, (x)) +#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \ + _PORT_PLL_0_B, \ + _PORT_PLL_0_C) +#define BXT_PORT_PLL(phy, ch, idx) _MMIO(_PORT_PLL_BASE(phy, ch) + \ + (idx) * 4) + +/* BXT PHY common lane registers */ +#define _PORT_CL1CM_DW0_A 0x162000 +#define _PORT_CL1CM_DW0_BC 0x6C000 +#define PHY_POWER_GOOD REG_BIT(16) +#define PHY_RESERVED REG_BIT(7) +#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC) + +#define _PORT_CL1CM_DW9_A 0x162024 +#define _PORT_CL1CM_DW9_BC 0x6C024 +#define IREF0RC_OFFSET_MASK REG_GENMASK(15, 8) +#define IREF0RC_OFFSET(x) REG_FIELD_PREP(IREF0RC_OFFSET_MASK, (x)) +#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC) + +#define _PORT_CL1CM_DW10_A 0x162028 +#define _PORT_CL1CM_DW10_BC 0x6C028 +#define IREF1RC_OFFSET_MASK REG_GENMASK(15, 8) +#define IREF1RC_OFFSET(x) REG_FIELD_PREP(IREF1RC_OFFSET_MASK, (x)) +#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC) + +#define _PORT_CL1CM_DW28_A 0x162070 +#define _PORT_CL1CM_DW28_BC 0x6C070 +#define OCL1_POWER_DOWN_EN REG_BIT(23) +#define DW28_OLDO_DYN_PWR_DOWN_EN REG_BIT(22) +#define SUS_CLK_CONFIG REG_GENMASK(1, 0) +#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC) + +#define _PORT_CL1CM_DW30_A 0x162078 +#define _PORT_CL1CM_DW30_BC 0x6C078 +#define OCL2_LDOFUSE_PWR_DIS REG_BIT(6) +#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC) + +/* The spec defines this only for BXT PHY0, but lets assume that this + * would exist for PHY1 too if it had a second channel. 
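As a worked example of the lane addressing defined near the top of this header, _BXT_LANE_OFFSET(lane) = ((lane >> 1) * 0x200) + ((lane & 1) * 0x80) evaluates to (values derived directly from the macro, shown only for illustration):

	lane 0 -> 0x000
	lane 1 -> 0x080
	lane 2 -> 0x200
	lane 3 -> 0x280

so lanes are laid out in pairs 0x200 apart, with the odd lane of each pair a further 0x80 into the block.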
+ */ +#define _PORT_CL2CM_DW6_A 0x162358 +#define _PORT_CL2CM_DW6_BC 0x6C358 +#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC) +#define DW6_OLDO_DYN_PWR_DOWN_EN REG_BIT(28) + +/* BXT PHY Ref registers */ +#define _PORT_REF_DW3_A 0x16218C +#define _PORT_REF_DW3_BC 0x6C18C +#define GRC_DONE REG_BIT(22) +#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC) + +#define _PORT_REF_DW6_A 0x162198 +#define _PORT_REF_DW6_BC 0x6C198 +#define GRC_CODE_MASK REG_GENMASK(31, 24) +#define GRC_CODE(x) REG_FIELD_PREP(GRC_CODE_MASK, (x)) +#define GRC_CODE_FAST_MASK REG_GENMASK(23, 16) +#define GRC_CODE_FAST(x) REG_FIELD_PREP(GRC_CODE_FAST_MASK, (x)) +#define GRC_CODE_SLOW_MASK REG_GENMASK(15, 8) +#define GRC_CODE_SLOW(x) REG_FIELD_PREP(GRC_CODE_SLOW_MASK, (x)) +#define GRC_CODE_NOM_MASK REG_GENMASK(7, 0) +#define GRC_CODE_NOM(x) REG_FIELD_PREP(GRC_CODE_NOM_MASK, (x)) +#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC) + +#define _PORT_REF_DW8_A 0x1621A0 +#define _PORT_REF_DW8_BC 0x6C1A0 +#define GRC_DIS REG_BIT(15) +#define GRC_RDY_OVRD REG_BIT(1) +#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC) + +/* BXT PHY PCS registers */ +#define _PORT_PCS_DW10_LN01_A 0x162428 +#define _PORT_PCS_DW10_LN01_B 0x6C428 +#define _PORT_PCS_DW10_LN01_C 0x6C828 +#define _PORT_PCS_DW10_GRP_A 0x162C28 +#define _PORT_PCS_DW10_GRP_B 0x6CC28 +#define _PORT_PCS_DW10_GRP_C 0x6CE28 +#define BXT_PORT_PCS_DW10_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ + _PORT_PCS_DW10_LN01_B, \ + _PORT_PCS_DW10_LN01_C) +#define BXT_PORT_PCS_DW10_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ + _PORT_PCS_DW10_GRP_B, \ + _PORT_PCS_DW10_GRP_C) + +#define TX2_SWING_CALC_INIT REG_BIT(31) +#define TX1_SWING_CALC_INIT REG_BIT(30) + +#define _PORT_PCS_DW12_LN01_A 0x162430 +#define _PORT_PCS_DW12_LN01_B 0x6C430 +#define _PORT_PCS_DW12_LN01_C 0x6C830 +#define _PORT_PCS_DW12_LN23_A 0x162630 +#define _PORT_PCS_DW12_LN23_B 0x6C630 +#define _PORT_PCS_DW12_LN23_C 0x6CA30 +#define _PORT_PCS_DW12_GRP_A 0x162c30 +#define _PORT_PCS_DW12_GRP_B 0x6CC30 +#define _PORT_PCS_DW12_GRP_C 0x6CE30 +#define LANESTAGGER_STRAP_OVRD REG_BIT(6) +#define LANE_STAGGER_MASK REG_GENMASK(4, 0) +#define BXT_PORT_PCS_DW12_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ + _PORT_PCS_DW12_LN01_B, \ + _PORT_PCS_DW12_LN01_C) +#define BXT_PORT_PCS_DW12_LN23(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ + _PORT_PCS_DW12_LN23_B, \ + _PORT_PCS_DW12_LN23_C) +#define BXT_PORT_PCS_DW12_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ + _PORT_PCS_DW12_GRP_B, \ + _PORT_PCS_DW12_GRP_C) + +/* BXT PHY TX registers */ +#define _PORT_TX_DW2_LN0_A 0x162508 +#define _PORT_TX_DW2_LN0_B 0x6C508 +#define _PORT_TX_DW2_LN0_C 0x6C908 +#define _PORT_TX_DW2_GRP_A 0x162D08 +#define _PORT_TX_DW2_GRP_B 0x6CD08 +#define _PORT_TX_DW2_GRP_C 0x6CF08 +#define BXT_PORT_TX_DW2_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \ + _PORT_TX_DW2_LN0_B, \ + _PORT_TX_DW2_LN0_C) +#define BXT_PORT_TX_DW2_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ + _PORT_TX_DW2_GRP_B, \ + _PORT_TX_DW2_GRP_C) +#define MARGIN_000_MASK REG_GENMASK(23, 16) +#define MARGIN_000(x) REG_FIELD_PREP(MARGIN_000_MASK, (x)) +#define UNIQ_TRANS_SCALE_MASK REG_GENMASK(15, 8) +#define UNIQ_TRANS_SCALE(x) REG_FIELD_PREP(UNIQ_TRANS_SCALE_MASK, (x)) + +#define _PORT_TX_DW3_LN0_A 0x16250C +#define _PORT_TX_DW3_LN0_B 0x6C50C +#define _PORT_TX_DW3_LN0_C 0x6C90C +#define _PORT_TX_DW3_GRP_A 0x162D0C +#define _PORT_TX_DW3_GRP_B 0x6CD0C +#define _PORT_TX_DW3_GRP_C 0x6CF0C +#define BXT_PORT_TX_DW3_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \ 
+ _PORT_TX_DW3_LN0_B, \ + _PORT_TX_DW3_LN0_C) +#define BXT_PORT_TX_DW3_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ + _PORT_TX_DW3_GRP_B, \ + _PORT_TX_DW3_GRP_C) +#define SCALE_DCOMP_METHOD REG_BIT(26) +#define UNIQUE_TRANGE_EN_METHOD REG_BIT(27) + +#define _PORT_TX_DW4_LN0_A 0x162510 +#define _PORT_TX_DW4_LN0_B 0x6C510 +#define _PORT_TX_DW4_LN0_C 0x6C910 +#define _PORT_TX_DW4_GRP_A 0x162D10 +#define _PORT_TX_DW4_GRP_B 0x6CD10 +#define _PORT_TX_DW4_GRP_C 0x6CF10 +#define BXT_PORT_TX_DW4_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \ + _PORT_TX_DW4_LN0_B, \ + _PORT_TX_DW4_LN0_C) +#define BXT_PORT_TX_DW4_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ + _PORT_TX_DW4_GRP_B, \ + _PORT_TX_DW4_GRP_C) +#define DE_EMPHASIS_MASK REG_GENMASK(31, 24) +#define DE_EMPHASIS(x) REG_FIELD_PREP(DE_EMPHASIS_MASK, (x)) + +#define _PORT_TX_DW5_LN0_A 0x162514 +#define _PORT_TX_DW5_LN0_B 0x6C514 +#define _PORT_TX_DW5_LN0_C 0x6C914 +#define _PORT_TX_DW5_GRP_A 0x162D14 +#define _PORT_TX_DW5_GRP_B 0x6CD14 +#define _PORT_TX_DW5_GRP_C 0x6CF14 +#define BXT_PORT_TX_DW5_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \ + _PORT_TX_DW5_LN0_B, \ + _PORT_TX_DW5_LN0_C) +#define BXT_PORT_TX_DW5_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ + _PORT_TX_DW5_GRP_B, \ + _PORT_TX_DW5_GRP_C) +#define DCC_DELAY_RANGE_1 REG_BIT(9) +#define DCC_DELAY_RANGE_2 REG_BIT(8) + +#define _PORT_TX_DW14_LN0_A 0x162538 +#define _PORT_TX_DW14_LN0_B 0x6C538 +#define _PORT_TX_DW14_LN0_C 0x6C938 +#define LATENCY_OPTIM REG_BIT(30) +#define BXT_PORT_TX_DW14_LN(phy, ch, lane) _MMIO_BXT_PHY_CH_LN(phy, ch, lane, \ + _PORT_TX_DW14_LN0_B, \ + _PORT_TX_DW14_LN0_C) + +#endif /* __BXT_DPIO_PHY_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index ac456a2275..79ecfc3394 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1616,8 +1616,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, - base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_connector *intel_connector = intel_dsi->attached_connector; struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 2bb270f829..7a77ae3dc3 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -62,7 +62,7 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector, { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_digital_connector_state *intel_conn_state = + const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(state); if (property == dev_priv->display.properties.force_audio) diff --git a/drivers/gpu/drm/i915/display/intel_audio_regs.h b/drivers/gpu/drm/i915/display/intel_audio_regs.h index 616e7b1275..88ea274036 100644 --- a/drivers/gpu/drm/i915/display/intel_audio_regs.h +++ b/drivers/gpu/drm/i915/display/intel_audio_regs.h @@ -148,4 +148,20 @@ #define HBLANK_START_COUNT_96 4 #define HBLANK_START_COUNT_128 5 +/* LPE Audio */ +#define I915_HDMI_LPE_AUDIO_BASE (VLV_DISPLAY_BASE + 0x65000) +#define I915_HDMI_LPE_AUDIO_SIZE 0x1000 + +#define VLV_AUD_CHICKEN_BIT_REG _MMIO(VLV_DISPLAY_BASE + 0x62F38) +#define 
VLV_CHICKEN_BIT_DBG_ENABLE (1 << 0) + +#define _VLV_AUD_PORT_EN_B_DBG 0x62F20 +#define _VLV_AUD_PORT_EN_C_DBG 0x62F30 +#define _VLV_AUD_PORT_EN_D_DBG 0x62F34 +#define VLV_AUD_PORT_EN_DBG(port) _MMIO_BASE_PORT3(VLV_DISPLAY_BASE, (port) - PORT_B, \ + _VLV_AUD_PORT_EN_B_DBG, \ + _VLV_AUD_PORT_EN_C_DBG, \ + _VLV_AUD_PORT_EN_D_DBG) +#define VLV_AMP_MUTE (1 << 1) + #endif /* __INTEL_AUDIO_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 1946d7fb3c..071668bfe5 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -83,16 +83,16 @@ static u32 scale_hw_to_user(struct intel_connector *connector, u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_panel *panel = &connector->panel; - drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0); + drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0); - if (i915->display.params.invert_brightness < 0) + if (display->params.invert_brightness < 0) return val; - if (i915->display.params.invert_brightness > 0 || - intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)) { + if (display->params.invert_brightness > 0 || + intel_has_quirk(display, QUIRK_INVERT_BRIGHTNESS)) { return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min; } @@ -126,15 +126,15 @@ u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val) u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_panel *panel = &connector->panel; - drm_WARN_ON_ONCE(&i915->drm, + drm_WARN_ON_ONCE(display->drm, panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0); - if (i915->display.params.invert_brightness > 0 || - (i915->display.params.invert_brightness == 0 && - intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS))) + if (display->params.invert_brightness > 0 || + (display->params.invert_brightness == 0 && + intel_has_quirk(display, QUIRK_INVERT_BRIGHTNESS))) val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min); return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max, @@ -761,8 +761,8 @@ static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state, WARN_ON(panel->backlight.max == 0); - if (panel->backlight.level <= panel->backlight.min) { - panel->backlight.level = panel->backlight.max; + if (panel->backlight.level < panel->backlight.min) { + panel->backlight.level = panel->backlight.min; if (panel->backlight.device) panel->backlight.device->props.brightness = scale_hw_to_user(connector, @@ -949,7 +949,7 @@ int intel_backlight_device_register(struct intel_connector *connector) else props.power = FB_BLANK_POWERDOWN; - name = kstrdup("intel_backlight", GFP_KERNEL); + name = kstrdup_const("intel_backlight", GFP_KERNEL); if (!name) return -ENOMEM; @@ -963,7 +963,7 @@ int intel_backlight_device_register(struct intel_connector *connector) * compatibility. Use unique names for subsequent backlight devices as a * fallback when the default name already exists. 
*/ - kfree(name); + kfree_const(name); name = kasprintf(GFP_KERNEL, "card%d-%s-backlight", i915->drm.primary->index, connector->base.name); if (!name) @@ -987,7 +987,7 @@ int intel_backlight_device_register(struct intel_connector *connector) connector->base.base.id, connector->base.name, name); out: - kfree(name); + kfree_const(name); return ret; } @@ -1642,17 +1642,17 @@ void intel_backlight_update(struct intel_atomic_state *state, int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_panel *panel = &connector->panel; int ret; if (!connector->panel.vbt.backlight.present) { - if (intel_has_quirk(i915, QUIRK_BACKLIGHT_PRESENT)) { - drm_dbg_kms(&i915->drm, + if (intel_has_quirk(display, QUIRK_BACKLIGHT_PRESENT)) { + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] no backlight present per VBT, but present per quirk\n", connector->base.base.id, connector->base.name); } else { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] no backlight present per VBT\n", connector->base.base.id, connector->base.name); return 0; @@ -1660,16 +1660,16 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) } /* ensure intel_panel has been initialized first */ - if (drm_WARN_ON(&i915->drm, !panel->backlight.funcs)) + if (drm_WARN_ON(display->drm, !panel->backlight.funcs)) return -ENODEV; /* set level and max in panel struct */ - mutex_lock(&i915->display.backlight.lock); + mutex_lock(&display->backlight.lock); ret = panel->backlight.funcs->setup(connector, pipe); - mutex_unlock(&i915->display.backlight.lock); + mutex_unlock(&display->backlight.lock); if (ret) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] failed to setup backlight\n", connector->base.base.id, connector->base.name); return ret; @@ -1677,7 +1677,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) panel->backlight.present = true; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] backlight initialized, %s, brightness %u/%u\n", connector->base.base.id, connector->base.name, str_enabled_disabled(panel->backlight.enabled), @@ -1821,7 +1821,7 @@ void intel_backlight_init_funcs(struct intel_panel *panel) if (intel_dp_aux_init_backlight_funcs(connector) == 0) return; - if (!intel_has_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK)) + if (!intel_has_quirk(&i915->display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK)) connector->panel.backlight.power = intel_pps_backlight_power; } diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 7d1e443f97..5fb48b6129 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -25,6 +25,8 @@ * */ +#include + #include #include #include @@ -593,11 +595,14 @@ get_lvds_fp_timing(const struct bdb_lvds_lfp_data *data, return (const void *)data + ptrs->ptr[index].fp_timing.offset; } -static const struct lvds_pnp_id * +static const struct drm_edid_product_id * get_lvds_pnp_id(const struct bdb_lvds_lfp_data *data, const struct bdb_lvds_lfp_data_ptrs *ptrs, int index) { + /* These two are supposed to have the same layout in memory. 
*/ + BUILD_BUG_ON(sizeof(struct lvds_pnp_id) != sizeof(struct drm_edid_product_id)); + return (const void *)data + ptrs->ptr[index].panel_pnp_id.offset; } @@ -611,19 +616,6 @@ get_lfp_data_tail(const struct bdb_lvds_lfp_data *data, return NULL; } -static void dump_pnp_id(struct drm_i915_private *i915, - const struct lvds_pnp_id *pnp_id, - const char *name) -{ - u16 mfg_name = be16_to_cpu((__force __be16)pnp_id->mfg_name); - char vend[4]; - - drm_dbg_kms(&i915->drm, "%s PNPID mfg: %s (0x%x), prod: %u, serial: %u, week: %d, year: %d\n", - name, drm_edid_decode_mfg_id(mfg_name, vend), - pnp_id->mfg_name, pnp_id->product_code, pnp_id->serial, - pnp_id->mfg_week, pnp_id->mfg_year + 1990); -} - static int opregion_get_panel_type(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback) @@ -662,21 +654,21 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915, { const struct bdb_lvds_lfp_data *data; const struct bdb_lvds_lfp_data_ptrs *ptrs; - const struct lvds_pnp_id *edid_id; - struct lvds_pnp_id edid_id_nodate; - const struct edid *edid = drm_edid_raw(drm_edid); /* FIXME */ + struct drm_edid_product_id product_id, product_id_nodate; + struct drm_printer p; int i, best = -1; - if (!edid) + if (!drm_edid) return -1; - edid_id = (const void *)&edid->mfg_id[0]; + drm_edid_get_product_id(drm_edid, &product_id); - edid_id_nodate = *edid_id; - edid_id_nodate.mfg_week = 0; - edid_id_nodate.mfg_year = 0; + product_id_nodate = product_id; + product_id_nodate.week_of_manufacture = 0; + product_id_nodate.year_of_manufacture = 0; - dump_pnp_id(i915, edid_id, "EDID"); + p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "EDID"); + drm_edid_print_product_id(&p, &product_id, true); ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS); if (!ptrs) @@ -687,11 +679,11 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915, return -1; for (i = 0; i < 16; i++) { - const struct lvds_pnp_id *vbt_id = + const struct drm_edid_product_id *vbt_id = get_lvds_pnp_id(data, ptrs, i); /* full match? */ - if (!memcmp(vbt_id, edid_id, sizeof(*vbt_id))) + if (!memcmp(vbt_id, &product_id, sizeof(*vbt_id))) return i; /* @@ -699,7 +691,7 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915, * and the VBT entry does not specify a date. 
*/ if (best < 0 && - !memcmp(vbt_id, &edid_id_nodate, sizeof(*vbt_id))) + !memcmp(vbt_id, &product_id_nodate, sizeof(*vbt_id))) best = i; } @@ -885,7 +877,8 @@ parse_lfp_data(struct drm_i915_private *i915, const struct bdb_lvds_lfp_data *data; const struct bdb_lvds_lfp_data_tail *tail; const struct bdb_lvds_lfp_data_ptrs *ptrs; - const struct lvds_pnp_id *pnp_id; + const struct drm_edid_product_id *pnp_id; + struct drm_printer p; int panel_type = panel->vbt.panel_type; ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS); @@ -900,7 +893,9 @@ parse_lfp_data(struct drm_i915_private *i915, parse_lfp_panel_dtd(i915, panel, data, ptrs); pnp_id = get_lvds_pnp_id(data, ptrs, panel_type); - dump_pnp_id(i915, pnp_id, "Panel"); + + p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "Panel"); + drm_edid_print_product_id(&p, pnp_id, false); tail = get_lfp_data_tail(data, ptrs); if (!tail) @@ -2719,6 +2714,57 @@ static void parse_ddi_ports(struct drm_i915_private *i915) print_ddi_port(devdata); } +static int child_device_expected_size(u16 version) +{ + BUILD_BUG_ON(sizeof(struct child_device_config) < 40); + + if (version > 256) + return -ENOENT; + else if (version >= 256) + return 40; + else if (version >= 216) + return 39; + else if (version >= 196) + return 38; + else if (version >= 195) + return 37; + else if (version >= 111) + return LEGACY_CHILD_DEVICE_CONFIG_SIZE; + else if (version >= 106) + return 27; + else + return 22; +} + +static bool child_device_size_valid(struct drm_i915_private *i915, int size) +{ + int expected_size; + + expected_size = child_device_expected_size(i915->display.vbt.version); + if (expected_size < 0) { + expected_size = sizeof(struct child_device_config); + drm_dbg(&i915->drm, + "Expected child device config size for VBT version %u not known; assuming %d\n", + i915->display.vbt.version, expected_size); + } + + /* Flag an error for unexpected size, but continue anyway. */ + if (size != expected_size) + drm_err(&i915->drm, + "Unexpected child device config size %d (expected %d for VBT version %u)\n", + size, expected_size, i915->display.vbt.version); + + /* The legacy sized child device config is the minimum we need. 
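As a worked example of the version-to-size table implemented by child_device_expected_size() above (values follow directly from the code): VBT version 100 -> 22 bytes, 108 -> 27, 150 -> LEGACY_CHILD_DEVICE_CONFIG_SIZE, 195 -> 37, 200 -> 38, 230 -> 39, 256 -> 40; for any version above 256 the function returns -ENOENT and child_device_size_valid() falls back to sizeof(struct child_device_config) with a debug message.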
*/ + if (size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) { + drm_dbg_kms(&i915->drm, + "Child device config size %d is too small.\n", + size); + return false; + } + + return true; +} + static void parse_general_definitions(struct drm_i915_private *i915) { @@ -2726,7 +2772,6 @@ parse_general_definitions(struct drm_i915_private *i915) struct intel_bios_encoder_data *devdata; const struct child_device_config *child; int i, child_device_num; - u8 expected_size; u16 block_size; int bus_pin; @@ -2750,39 +2795,8 @@ parse_general_definitions(struct drm_i915_private *i915) if (intel_gmbus_is_valid_pin(i915, bus_pin)) i915->display.vbt.crt_ddc_pin = bus_pin; - if (i915->display.vbt.version < 106) { - expected_size = 22; - } else if (i915->display.vbt.version < 111) { - expected_size = 27; - } else if (i915->display.vbt.version < 195) { - expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE; - } else if (i915->display.vbt.version == 195) { - expected_size = 37; - } else if (i915->display.vbt.version <= 215) { - expected_size = 38; - } else if (i915->display.vbt.version <= 250) { - expected_size = 39; - } else { - expected_size = sizeof(*child); - BUILD_BUG_ON(sizeof(*child) < 39); - drm_dbg(&i915->drm, - "Expected child device config size for VBT version %u not known; assuming %u\n", - i915->display.vbt.version, expected_size); - } - - /* Flag an error for unexpected size, but continue anyway. */ - if (defs->child_dev_size != expected_size) - drm_err(&i915->drm, - "Unexpected child device config size %u (expected %u for VBT version %u)\n", - defs->child_dev_size, expected_size, i915->display.vbt.version); - - /* The legacy sized child device config is the minimum we need. */ - if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) { - drm_dbg_kms(&i915->drm, - "Child device config size %u is too small.\n", - defs->child_dev_size); + if (!child_device_size_valid(i915, defs->child_dev_size)) return; - } /* get the number of child device */ child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size; @@ -2858,9 +2872,8 @@ init_vbt_panel_defaults(struct intel_panel *panel) static void init_vbt_missing_defaults(struct drm_i915_private *i915) { + unsigned int ports = DISPLAY_RUNTIME_INFO(i915)->port_mask; enum port port; - int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | - BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F); if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915)) return; @@ -2970,6 +2983,43 @@ bool intel_bios_is_valid_vbt(struct drm_i915_private *i915, return vbt; } +static struct vbt_header *firmware_get_vbt(struct drm_i915_private *i915, + size_t *size) +{ + struct vbt_header *vbt = NULL; + const struct firmware *fw = NULL; + const char *name = i915->display.params.vbt_firmware; + int ret; + + if (!name || !*name) + return NULL; + + ret = request_firmware(&fw, name, i915->drm.dev); + if (ret) { + drm_err(&i915->drm, + "Requesting VBT firmware \"%s\" failed (%d)\n", + name, ret); + return NULL; + } + + if (intel_bios_is_valid_vbt(i915, fw->data, fw->size)) { + vbt = kmemdup(fw->data, fw->size, GFP_KERNEL); + if (vbt) { + drm_dbg_kms(&i915->drm, + "Found valid VBT firmware \"%s\"\n", name); + if (size) + *size = fw->size; + } + } else { + drm_dbg_kms(&i915->drm, "Invalid VBT firmware \"%s\"\n", + name); + } + + release_firmware(fw); + + return vbt; +} + static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset) { intel_uncore_write(uncore, PRIMARY_SPI_ADDRESS, offset); @@ -2977,7 +3027,8 @@ static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset) return intel_uncore_read(uncore, 
PRIMARY_SPI_TRIGGER); } -static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915) +static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915, + size_t *size) { u32 count, data, found, store = 0; u32 static_region, oprom_offset; @@ -3020,6 +3071,9 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915) drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n"); + if (size) + *size = vbt_size; + return (struct vbt_header *)vbt; err_free_vbt: @@ -3028,7 +3082,8 @@ err_not_found: return NULL; } -static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915) +static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915, + size_t *sizep) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); void __iomem *p = NULL, *oprom; @@ -3077,6 +3132,9 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915) pci_unmap_rom(pdev, oprom); + if (sizep) + *sizep = vbt_size; + drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n"); return vbt; @@ -3089,6 +3147,32 @@ err_unmap_oprom: return NULL; } +static const struct vbt_header *intel_bios_get_vbt(struct drm_i915_private *i915, + size_t *sizep) +{ + const struct vbt_header *vbt = NULL; + intel_wakeref_t wakeref; + + vbt = firmware_get_vbt(i915, sizep); + + if (!vbt) + vbt = intel_opregion_get_vbt(i915, sizep); + + /* + * If the OpRegion does not have VBT, look in SPI flash + * through MMIO or PCI mapping + */ + if (!vbt && IS_DGFX(i915)) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + vbt = spi_oprom_get_vbt(i915, sizep); + + if (!vbt) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + vbt = oprom_get_vbt(i915, sizep); + + return vbt; +} + /** * intel_bios_init - find VBT and initialize settings from the BIOS * @i915: i915 device instance @@ -3100,7 +3184,6 @@ err_unmap_oprom: void intel_bios_init(struct drm_i915_private *i915) { const struct vbt_header *vbt; - struct vbt_header *oprom_vbt = NULL; const struct bdb_header *bdb; INIT_LIST_HEAD(&i915->display.vbt.display_devices); @@ -3114,21 +3197,7 @@ void intel_bios_init(struct drm_i915_private *i915) init_vbt_defaults(i915); - vbt = intel_opregion_get_vbt(i915, NULL); - - /* - * If the OpRegion does not have VBT, look in SPI flash through MMIO or - * PCI mapping - */ - if (!vbt && IS_DGFX(i915)) { - oprom_vbt = spi_oprom_get_vbt(i915); - vbt = oprom_vbt; - } - - if (!vbt) { - oprom_vbt = oprom_get_vbt(i915); - vbt = oprom_vbt; - } + vbt = intel_bios_get_vbt(i915, NULL); if (!vbt) goto out; @@ -3161,7 +3230,7 @@ out: parse_sdvo_device_mapping(i915); parse_ddi_ports(i915); - kfree(oprom_vbt); + kfree(vbt); } static void intel_bios_init_panel(struct drm_i915_private *i915, @@ -3333,8 +3402,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) * additional data. Trust that if the VBT was written into * the OpRegion then they have validated the LVDS's existence. */ - if (intel_opregion_get_vbt(i915, NULL)) - return true; + return intel_opregion_vbt_present(i915); } return false; @@ -3695,13 +3763,12 @@ static int intel_bios_vbt_show(struct seq_file *m, void *unused) const void *vbt; size_t vbt_size; - /* - * FIXME: VBT might originate from other places than opregion, and then - * this would be incorrect. 
- */ - vbt = intel_opregion_get_vbt(i915, &vbt_size); - if (vbt) + vbt = intel_bios_get_vbt(i915, &vbt_size); + + if (vbt) { seq_write(m, vbt, vbt_size); + kfree(vbt); + } return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 7f2a50b4f4..972ea887e2 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -162,7 +162,9 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, 1); if (ret < 0) { - drm_err(&dev_priv->drm, "Failed to disable qgv points (%d) points: 0x%x\n", ret, points_mask); + drm_err(&dev_priv->drm, + "Failed to disable qgv points (0x%x) points: 0x%x\n", + ret, points_mask); return ret; } @@ -290,8 +292,10 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, struct intel_qgv_point *sp = &qi->points[i]; ret = intel_read_qgv_point_info(dev_priv, sp, i); - if (ret) + if (ret) { + drm_dbg_kms(&dev_priv->drm, "Could not read QGV %d info\n", i); return ret; + } drm_dbg_kms(&dev_priv->drm, "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n", @@ -659,6 +663,22 @@ static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv, return bi->psf_bw[psf_gv_point]; } +static unsigned int icl_qgv_bw(struct drm_i915_private *i915, + int num_active_planes, int qgv_point) +{ + unsigned int idx; + + if (DISPLAY_VER(i915) >= 12) + idx = tgl_max_bw_index(i915, num_active_planes, qgv_point); + else + idx = icl_max_bw_index(i915, num_active_planes, qgv_point); + + if (idx >= ARRAY_SIZE(i915->display.bw.max)) + return 0; + + return i915->display.bw.max[idx].deratedbw[qgv_point]; +} + void intel_bw_init_hw(struct drm_i915_private *dev_priv) { if (!HAS_DISPLAY(dev_priv)) @@ -735,6 +755,7 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state, intel_bw_crtc_data_rate(crtc_state); bw_state->num_active_planes[crtc->pipe] = intel_bw_crtc_num_active_planes(crtc_state); + bw_state->force_check_qgv = true; drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n", pipe_name(crtc->pipe), @@ -804,6 +825,80 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state) return to_intel_bw_state(bw_state); } +static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915, + int num_active_planes) +{ + unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; + unsigned int max_bw_point = 0; + unsigned int max_bw = 0; + int i; + + for (i = 0; i < num_qgv_points; i++) { + unsigned int max_data_rate = + icl_qgv_bw(i915, num_active_planes, i); + + /* + * We need to know which qgv point gives us + * maximum bandwidth in order to disable SAGV + * if we find that we exceed SAGV block time + * with watermarks. 
By that moment we already + * have those, as it is calculated earlier in + * intel_atomic_check, + */ + if (max_data_rate > max_bw) { + max_bw_point = BIT(i); + max_bw = max_data_rate; + } + } + + return max_bw_point; +} + +static u16 icl_prepare_qgv_points_mask(struct drm_i915_private *i915, + unsigned int qgv_points, + unsigned int psf_points) +{ + return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) | + ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915); +} + +static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915) +{ + unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; + unsigned int max_bw_point_mask = 0; + unsigned int max_bw = 0; + int i; + + for (i = 0; i < num_psf_gv_points; i++) { + unsigned int max_data_rate = adl_psf_bw(i915, i); + + if (max_data_rate > max_bw) { + max_bw_point_mask = BIT(i); + max_bw = max_data_rate; + } else if (max_data_rate == max_bw) { + max_bw_point_mask |= BIT(i); + } + } + + return max_bw_point_mask; +} + +static void icl_force_disable_sagv(struct drm_i915_private *i915, + struct intel_bw_state *bw_state) +{ + unsigned int qgv_points = icl_max_bw_qgv_point_mask(i915, 0); + unsigned int psf_points = icl_max_bw_psf_gv_point_mask(i915); + + bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915, + qgv_points, + psf_points); + + drm_dbg_kms(&i915->drm, "Forcing SAGV disable: mask 0x%x\n", + bw_state->qgv_points_mask); + + icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask); +} + static int mtl_find_qgv_points(struct drm_i915_private *i915, unsigned int data_rate, unsigned int num_active_planes, @@ -881,8 +976,6 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, const struct intel_bw_state *old_bw_state, struct intel_bw_state *new_bw_state) { - unsigned int max_bw_point = 0; - unsigned int max_bw = 0; unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; u16 psf_points = 0; @@ -895,31 +988,8 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, return ret; for (i = 0; i < num_qgv_points; i++) { - unsigned int idx; - unsigned int max_data_rate; - - if (DISPLAY_VER(i915) >= 12) - idx = tgl_max_bw_index(i915, num_active_planes, i); - else - idx = icl_max_bw_index(i915, num_active_planes, i); - - if (idx >= ARRAY_SIZE(i915->display.bw.max)) - continue; - - max_data_rate = i915->display.bw.max[idx].deratedbw[i]; - - /* - * We need to know which qgv point gives us - * maximum bandwidth in order to disable SAGV - * if we find that we exceed SAGV block time - * with watermarks. By that moment we already - * have those, as it is calculated earlier in - * intel_atomic_check, - */ - if (max_data_rate > max_bw) { - max_bw_point = i; - max_bw = max_data_rate; - } + unsigned int max_data_rate = icl_qgv_bw(i915, + num_active_planes, i); if (max_data_rate >= data_rate) qgv_points |= BIT(i); @@ -963,20 +1033,18 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, * cause. */ if (!intel_can_enable_sagv(i915, new_bw_state)) { - qgv_points = BIT(max_bw_point); - drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point %d\n", - max_bw_point); + qgv_points = icl_max_bw_qgv_point_mask(i915, num_active_planes); + drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point mask 0x%x\n", + qgv_points); } /* * We store the ones which need to be masked as that is what PCode * actually accepts as a parameter. 
*/ - new_bw_state->qgv_points_mask = - ~(ICL_PCODE_REQ_QGV_PT(qgv_points) | - ADLS_PCODE_REQ_PSF_PT(psf_points)) & - icl_qgv_points_mask(i915); - + new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915, + qgv_points, + psf_points); /* * If the actual mask had changed we need to make sure that * the commits are serialized(in case this is a nomodeset, nonblocking) @@ -1272,8 +1340,9 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) new_bw_state = intel_atomic_get_new_bw_state(state); if (new_bw_state && - intel_can_enable_sagv(i915, old_bw_state) != - intel_can_enable_sagv(i915, new_bw_state)) + (intel_can_enable_sagv(i915, old_bw_state) != + intel_can_enable_sagv(i915, new_bw_state) || + new_bw_state->force_check_qgv)) changed = true; /* @@ -1287,6 +1356,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) if (ret) return ret; + new_bw_state->force_check_qgv = false; + return 0; } @@ -1313,7 +1384,7 @@ static const struct intel_global_state_funcs intel_bw_funcs = { .atomic_destroy_state = intel_bw_destroy_state, }; -int intel_bw_init(struct drm_i915_private *dev_priv) +int intel_bw_init(struct drm_i915_private *i915) { struct intel_bw_state *state; @@ -1321,8 +1392,15 @@ int intel_bw_init(struct drm_i915_private *dev_priv) if (!state) return -ENOMEM; - intel_atomic_global_obj_init(dev_priv, &dev_priv->display.bw.obj, + intel_atomic_global_obj_init(i915, &i915->display.bw.obj, &state->base, &intel_bw_funcs); + /* + * Limit this only if we have SAGV. And for Display version 14 onwards + * sagv is handled though pmdemand requests + */ + if (intel_has_sagv(i915) && IS_DISPLAY_VER(i915, 11, 13)) + icl_force_disable_sagv(i915, state); + return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index 59cb4fc5db..161813cca4 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -47,12 +47,19 @@ struct intel_bw_state { */ u16 qgv_points_mask; + /* + * Flag to force the QGV comparison in atomic check right after the + * hw state readout + */ + bool force_check_qgv; + int min_cdclk[I915_MAX_PIPES]; unsigned int data_rate[I915_MAX_PIPES]; u8 num_active_planes[I915_MAX_PIPES]; }; -#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base) +#define to_intel_bw_state(global_state) \ + container_of_const((global_state), struct intel_bw_state, base) struct intel_bw_state * intel_atomic_get_old_bw_state(struct intel_atomic_state *state); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index f672bfd70d..7a833b5f2d 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -39,6 +39,8 @@ #include "intel_pcode.h" #include "intel_psr.h" #include "intel_vdsc.h" +#include "skl_watermark.h" +#include "skl_watermark_regs.h" #include "vlv_sideband.h" /** @@ -63,6 +65,32 @@ * DMC will not change the active CDCLK frequency however, so that part * will still be performed by the driver directly. * + * There are multiple components involved in the generation of the CDCLK + * frequency: + * + * - We have the CDCLK PLL, which generates an output clock based on a + * reference clock and a ratio parameter. + * - The CD2X Divider, which divides the output of the PLL based on a + * divisor selected from a set of pre-defined choices. 
+ * - The CD2X Squasher, which further divides the output based on a + * waveform represented as a sequence of bits where each zero + * "squashes out" a clock cycle. + * - And, finally, a fixed divider that divides the output frequency by 2. + * + * As such, the resulting CDCLK frequency can be calculated with the + * following formula: + * + * cdclk = vco / cd2x_div / (sq_len / sq_div) / 2 + * + * , where vco is the frequency generated by the PLL; cd2x_div + * represents the CD2X Divider; sq_len and sq_div are the bit length + * and the number of high bits for the CD2X Squasher waveform, respectively; + * and 2 represents the fixed divider. + * + * Note that some older platforms do not contain the CD2X Divider + * and/or CD2X Squasher, in which case we can ignore their respective + * factors in the formula above. + * * Several methods exist to change the CDCLK frequency, which ones are * supported depends on the platform: * @@ -993,15 +1021,14 @@ static int skl_cdclk_decimal(int cdclk) return DIV_ROUND_CLOSEST(cdclk - 1000, 500); } -static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, - int vco) +static void skl_set_preferred_cdclk_vco(struct drm_i915_private *i915, int vco) { - bool changed = dev_priv->skl_preferred_vco_freq != vco; + bool changed = i915->display.cdclk.skl_preferred_vco_freq != vco; - dev_priv->skl_preferred_vco_freq = vco; + i915->display.cdclk.skl_preferred_vco_freq = vco; if (changed) - intel_update_max_cdclk(dev_priv); + intel_update_max_cdclk(i915); } static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco) @@ -1205,7 +1232,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv) * Use the current vco as our initial * guess as to what the preferred vco is. */ - if (dev_priv->skl_preferred_vco_freq == 0) + if (dev_priv->display.cdclk.skl_preferred_vco_freq == 0) skl_set_preferred_cdclk_vco(dev_priv, dev_priv->display.cdclk.hw.vco); return; @@ -1213,7 +1240,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv) cdclk_config = dev_priv->display.cdclk.hw; - cdclk_config.vco = dev_priv->skl_preferred_vco_freq; + cdclk_config.vco = dev_priv->display.cdclk.skl_preferred_vco_freq; if (cdclk_config.vco == 0) cdclk_config.vco = 8100000; cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco); @@ -1391,7 +1418,7 @@ static const struct intel_cdclk_vals mtl_cdclk_table[] = { {} }; -static const struct intel_cdclk_vals lnl_cdclk_table[] = { +static const struct intel_cdclk_vals xe2lpd_cdclk_table[] = { { .refclk = 38400, .cdclk = 153600, .ratio = 16, .waveform = 0xaaaa }, { .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a }, { .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 }, @@ -1656,6 +1683,8 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, } out: + if (DISPLAY_VER(dev_priv) >= 20) + cdclk_config->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN; /* * Can't read this out :( Let's assume it's * at least what the CDCLK frequency requires. 
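A minimal standalone sketch of the CDCLK formula documented in the hunk above, cdclk = vco / cd2x_div / (sq_len / sq_div) / 2, with sq_div taken as the number of high bits in the squash waveform. The helper name and the cd2x_div = 1 value are hypothetical and only illustrate the arithmetic; the example numbers come from the xe2lpd_cdclk_table entry above (refclk 38400 kHz, ratio 16, waveform 0xaaaa):

#include <stdio.h>

/*
 * Illustrative only: evaluate cdclk = vco / cd2x_div / (sq_len / sq_div) / 2
 * in integer math by rearranging it to vco * sq_div / (cd2x_div * sq_len * 2).
 * sq_div is the number of high bits in the squash waveform; a waveform with
 * every bit set means no squashing.
 */
static unsigned int example_cdclk_khz(unsigned int vco_khz, unsigned int cd2x_div,
				      unsigned int sq_len, unsigned int waveform)
{
	unsigned int sq_div = 0, i;

	/* count the high bits of the waveform */
	for (i = 0; i < sq_len; i++)
		if (waveform & (1u << i))
			sq_div++;

	if (!sq_div)	/* treat an empty waveform as "no squashing" */
		sq_div = sq_len;

	return vco_khz * sq_div / (cd2x_div * sq_len * 2);
}

int main(void)
{
	unsigned int vco = 38400 * 16;	/* refclk 38400 kHz, PLL ratio 16 */

	/* all 16 waveform bits set, no squashing: 307200 kHz */
	printf("%u kHz\n", example_cdclk_khz(vco, 1, 16, 0xffff));
	/* waveform 0xaaaa squashes every other cycle: 153600 kHz */
	printf("%u kHz\n", example_cdclk_khz(vco, 1, 16, 0xaaaa));
	return 0;
}

The 153600 kHz result matches the first xe2lpd_cdclk_table entry quoted above, which is the same refclk/ratio/waveform combination; the full-waveform case simply shows the unsquashed frequency for that PLL setting.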
@@ -1850,6 +1879,37 @@ static bool cdclk_pll_is_unknown(unsigned int vco) return vco == ~0; } +static bool mdclk_source_is_cdclk_pll(struct drm_i915_private *i915) +{ + return DISPLAY_VER(i915) >= 20; +} + +static u32 xe2lpd_mdclk_source_sel(struct drm_i915_private *i915) +{ + if (mdclk_source_is_cdclk_pll(i915)) + return MDCLK_SOURCE_SEL_CDCLK_PLL; + + return MDCLK_SOURCE_SEL_CD2XCLK; +} + +int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915, + const struct intel_cdclk_config *cdclk_config) +{ + if (mdclk_source_is_cdclk_pll(i915)) + return DIV_ROUND_UP(cdclk_config->vco, cdclk_config->cdclk); + + /* Otherwise, source for MDCLK is CD2XCLK. */ + return 2; +} + +static void xe2lpd_mdclk_cdclk_ratio_program(struct drm_i915_private *i915, + const struct intel_cdclk_config *cdclk_config) +{ + intel_dbuf_mdclk_cdclk_ratio_update(i915, + intel_mdclk_cdclk_ratio(i915, cdclk_config), + cdclk_config->joined_mbus); +} + static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915, const struct intel_cdclk_config *old_cdclk_config, const struct intel_cdclk_config *new_cdclk_config, @@ -1954,7 +2014,7 @@ static u32 bxt_cdclk_ctl(struct drm_i915_private *i915, val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; if (DISPLAY_VER(i915) >= 20) - val |= MDCLK_SOURCE_SEL_CDCLK_PLL; + val |= xe2lpd_mdclk_source_sel(i915); else val |= skl_cdclk_decimal(cdclk); @@ -1967,7 +2027,6 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv, { int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; - u16 waveform; if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 && !cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) { @@ -1982,10 +2041,11 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv, } else bxt_cdclk_pll_update(dev_priv, vco); - waveform = cdclk_squash_waveform(dev_priv, cdclk); + if (HAS_CDCLK_SQUASH(dev_priv)) { + u16 waveform = cdclk_squash_waveform(dev_priv, cdclk); - if (HAS_CDCLK_SQUASH(dev_priv)) dg2_cdclk_squash_program(dev_priv, waveform); + } intel_de_write(dev_priv, CDCLK_CTL, bxt_cdclk_ctl(dev_priv, cdclk_config, pipe)); @@ -2030,6 +2090,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, return; } + if (DISPLAY_VER(dev_priv) >= 20 && cdclk < dev_priv->display.cdclk.hw.cdclk) + xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config); + if (cdclk_compute_crawl_and_squash_midpoint(dev_priv, &dev_priv->display.cdclk.hw, cdclk_config, &mid_cdclk_config)) { _bxt_set_cdclk(dev_priv, &mid_cdclk_config, pipe); @@ -2038,6 +2101,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, _bxt_set_cdclk(dev_priv, cdclk_config, pipe); } + if (DISPLAY_VER(dev_priv) >= 20 && cdclk > dev_priv->display.cdclk.hw.cdclk) + xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config); + if (DISPLAY_VER(dev_priv) >= 14) /* * NOOP - No Pcode communication needed for @@ -2260,16 +2326,15 @@ static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv, } /** - * intel_cdclk_needs_modeset - Determine if changong between the CDCLK - * configurations requires a modeset on all pipes + * intel_cdclk_clock_changed - Check whether the clock changed * @a: first CDCLK configuration * @b: second CDCLK configuration * * Returns: - * True if changing between the two CDCLK configurations - * requires all pipes to be off, false if not. + * True if CDCLK changed in a way that requires re-programming and + * False otherwise. 
*/ -bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a, +bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { return a->cdclk != b->cdclk || @@ -2322,7 +2387,7 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv, static bool intel_cdclk_changed(const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { - return intel_cdclk_needs_modeset(a, b) || + return intel_cdclk_clock_changed(a, b) || a->voltage_level != b->voltage_level; } @@ -2368,18 +2433,9 @@ static void intel_pcode_notify(struct drm_i915_private *i915, ret); } -/** - * intel_set_cdclk - Push the CDCLK configuration to the hardware - * @dev_priv: i915 device - * @cdclk_config: new CDCLK configuration - * @pipe: pipe with which to synchronize the update - * - * Program the hardware based on the passed in CDCLK state, - * if necessary. - */ static void intel_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, - enum pipe pipe) + enum pipe pipe, const char *context) { struct intel_encoder *encoder; @@ -2389,7 +2445,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk)) return; - intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to"); + intel_cdclk_dump_config(dev_priv, cdclk_config, context); for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -2519,6 +2575,17 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state) update_cdclk, update_pipe_count); } +bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state) +{ + const struct intel_cdclk_state *old_cdclk_state = + intel_atomic_get_old_cdclk_state(state); + const struct intel_cdclk_state *new_cdclk_state = + intel_atomic_get_new_cdclk_state(state); + + return new_cdclk_state && !new_cdclk_state->disable_pipes && + new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk; +} + /** * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware * @state: intel atomic state @@ -2560,9 +2627,16 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) old_cdclk_state->actual.voltage_level); } + /* + * mbus joining will be changed later by + * intel_dbuf_mbus_{pre,post}_ddb_update() + */ + cdclk_config.joined_mbus = old_cdclk_state->actual.joined_mbus; + drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); - intel_set_cdclk(i915, &cdclk_config, pipe); + intel_set_cdclk(i915, &cdclk_config, pipe, + "Pre changing CDCLK to"); } /** @@ -2597,7 +2671,8 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state) drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); - intel_set_cdclk(i915, &new_cdclk_state->actual, pipe); + intel_set_cdclk(i915, &new_cdclk_state->actual, pipe, + "Post changing CDCLK to"); } static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) @@ -2748,25 +2823,6 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) if (crtc_state->dsc.compression_enable) min_cdclk = max(min_cdclk, intel_vdsc_min_cdclk(crtc_state)); - /* - * HACK. Currently for TGL/DG2 platforms we calculate - * min_cdclk initially based on pixel_rate divided - * by 2, accounting for also plane requirements, - * however in some cases the lowest possible CDCLK - * doesn't work and causing the underruns. 
- * Explicitly stating here that this seems to be currently - * rather a Hack, than final solution. - */ - if (IS_TIGERLAKE(dev_priv) || IS_DG2(dev_priv)) { - /* - * Clamp to max_cdclk_freq in case pixel rate is higher, - * in order not to break an 8K, but still leave W/A at place. - */ - min_cdclk = max_t(int, min_cdclk, - min_t(int, crtc_state->pixel_rate, - dev_priv->display.cdclk.max_cdclk_freq)); - } - return min_cdclk; } @@ -2954,7 +3010,7 @@ static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state) vco = cdclk_state->logical.vco; if (!vco) - vco = dev_priv->skl_preferred_vco_freq; + vco = dev_priv->display.cdclk.skl_preferred_vco_freq; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { if (!crtc_state->hw.enable) @@ -3139,6 +3195,20 @@ int intel_cdclk_atomic_check(struct intel_atomic_state *state, return 0; } +int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus) +{ + struct intel_cdclk_state *cdclk_state; + + cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(cdclk_state)) + return PTR_ERR(cdclk_state); + + cdclk_state->actual.joined_mbus = joined_mbus; + cdclk_state->logical.joined_mbus = joined_mbus; + + return intel_atomic_lock_global_state(&cdclk_state->base); +} + int intel_cdclk_init(struct drm_i915_private *dev_priv) { struct intel_cdclk_state *cdclk_state; @@ -3247,7 +3317,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) drm_dbg_kms(&dev_priv->drm, "Can change cdclk cd2x divider with pipe %c active\n", pipe_name(pipe)); - } else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual, + } else if (intel_cdclk_clock_changed(&old_cdclk_state->actual, &new_cdclk_state->actual)) { /* All pipes must be switched off while we change the cdclk. */ ret = intel_modeset_all_pipes_late(state, "CDCLK change"); @@ -3260,6 +3330,15 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) "Modeset required for cdclk change\n"); } + if (intel_mdclk_cdclk_ratio(dev_priv, &old_cdclk_state->actual) != + intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual)) { + int ratio = intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual); + + ret = intel_dbuf_state_set_mdclk_cdclk_ratio(state, ratio); + if (ret) + return ret; + } + drm_dbg_kms(&dev_priv->drm, "New cdclk calculated to be logical %u kHz, actual %u kHz\n", new_cdclk_state->logical.cdclk, @@ -3317,7 +3396,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; int max_cdclk, vco; - vco = dev_priv->skl_preferred_vco_freq; + vco = dev_priv->display.cdclk.skl_preferred_vco_freq; drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000); /* @@ -3359,13 +3438,13 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk; } - dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv); + dev_priv->display.cdclk.max_dotclk_freq = intel_compute_max_dotclk(dev_priv); drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n", dev_priv->display.cdclk.max_cdclk_freq); drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n", - dev_priv->max_dotclk_freq); + dev_priv->display.cdclk.max_dotclk_freq); } /** @@ -3539,7 +3618,7 @@ static int i915_cdclk_info_show(struct seq_file *m, void *unused) seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk); seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq); - seq_printf(m, "Max pixel 
clock frequency: %d kHz\n", i915->max_dotclk_freq); + seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->display.cdclk.max_dotclk_freq); return 0; } @@ -3554,13 +3633,6 @@ void intel_cdclk_debugfs_register(struct drm_i915_private *i915) i915, &i915_cdclk_info_fops); } -static const struct intel_cdclk_funcs mtl_cdclk_funcs = { - .get_cdclk = bxt_get_cdclk, - .set_cdclk = bxt_set_cdclk, - .modeset_calc_cdclk = bxt_modeset_calc_cdclk, - .calc_voltage_level = rplu_calc_voltage_level, -}; - static const struct intel_cdclk_funcs rplu_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, @@ -3704,10 +3776,10 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = { void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { if (DISPLAY_VER(dev_priv) >= 20) { - dev_priv->display.funcs.cdclk = &mtl_cdclk_funcs; - dev_priv->display.cdclk.table = lnl_cdclk_table; + dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs; + dev_priv->display.cdclk.table = xe2lpd_cdclk_table; } else if (DISPLAY_VER(dev_priv) >= 14) { - dev_priv->display.funcs.cdclk = &mtl_cdclk_funcs; + dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs; dev_priv->display.cdclk.table = mtl_cdclk_table; } else if (IS_DG2(dev_priv)) { dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index 71bc032bfe..cfdcdec07a 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -18,6 +18,8 @@ struct intel_crtc_state; struct intel_cdclk_config { unsigned int cdclk, vco, ref, bypass; u8 voltage_level; + /* This field is only valid for Xe2LPD and above. */ + bool joined_mbus; }; struct intel_cdclk_state { @@ -63,8 +65,11 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv); void intel_update_max_cdclk(struct drm_i915_private *dev_priv); void intel_update_cdclk(struct drm_i915_private *dev_priv); u32 intel_read_rawclk(struct drm_i915_private *dev_priv); -bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a, +bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a, const struct intel_cdclk_config *b); +int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915, + const struct intel_cdclk_config *cdclk_config); +bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state); void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state); void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state); void intel_cdclk_dump_config(struct drm_i915_private *i915, @@ -75,10 +80,13 @@ void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config); int intel_cdclk_atomic_check(struct intel_atomic_state *state, bool *need_cdclk_calc); +int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus); struct intel_cdclk_state * intel_atomic_get_cdclk_state(struct intel_atomic_state *state); -#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base) +#define to_intel_cdclk_state(global_state) \ + container_of_const((global_state), struct intel_cdclk_state, base) + #define intel_atomic_get_old_cdclk_state(state) \ to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj)) #define intel_atomic_get_new_cdclk_state(state) \ diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index ca7112b32c..d23163dc64 100644 --- 
a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -616,19 +616,19 @@ static void vlv_load_wgc_csc(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - intel_de_write_fw(dev_priv, PIPE_WGC_C01_C00(pipe), + intel_de_write_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe), csc->coeff[1] << 16 | csc->coeff[0]); - intel_de_write_fw(dev_priv, PIPE_WGC_C02(pipe), + intel_de_write_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe), csc->coeff[2]); - intel_de_write_fw(dev_priv, PIPE_WGC_C11_C10(pipe), + intel_de_write_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe), csc->coeff[4] << 16 | csc->coeff[3]); - intel_de_write_fw(dev_priv, PIPE_WGC_C12(pipe), + intel_de_write_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe), csc->coeff[5]); - intel_de_write_fw(dev_priv, PIPE_WGC_C21_C20(pipe), + intel_de_write_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe), csc->coeff[7] << 16 | csc->coeff[6]); - intel_de_write_fw(dev_priv, PIPE_WGC_C22(pipe), + intel_de_write_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe), csc->coeff[8]); } @@ -639,25 +639,25 @@ static void vlv_read_wgc_csc(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; u32 tmp; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C01_C00(pipe)); + tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe)); csc->coeff[0] = tmp & 0xffff; csc->coeff[1] = tmp >> 16; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C02(pipe)); + tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe)); csc->coeff[2] = tmp & 0xffff; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C11_C10(pipe)); + tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe)); csc->coeff[3] = tmp & 0xffff; csc->coeff[4] = tmp >> 16; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C12(pipe)); + tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe)); csc->coeff[5] = tmp & 0xffff; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C21_C20(pipe)); + tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe)); csc->coeff[6] = tmp & 0xffff; csc->coeff[7] = tmp >> 16; - tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C22(pipe)); + tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe)); csc->coeff[8] = tmp & 0xffff; } @@ -1227,7 +1227,7 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc, lut = blob->data; for (i = 0; i < 256; i++) - intel_de_write_fw(dev_priv, PALETTE(pipe, i), + intel_de_write_fw(dev_priv, PALETTE(dev_priv, pipe, i), i9xx_lut_8(&lut[i])); } @@ -1240,9 +1240,11 @@ static void i9xx_load_lut_10(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size - 1; i++) { - intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0), + intel_de_write_fw(dev_priv, + PALETTE(dev_priv, pipe, 2 * i + 0), i9xx_lut_10_ldw(&lut[i])); - intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1), + intel_de_write_fw(dev_priv, + PALETTE(dev_priv, pipe, 2 * i + 1), i9xx_lut_10_udw(&lut[i])); } } @@ -1274,9 +1276,11 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size - 1; i++) { - intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0), + intel_de_write_fw(dev_priv, + PALETTE(dev_priv, pipe, 2 * i + 0), i965_lut_10p6_ldw(&lut[i])); - intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1), + intel_de_write_fw(dev_priv, + PALETTE(dev_priv, pipe, 2 * i + 1), i965_lut_10p6_udw(&lut[i])); } @@ -3150,7 +3154,8 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { - u32 
val = intel_de_read_fw(dev_priv, PALETTE(pipe, i)); + u32 val = intel_de_read_fw(dev_priv, + PALETTE(dev_priv, pipe, i)); i9xx_lut_8_pack(&lut[i], val); } @@ -3176,8 +3181,10 @@ static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size - 1; i++) { - ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0)); - udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1)); + ldw = intel_de_read_fw(dev_priv, + PALETTE(dev_priv, pipe, 2 * i + 0)); + udw = intel_de_read_fw(dev_priv, + PALETTE(dev_priv, pipe, 2 * i + 1)); i9xx_lut_10_pack(&lut[i], ldw, udw); } @@ -3224,8 +3231,10 @@ static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size - 1; i++) { - u32 ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0)); - u32 udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1)); + u32 ldw = intel_de_read_fw(dev_priv, + PALETTE(dev_priv, pipe, 2 * i + 0)); + u32 udw = intel_de_read_fw(dev_priv, + PALETTE(dev_priv, pipe, 2 * i + 1)); i965_lut_10p6_pack(&lut[i], ldw, udw); } diff --git a/drivers/gpu/drm/i915/display/intel_color_regs.h b/drivers/gpu/drm/i915/display/intel_color_regs.h index 9f4ae58f3e..bb99ea5338 100644 --- a/drivers/gpu/drm/i915/display/intel_color_regs.h +++ b/drivers/gpu/drm/i915/display/intel_color_regs.h @@ -8,7 +8,35 @@ #include "intel_display_reg_defs.h" -/* legacy palette */ +/* GMCH palette */ +#define _PALETTE_A 0xa000 +#define _PALETTE_B 0xa800 +#define _CHV_PALETTE_C 0xc000 +/* 8bit mode / i965+ 10.6 interpolated mode ldw/udw */ +#define PALETTE_RED_MASK REG_GENMASK(23, 16) +#define PALETTE_GREEN_MASK REG_GENMASK(15, 8) +#define PALETTE_BLUE_MASK REG_GENMASK(7, 0) +/* pre-i965 10bit interpolated mode ldw */ +#define PALETTE_10BIT_RED_LDW_MASK REG_GENMASK(23, 16) +#define PALETTE_10BIT_GREEN_LDW_MASK REG_GENMASK(15, 8) +#define PALETTE_10BIT_BLUE_LDW_MASK REG_GENMASK(7, 0) +/* pre-i965 10bit interpolated mode udw */ +#define PALETTE_10BIT_RED_EXP_MASK REG_GENMASK(23, 22) +#define PALETTE_10BIT_RED_MANT_MASK REG_GENMASK(21, 18) +#define PALETTE_10BIT_RED_UDW_MASK REG_GENMASK(17, 16) +#define PALETTE_10BIT_GREEN_EXP_MASK REG_GENMASK(15, 14) +#define PALETTE_10BIT_GREEN_MANT_MASK REG_GENMASK(13, 10) +#define PALETTE_10BIT_GREEN_UDW_MASK REG_GENMASK(9, 8) +#define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6) +#define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2) +#define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0) +#define PALETTE(dev_priv, pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \ + _PICK_EVEN_2RANGES(pipe, 2, \ + _PALETTE_A, _PALETTE_B, \ + _CHV_PALETTE_C, _CHV_PALETTE_C) + \ + (i) * 4) + +/* ilk+ palette */ #define _LGC_PALETTE_A 0x4a000 #define _LGC_PALETTE_B 0x4a800 /* see PALETTE_* for the bits */ @@ -228,12 +256,12 @@ #define _PIPE_A_WGC_C21_C20 0x600C0 /* s2.10 */ #define _PIPE_A_WGC_C22 0x600C4 /* s2.10 */ -#define PIPE_WGC_C01_C00(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C01_C00) -#define PIPE_WGC_C02(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C02) -#define PIPE_WGC_C11_C10(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C11_C10) -#define PIPE_WGC_C12(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C12) -#define PIPE_WGC_C21_C20(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C21_C20) -#define PIPE_WGC_C22(pipe) _MMIO_TRANS2(pipe, _PIPE_A_WGC_C22) +#define PIPE_WGC_C01_C00(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C01_C00) +#define PIPE_WGC_C02(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C02) +#define PIPE_WGC_C11_C10(dev_priv, pipe) 
_MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C11_C10) +#define PIPE_WGC_C12(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C12) +#define PIPE_WGC_C21_C20(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C21_C20) +#define PIPE_WGC_C22(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_A_WGC_C22) /* pipe CSC & degamma/gamma LUTs on CHV */ #define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900) diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h index b0983edccf..0964e392d0 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h +++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h @@ -25,28 +25,26 @@ 4 * (dw)) #define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy)) -#define CL_POWER_DOWN_ENABLE (1 << 4) -#define SUS_CLOCK_CONFIG (3 << 0) +#define CL_POWER_DOWN_ENABLE REG_BIT(4) +#define SUS_CLOCK_CONFIG REG_GENMASK(1, 0) #define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy)) -#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) -#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 -#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) -#define PWR_UP_ALL_LANES (0x0 << 4) -#define PWR_DOWN_LN_3_2_1 (0xe << 4) -#define PWR_DOWN_LN_3_2 (0xc << 4) -#define PWR_DOWN_LN_3 (0x8 << 4) -#define PWR_DOWN_LN_2_1_0 (0x7 << 4) -#define PWR_DOWN_LN_1_0 (0x3 << 4) -#define PWR_DOWN_LN_3_1 (0xa << 4) -#define PWR_DOWN_LN_3_1_0 (0xb << 4) -#define PWR_DOWN_LN_MASK (0xf << 4) -#define PWR_DOWN_LN_SHIFT 4 -#define EDP4K2K_MODE_OVRD_EN (1 << 3) -#define EDP4K2K_MODE_OVRD_OPTIMIZED (1 << 2) +#define PG_SEQ_DELAY_OVERRIDE_MASK REG_GENMASK(26, 25) +#define PG_SEQ_DELAY_OVERRIDE_ENABLE REG_BIT(24) +#define PWR_DOWN_LN_MASK REG_GENMASK(7, 4) +#define PWR_UP_ALL_LANES REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x0) +#define PWR_DOWN_LN_3_2_1 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xe) +#define PWR_DOWN_LN_3_2 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xc) +#define PWR_DOWN_LN_3 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x8) +#define PWR_DOWN_LN_2_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x7) +#define PWR_DOWN_LN_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x3) +#define PWR_DOWN_LN_3_1 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xa) +#define PWR_DOWN_LN_3_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xb) +#define EDP4K2K_MODE_OVRD_EN REG_BIT(3) +#define EDP4K2K_MODE_OVRD_OPTIMIZED REG_BIT(2) #define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy)) -#define ICL_LANE_ENABLE_AUX (1 << 0) +#define ICL_LANE_ENABLE_AUX REG_BIT(0) /* ICL Port COMP_DW registers */ #define _ICL_PORT_COMP 0x100 @@ -54,24 +52,22 @@ _ICL_PORT_COMP + 4 * (dw)) #define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy)) -#define COMP_INIT (1 << 31) +#define COMP_INIT REG_BIT(31) #define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy)) #define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy)) -#define PROCESS_INFO_DOT_0 (0 << 26) -#define PROCESS_INFO_DOT_1 (1 << 26) -#define PROCESS_INFO_DOT_4 (2 << 26) -#define PROCESS_INFO_MASK (7 << 26) -#define PROCESS_INFO_SHIFT 26 -#define VOLTAGE_INFO_0_85V (0 << 24) -#define VOLTAGE_INFO_0_95V (1 << 24) -#define VOLTAGE_INFO_1_05V (2 << 24) -#define VOLTAGE_INFO_MASK (3 << 24) -#define VOLTAGE_INFO_SHIFT 24 +#define PROCESS_INFO_MASK REG_GENMASK(28, 26) +#define PROCESS_INFO_DOT_0 REG_FIELD_PREP(PROCESS_INFO_MASK, 0) +#define PROCESS_INFO_DOT_1 REG_FIELD_PREP(PROCESS_INFO_MASK, 1) +#define PROCESS_INFO_DOT_4 REG_FIELD_PREP(PROCESS_INFO_MASK, 2) +#define VOLTAGE_INFO_MASK REG_GENMASK(25, 24) +#define VOLTAGE_INFO_0_85V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 0) +#define 
VOLTAGE_INFO_0_95V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 1) +#define VOLTAGE_INFO_1_05V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 2) #define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy)) -#define IREFGEN (1 << 24) +#define IREFGEN REG_BIT(24) #define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy)) @@ -92,9 +88,9 @@ #define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy)) #define DCC_MODE_SELECT_MASK REG_GENMASK(21, 20) #define RUN_DCC_ONCE REG_FIELD_PREP(DCC_MODE_SELECT_MASK, 0) -#define COMMON_KEEPER_EN (1 << 26) -#define LATENCY_OPTIM_MASK (0x3 << 2) -#define LATENCY_OPTIM_VAL(x) ((x) << 2) +#define COMMON_KEEPER_EN REG_BIT(26) +#define LATENCY_OPTIM_MASK REG_GENMASK(3, 2) +#define LATENCY_OPTIM_VAL(x) REG_FIELD_PREP(LATENCY_OPTIM_MASK, (x)) /* ICL Port TX registers */ #define _ICL_PORT_TX_AUX 0x380 @@ -111,42 +107,49 @@ #define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy)) #define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy)) #define ICL_PORT_TX_DW2_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(2, ln, phy)) -#define SWING_SEL_UPPER(x) (((x) >> 3) << 15) -#define SWING_SEL_UPPER_MASK (1 << 15) -#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) -#define SWING_SEL_LOWER_MASK (0x7 << 11) -#define FRC_LATENCY_OPTIM_MASK (0x7 << 8) -#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8) -#define RCOMP_SCALAR(x) ((x) << 0) -#define RCOMP_SCALAR_MASK (0xFF << 0) +#define SWING_SEL_UPPER_MASK REG_BIT(15) +#define SWING_SEL_UPPER(x) REG_FIELD_PREP(SWING_SEL_UPPER_MASK, (x) >> 3) +#define SWING_SEL_LOWER_MASK REG_GENMASK(13, 11) +#define SWING_SEL_LOWER(x) REG_FIELD_PREP(SWING_SEL_LOWER_MASK, (x) & 0x7) +#define FRC_LATENCY_OPTIM_MASK REG_GENMASK(10, 8) +#define FRC_LATENCY_OPTIM_VAL(x) REG_FIELD_PREP(FRC_LATENCY_OPTIM_MASK, (x)) +#define RCOMP_SCALAR_MASK REG_GENMASK(7, 0) +#define RCOMP_SCALAR(x) REG_FIELD_PREP(RCOMP_SCALAR_MASK, (x)) #define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy)) #define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy)) #define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy)) -#define LOADGEN_SELECT (1 << 31) -#define POST_CURSOR_1(x) ((x) << 12) -#define POST_CURSOR_1_MASK (0x3F << 12) -#define POST_CURSOR_2(x) ((x) << 6) -#define POST_CURSOR_2_MASK (0x3F << 6) -#define CURSOR_COEFF(x) ((x) << 0) -#define CURSOR_COEFF_MASK (0x3F << 0) +#define LOADGEN_SELECT REG_BIT(31) +#define POST_CURSOR_1_MASK REG_GENMASK(17, 12) +#define POST_CURSOR_1(x) REG_FIELD_PREP(POST_CURSOR_1_MASK, (x)) +#define POST_CURSOR_2_MASK REG_GENMASK(11, 6) +#define POST_CURSOR_2(x) REG_FIELD_PREP(POST_CURSOR_2_MASK, (x)) +#define CURSOR_COEFF_MASK REG_GENMASK(5, 0) +#define CURSOR_COEFF(x) REG_FIELD_PREP(CURSOR_COEFF_MASK, (x)) #define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy)) #define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy)) #define ICL_PORT_TX_DW5_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(5, ln, phy)) -#define TX_TRAINING_EN (1 << 31) -#define TAP2_DISABLE (1 << 30) -#define TAP3_DISABLE (1 << 29) -#define SCALING_MODE_SEL(x) ((x) << 18) -#define SCALING_MODE_SEL_MASK (0x7 << 18) -#define RTERM_SELECT(x) ((x) << 3) -#define RTERM_SELECT_MASK (0x7 << 3) +#define TX_TRAINING_EN REG_BIT(31) +#define TAP2_DISABLE REG_BIT(30) +#define TAP3_DISABLE REG_BIT(29) +#define SCALING_MODE_SEL_MASK REG_GENMASK(20, 18) +#define SCALING_MODE_SEL(x) REG_FIELD_PREP(SCALING_MODE_SEL_MASK, (x)) +#define RTERM_SELECT_MASK REG_GENMASK(5, 3) +#define RTERM_SELECT(x) REG_FIELD_PREP(RTERM_SELECT_MASK, (x)) + +#define 
ICL_PORT_TX_DW6_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(6, phy)) +#define ICL_PORT_TX_DW6_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(6, phy)) +#define ICL_PORT_TX_DW6_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(6, ln, phy)) +#define O_FUNC_OVRD_EN REG_BIT(7) +#define O_LDO_REF_SEL_CRI REG_GENMASK(6, 1) +#define O_LDO_BYPASS_CRI REG_BIT(0) #define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy)) #define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy)) #define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy)) -#define N_SCALAR(x) ((x) << 24) -#define N_SCALAR_MASK (0x7F << 24) +#define N_SCALAR_MASK REG_GENMASK(30, 24) +#define N_SCALAR(x) REG_FIELD_PREP(N_SCALAR_MASK, (x)) #define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy)) #define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy)) diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 93479db0f8..10e95dc425 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -348,7 +348,7 @@ intel_crt_mode_valid(struct drm_connector *connector, { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); - int max_dotclk = dev_priv->max_dotclk_freq; + int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq; enum drm_mode_status status; int max_clock; @@ -356,9 +356,6 @@ intel_crt_mode_valid(struct drm_connector *connector, if (status != MODE_OK) return status; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) - return MODE_NO_DBLESCAN; - if (mode->clock < 25000) return MODE_CLOCK_LOW; diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c index 4bcf446c75..bddcc9edea 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c @@ -12,33 +12,31 @@ #include "intel_hdmi.h" #include "intel_vrr.h" -static void intel_dump_crtc_timings(struct drm_i915_private *i915, +static void intel_dump_crtc_timings(struct drm_printer *p, const struct drm_display_mode *mode) { - drm_dbg_kms(&i915->drm, "crtc timings: clock=%d, " - "hd=%d hb=%d-%d hs=%d-%d ht=%d, " - "vd=%d vb=%d-%d vs=%d-%d vt=%d, " - "flags=0x%x\n", - mode->crtc_clock, - mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end, - mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal, - mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end, - mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal, - mode->flags); + drm_printf(p, "crtc timings: clock=%d, " + "hd=%d hb=%d-%d hs=%d-%d ht=%d, " + "vd=%d vb=%d-%d vs=%d-%d vt=%d, " + "flags=0x%x\n", + mode->crtc_clock, + mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end, + mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal, + mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end, + mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal, + mode->flags); } static void -intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, +intel_dump_m_n_config(struct drm_printer *p, + const struct intel_crtc_state *pipe_config, const char *id, unsigned int lane_count, const struct intel_link_m_n *m_n) { - struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); - - drm_dbg_kms(&i915->drm, - "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n", - id, lane_count, - m_n->data_m, m_n->data_n, - m_n->link_m, m_n->link_n, m_n->tu); + drm_printf(p, "%s: 
lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n", + id, lane_count, + m_n->data_m, m_n->data_n, + m_n->link_m, m_n->link_n, m_n->tu); } static void @@ -52,17 +50,7 @@ intel_dump_infoframe(struct drm_i915_private *i915, } static void -intel_dump_dp_vsc_sdp(struct drm_i915_private *i915, - const struct drm_dp_vsc_sdp *vsc) -{ - struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL); - - drm_dp_vsc_sdp_log(&p, vsc); -} - -static void -intel_dump_buffer(struct drm_i915_private *i915, - const char *prefix, const u8 *buf, size_t len) +intel_dump_buffer(const char *prefix, const u8 *buf, size_t len) { if (!drm_debug_enabled(DRM_UT_KMS)) return; @@ -130,71 +118,66 @@ const char *intel_output_format_name(enum intel_output_format format) return output_format_str[format]; } -static void intel_dump_plane_state(const struct intel_plane_state *plane_state) +static void intel_dump_plane_state(struct drm_printer *p, + const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *i915 = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; if (!fb) { - drm_dbg_kms(&i915->drm, - "[PLANE:%d:%s] fb: [NOFB], visible: %s\n", - plane->base.base.id, plane->base.name, - str_yes_no(plane_state->uapi.visible)); + drm_printf(p, "[PLANE:%d:%s] fb: [NOFB], visible: %s\n", + plane->base.base.id, plane->base.name, + str_yes_no(plane_state->uapi.visible)); return; } - drm_dbg_kms(&i915->drm, - "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n", - plane->base.base.id, plane->base.name, - fb->base.id, fb->width, fb->height, &fb->format->format, - fb->modifier, str_yes_no(plane_state->uapi.visible)); - drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n", - plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter); + drm_printf(p, "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n", + plane->base.base.id, plane->base.name, + fb->base.id, fb->width, fb->height, &fb->format->format, + fb->modifier, str_yes_no(plane_state->uapi.visible)); + drm_printf(p, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n", + plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter); if (plane_state->uapi.visible) - drm_dbg_kms(&i915->drm, - "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", - DRM_RECT_FP_ARG(&plane_state->uapi.src), - DRM_RECT_ARG(&plane_state->uapi.dst)); + drm_printf(p, "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", + DRM_RECT_FP_ARG(&plane_state->uapi.src), + DRM_RECT_ARG(&plane_state->uapi.dst)); } static void -ilk_dump_csc(struct drm_i915_private *i915, const char *name, +ilk_dump_csc(struct drm_i915_private *i915, + struct drm_printer *p, + const char *name, const struct intel_csc_matrix *csc) { int i; - drm_dbg_kms(&i915->drm, - "%s: pre offsets: 0x%04x 0x%04x 0x%04x\n", name, - csc->preoff[0], csc->preoff[1], csc->preoff[2]); + drm_printf(p, "%s: pre offsets: 0x%04x 0x%04x 0x%04x\n", name, + csc->preoff[0], csc->preoff[1], csc->preoff[2]); for (i = 0; i < 3; i++) - drm_dbg_kms(&i915->drm, - "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name, - csc->coeff[3 * i + 0], - csc->coeff[3 * i + 1], - csc->coeff[3 * i + 2]); + drm_printf(p, "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name, + csc->coeff[3 * i + 0], + csc->coeff[3 * i + 1], + csc->coeff[3 * i + 2]); if (DISPLAY_VER(i915) < 7) return; - drm_dbg_kms(&i915->drm, - "%s: post offsets: 
0x%04x 0x%04x 0x%04x\n", name, - csc->postoff[0], csc->postoff[1], csc->postoff[2]); + drm_printf(p, "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name, + csc->postoff[0], csc->postoff[1], csc->postoff[2]); } static void -vlv_dump_csc(struct drm_i915_private *i915, const char *name, +vlv_dump_csc(struct drm_printer *p, const char *name, const struct intel_csc_matrix *csc) { int i; for (i = 0; i < 3; i++) - drm_dbg_kms(&i915->drm, - "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name, - csc->coeff[3 * i + 0], - csc->coeff[3 * i + 1], - csc->coeff[3 * i + 2]); + drm_printf(p, "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name, + csc->coeff[3 * i + 0], + csc->coeff[3 * i + 1], + csc->coeff[3 * i + 2]); } void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, @@ -205,85 +188,87 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct intel_plane_state *plane_state; struct intel_plane *plane; + struct drm_printer p; char buf[64]; int i; - drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] enable: %s [%s]\n", - crtc->base.base.id, crtc->base.name, - str_yes_no(pipe_config->hw.enable), context); + if (!drm_debug_enabled(DRM_UT_KMS)) + return; + + p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL); + + drm_printf(&p, "[CRTC:%d:%s] enable: %s [%s]\n", + crtc->base.base.id, crtc->base.name, + str_yes_no(pipe_config->hw.enable), context); if (!pipe_config->hw.enable) goto dump_planes; snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); - drm_dbg_kms(&i915->drm, - "active: %s, output_types: %s (0x%x), output format: %s, sink format: %s\n", - str_yes_no(pipe_config->hw.active), - buf, pipe_config->output_types, - intel_output_format_name(pipe_config->output_format), - intel_output_format_name(pipe_config->sink_format)); - - drm_dbg_kms(&i915->drm, - "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", - transcoder_name(pipe_config->cpu_transcoder), - pipe_config->pipe_bpp, pipe_config->dither); - - drm_dbg_kms(&i915->drm, "MST master transcoder: %s\n", - transcoder_name(pipe_config->mst_master_transcoder)); - - drm_dbg_kms(&i915->drm, - "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n", - transcoder_name(pipe_config->master_transcoder), - pipe_config->sync_mode_slaves_mask); - - drm_dbg_kms(&i915->drm, "bigjoiner: %s, pipes: 0x%x\n", - intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" : - intel_crtc_is_bigjoiner_master(pipe_config) ? 
"master" : "no", - pipe_config->bigjoiner_pipes); - - drm_dbg_kms(&i915->drm, "splitter: %s, link count %d, overlap %d\n", - str_enabled_disabled(pipe_config->splitter.enable), - pipe_config->splitter.link_count, - pipe_config->splitter.pixel_overlap); + drm_printf(&p, "active: %s, output_types: %s (0x%x), output format: %s, sink format: %s\n", + str_yes_no(pipe_config->hw.active), + buf, pipe_config->output_types, + intel_output_format_name(pipe_config->output_format), + intel_output_format_name(pipe_config->sink_format)); + + drm_printf(&p, "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", + transcoder_name(pipe_config->cpu_transcoder), + pipe_config->pipe_bpp, pipe_config->dither); + + drm_printf(&p, "MST master transcoder: %s\n", + transcoder_name(pipe_config->mst_master_transcoder)); + + drm_printf(&p, "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n", + transcoder_name(pipe_config->master_transcoder), + pipe_config->sync_mode_slaves_mask); + + drm_printf(&p, "bigjoiner: %s, pipes: 0x%x\n", + intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" : + intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no", + pipe_config->bigjoiner_pipes); + + drm_printf(&p, "splitter: %s, link count %d, overlap %d\n", + str_enabled_disabled(pipe_config->splitter.enable), + pipe_config->splitter.link_count, + pipe_config->splitter.pixel_overlap); if (pipe_config->has_pch_encoder) - intel_dump_m_n_config(pipe_config, "fdi", + intel_dump_m_n_config(&p, pipe_config, "fdi", pipe_config->fdi_lanes, &pipe_config->fdi_m_n); if (intel_crtc_has_dp_encoder(pipe_config)) { - intel_dump_m_n_config(pipe_config, "dp m_n", + intel_dump_m_n_config(&p, pipe_config, "dp m_n", pipe_config->lane_count, &pipe_config->dp_m_n); - intel_dump_m_n_config(pipe_config, "dp m2_n2", + intel_dump_m_n_config(&p, pipe_config, "dp m2_n2", pipe_config->lane_count, &pipe_config->dp_m2_n2); - drm_dbg_kms(&i915->drm, "fec: %s, enhanced framing: %s\n", - str_enabled_disabled(pipe_config->fec_enable), - str_enabled_disabled(pipe_config->enhanced_framing)); - - drm_dbg_kms(&i915->drm, "sdp split: %s\n", - str_enabled_disabled(pipe_config->sdp_split_enable)); - - drm_dbg_kms(&i915->drm, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n", - str_enabled_disabled(pipe_config->has_psr), - str_enabled_disabled(pipe_config->has_psr2), - str_enabled_disabled(pipe_config->has_panel_replay), - str_enabled_disabled(pipe_config->enable_psr2_sel_fetch)); + drm_printf(&p, "fec: %s, enhanced framing: %s\n", + str_enabled_disabled(pipe_config->fec_enable), + str_enabled_disabled(pipe_config->enhanced_framing)); + + drm_printf(&p, "sdp split: %s\n", + str_enabled_disabled(pipe_config->sdp_split_enable)); + + drm_printf(&p, "psr: %s, selective update: %s, panel replay: %s, selective fetch: %s\n", + str_enabled_disabled(pipe_config->has_psr && + !pipe_config->has_panel_replay), + str_enabled_disabled(pipe_config->has_sel_update), + str_enabled_disabled(pipe_config->has_panel_replay), + str_enabled_disabled(pipe_config->enable_psr2_sel_fetch)); } - drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n", - pipe_config->framestart_delay, pipe_config->msa_timing_delay); + drm_printf(&p, "framestart delay: %d, MSA timing delay: %d\n", + pipe_config->framestart_delay, pipe_config->msa_timing_delay); - drm_dbg_kms(&i915->drm, - "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", - pipe_config->has_audio, pipe_config->has_infoframe, - pipe_config->infoframes.enable); + drm_printf(&p, "audio: %i, infoframes: %i, 
infoframes enabled: 0x%x\n", + pipe_config->has_audio, pipe_config->has_infoframe, + pipe_config->infoframes.enable); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) - drm_dbg_kms(&i915->drm, "GCP: 0x%x\n", - pipe_config->infoframes.gcp); + drm_printf(&p, "GCP: 0x%x\n", pipe_config->infoframes.gcp); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) intel_dump_infoframe(i915, &pipe_config->infoframes.avi); @@ -301,91 +286,88 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, intel_dump_infoframe(i915, &pipe_config->infoframes.drm); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(DP_SDP_VSC)) - intel_dump_dp_vsc_sdp(i915, &pipe_config->infoframes.vsc); + drm_dp_vsc_sdp_log(&p, &pipe_config->infoframes.vsc); + if (pipe_config->infoframes.enable & + intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC)) + drm_dp_as_sdp_log(&p, &pipe_config->infoframes.as_sdp); if (pipe_config->has_audio) - intel_dump_buffer(i915, "ELD: ", pipe_config->eld, + intel_dump_buffer("ELD: ", pipe_config->eld, drm_eld_size(pipe_config->eld)); - drm_dbg_kms(&i915->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n", - str_yes_no(pipe_config->vrr.enable), - pipe_config->vrr.vmin, pipe_config->vrr.vmax, - pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband, - pipe_config->vrr.flipline, - intel_vrr_vmin_vblank_start(pipe_config), - intel_vrr_vmax_vblank_start(pipe_config)); - - drm_dbg_kms(&i915->drm, "requested mode: " DRM_MODE_FMT "\n", - DRM_MODE_ARG(&pipe_config->hw.mode)); - drm_dbg_kms(&i915->drm, "adjusted mode: " DRM_MODE_FMT "\n", - DRM_MODE_ARG(&pipe_config->hw.adjusted_mode)); - intel_dump_crtc_timings(i915, &pipe_config->hw.adjusted_mode); - drm_dbg_kms(&i915->drm, "pipe mode: " DRM_MODE_FMT "\n", - DRM_MODE_ARG(&pipe_config->hw.pipe_mode)); - intel_dump_crtc_timings(i915, &pipe_config->hw.pipe_mode); - drm_dbg_kms(&i915->drm, - "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n", - pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src), - pipe_config->pixel_rate); - - drm_dbg_kms(&i915->drm, "linetime: %d, ips linetime: %d\n", - pipe_config->linetime, pipe_config->ips_linetime); + drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n", + str_yes_no(pipe_config->vrr.enable), + pipe_config->vrr.vmin, pipe_config->vrr.vmax, + pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband, + pipe_config->vrr.flipline, + intel_vrr_vmin_vblank_start(pipe_config), + intel_vrr_vmax_vblank_start(pipe_config)); + + drm_printf(&p, "requested mode: " DRM_MODE_FMT "\n", + DRM_MODE_ARG(&pipe_config->hw.mode)); + drm_printf(&p, "adjusted mode: " DRM_MODE_FMT "\n", + DRM_MODE_ARG(&pipe_config->hw.adjusted_mode)); + intel_dump_crtc_timings(&p, &pipe_config->hw.adjusted_mode); + drm_printf(&p, "pipe mode: " DRM_MODE_FMT "\n", + DRM_MODE_ARG(&pipe_config->hw.pipe_mode)); + intel_dump_crtc_timings(&p, &pipe_config->hw.pipe_mode); + drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n", + pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src), + pipe_config->pixel_rate); + + drm_printf(&p, "linetime: %d, ips linetime: %d\n", + pipe_config->linetime, pipe_config->ips_linetime); if (DISPLAY_VER(i915) >= 9) - drm_dbg_kms(&i915->drm, - "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n", - 
crtc->num_scalers, - pipe_config->scaler_state.scaler_users, - pipe_config->scaler_state.scaler_id, - pipe_config->hw.scaling_filter); + drm_printf(&p, "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n", + crtc->num_scalers, + pipe_config->scaler_state.scaler_users, + pipe_config->scaler_state.scaler_id, + pipe_config->hw.scaling_filter); if (HAS_GMCH(i915)) - drm_dbg_kms(&i915->drm, - "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", - pipe_config->gmch_pfit.control, - pipe_config->gmch_pfit.pgm_ratios, - pipe_config->gmch_pfit.lvds_border_bits); + drm_printf(&p, "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", + pipe_config->gmch_pfit.control, + pipe_config->gmch_pfit.pgm_ratios, + pipe_config->gmch_pfit.lvds_border_bits); else - drm_dbg_kms(&i915->drm, - "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n", - DRM_RECT_ARG(&pipe_config->pch_pfit.dst), - str_enabled_disabled(pipe_config->pch_pfit.enabled), - str_yes_no(pipe_config->pch_pfit.force_thru)); + drm_printf(&p, "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n", + DRM_RECT_ARG(&pipe_config->pch_pfit.dst), + str_enabled_disabled(pipe_config->pch_pfit.enabled), + str_yes_no(pipe_config->pch_pfit.force_thru)); - drm_dbg_kms(&i915->drm, "ips: %i, double wide: %i, drrs: %i\n", - pipe_config->ips_enabled, pipe_config->double_wide, - pipe_config->has_drrs); + drm_printf(&p, "ips: %i, double wide: %i, drrs: %i\n", + pipe_config->ips_enabled, pipe_config->double_wide, + pipe_config->has_drrs); - intel_dpll_dump_hw_state(i915, &pipe_config->dpll_hw_state); + intel_dpll_dump_hw_state(i915, &p, &pipe_config->dpll_hw_state); if (IS_CHERRYVIEW(i915)) - drm_dbg_kms(&i915->drm, - "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", - pipe_config->cgm_mode, pipe_config->gamma_mode, - pipe_config->gamma_enable, pipe_config->csc_enable); + drm_printf(&p, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", + pipe_config->cgm_mode, pipe_config->gamma_mode, + pipe_config->gamma_enable, pipe_config->csc_enable); else - drm_dbg_kms(&i915->drm, - "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", - pipe_config->csc_mode, pipe_config->gamma_mode, - pipe_config->gamma_enable, pipe_config->csc_enable); - - drm_dbg_kms(&i915->drm, "pre csc lut: %s%d entries, post csc lut: %d entries\n", - pipe_config->pre_csc_lut && pipe_config->pre_csc_lut == - i915->display.color.glk_linear_degamma_lut ? "(linear) " : "", - pipe_config->pre_csc_lut ? - drm_color_lut_size(pipe_config->pre_csc_lut) : 0, - pipe_config->post_csc_lut ? - drm_color_lut_size(pipe_config->post_csc_lut) : 0); + drm_printf(&p, "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", + pipe_config->csc_mode, pipe_config->gamma_mode, + pipe_config->gamma_enable, pipe_config->csc_enable); + + drm_printf(&p, "pre csc lut: %s%d entries, post csc lut: %d entries\n", + pipe_config->pre_csc_lut && pipe_config->pre_csc_lut == + i915->display.color.glk_linear_degamma_lut ? "(linear) " : "", + pipe_config->pre_csc_lut ? + drm_color_lut_size(pipe_config->pre_csc_lut) : 0, + pipe_config->post_csc_lut ? 
+ drm_color_lut_size(pipe_config->post_csc_lut) : 0); if (DISPLAY_VER(i915) >= 11) - ilk_dump_csc(i915, "output csc", &pipe_config->output_csc); + ilk_dump_csc(i915, &p, "output csc", &pipe_config->output_csc); if (!HAS_GMCH(i915)) - ilk_dump_csc(i915, "pipe csc", &pipe_config->csc); + ilk_dump_csc(i915, &p, "pipe csc", &pipe_config->csc); else if (IS_CHERRYVIEW(i915)) - vlv_dump_csc(i915, "cgm csc", &pipe_config->csc); + vlv_dump_csc(&p, "cgm csc", &pipe_config->csc); else if (IS_VALLEYVIEW(i915)) - vlv_dump_csc(i915, "wgc csc", &pipe_config->csc); + vlv_dump_csc(&p, "wgc csc", &pipe_config->csc); dump_planes: if (!state) @@ -393,6 +375,6 @@ dump_planes: for_each_new_intel_plane_in_state(state, plane, plane_state, i) { if (plane->pipe == crtc->pipe) - intel_dump_plane_state(plane_state); + intel_dump_plane_state(&p, plane_state); } } diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index 0d3da55e1c..23a122ee20 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -509,6 +509,24 @@ static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0); } +static void wa_16021440873(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + u32 ctl = plane_state->ctl; + int et_y_position = drm_rect_height(&crtc_state->pipe_src) + 1; + enum pipe pipe = plane->pipe; + + ctl &= ~MCURSOR_MODE_MASK; + ctl |= MCURSOR_MODE_64_2B; + + intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), ctl); + + intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe), + PIPESRC_HEIGHT(et_y_position)); +} + static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) @@ -529,7 +547,11 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), plane_state->ctl); } else { - i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state); + /* Wa_16021440873 */ + if (crtc_state->enable_psr2_su_region_et) + wa_16021440873(plane, crtc_state, plane_state); + else + i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state); } } @@ -821,6 +843,28 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = { .format_mod_supported = intel_cursor_format_mod_supported, }; +static void intel_cursor_add_size_hints_property(struct intel_plane *plane) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + const struct drm_mode_config *config = &i915->drm.mode_config; + struct drm_plane_size_hint hints[4]; + int size, max_size, num_hints = 0; + + max_size = min(config->cursor_width, config->cursor_height); + + /* for simplicity only enumerate the supported square+POT sizes */ + for (size = 64; size <= max_size; size *= 2) { + if (drm_WARN_ON(&i915->drm, num_hints >= ARRAY_SIZE(hints))) + break; + + hints[num_hints].width = size; + hints[num_hints].height = size; + num_hints++; + } + + drm_plane_add_size_hints_property(&plane->base, hints, num_hints); +} + struct intel_plane * intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) @@ -879,6 +923,8 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180); + intel_cursor_add_size_hints_property(cursor); + zpos = 
DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; drm_plane_create_zpos_immutable_property(&cursor->base, zpos); diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c index 64e0f820a7..8e3b13884b 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -29,8 +29,11 @@ #define INTEL_CX0_LANE1 BIT(1) #define INTEL_CX0_BOTH_LANES (INTEL_CX0_LANE1 | INTEL_CX0_LANE0) -bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy) +bool intel_encoder_is_c10phy(struct intel_encoder *encoder) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_encoder_to_phy(encoder); + if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C) return true; @@ -46,8 +49,7 @@ static int lane_mask_to_lane(u8 lane_mask) return ilog2(lane_mask); } -static u8 intel_cx0_get_owned_lane_mask(struct drm_i915_private *i915, - struct intel_encoder *encoder) +static u8 intel_cx0_get_owned_lane_mask(struct intel_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); @@ -114,16 +116,20 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref); } -static void intel_clear_response_ready_flag(struct drm_i915_private *i915, - enum port port, int lane) +static void intel_clear_response_ready_flag(struct intel_encoder *encoder, + int lane) { - intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane), + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, encoder->port, lane), 0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET); } -static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane) +static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane) { - enum phy phy = intel_port_to_phy(i915, port); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + enum phy phy = intel_encoder_to_phy(encoder); intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane), XELPDP_PORT_M2P_TRANSACTION_RESET); @@ -135,20 +141,22 @@ static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, i return; } - intel_clear_response_ready_flag(i915, port, lane); + intel_clear_response_ready_flag(encoder, lane); } -static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port, +static int intel_cx0_wait_for_ack(struct intel_encoder *encoder, int command, int lane, u32 *val) { - enum phy phy = intel_port_to_phy(i915, port); - - if (__intel_de_wait_for_register(i915, - XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane), - XELPDP_PORT_P2M_RESPONSE_READY, - XELPDP_PORT_P2M_RESPONSE_READY, - XELPDP_MSGBUS_TIMEOUT_FAST_US, - XELPDP_MSGBUS_TIMEOUT_SLOW, val)) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + enum phy phy = intel_encoder_to_phy(encoder); + + if (intel_de_wait_custom(i915, + XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane), + XELPDP_PORT_P2M_RESPONSE_READY, + XELPDP_PORT_P2M_RESPONSE_READY, + XELPDP_MSGBUS_TIMEOUT_FAST_US, + XELPDP_MSGBUS_TIMEOUT_SLOW, val)) { drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. 
Status: 0x%x\n", phy_name(phy), *val); @@ -158,31 +166,33 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port, "PHY %c Hardware did not detect a timeout\n", phy_name(phy)); - intel_cx0_bus_reset(i915, port, lane); + intel_cx0_bus_reset(encoder, lane); return -ETIMEDOUT; } if (*val & XELPDP_PORT_P2M_ERROR_SET) { drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n", phy_name(phy), command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val); - intel_cx0_bus_reset(i915, port, lane); + intel_cx0_bus_reset(encoder, lane); return -EINVAL; } if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) { drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", phy_name(phy), command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val); - intel_cx0_bus_reset(i915, port, lane); + intel_cx0_bus_reset(encoder, lane); return -EINVAL; } return 0; } -static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port, +static int __intel_cx0_read_once(struct intel_encoder *encoder, int lane, u16 addr) { - enum phy phy = intel_port_to_phy(i915, port); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + enum phy phy = intel_encoder_to_phy(encoder); int ack; u32 val; @@ -191,7 +201,7 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port, XELPDP_MSGBUS_TIMEOUT_SLOW)) { drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy)); - intel_cx0_bus_reset(i915, port, lane); + intel_cx0_bus_reset(encoder, lane); return -ETIMEDOUT; } @@ -200,33 +210,34 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port, XELPDP_PORT_M2P_COMMAND_READ | XELPDP_PORT_M2P_ADDRESS(addr)); - ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val); + ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val); if (ack < 0) return ack; - intel_clear_response_ready_flag(i915, port, lane); + intel_clear_response_ready_flag(encoder, lane); /* * FIXME: Workaround to let HW to settle * down and let the message bus to end up * in a known state */ - intel_cx0_bus_reset(i915, port, lane); + intel_cx0_bus_reset(encoder, lane); return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val); } -static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port, +static u8 __intel_cx0_read(struct intel_encoder *encoder, int lane, u16 addr) { - enum phy phy = intel_port_to_phy(i915, port); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_encoder_to_phy(encoder); int i, status; assert_dc_off(i915); /* 3 tries is assumed to be enough to read successfully */ for (i = 0; i < 3; i++) { - status = __intel_cx0_read_once(i915, port, lane, addr); + status = __intel_cx0_read_once(encoder, lane, addr); if (status >= 0) return status; @@ -238,18 +249,20 @@ static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port, return 0; } -static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port, +static u8 intel_cx0_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr) { int lane = lane_mask_to_lane(lane_mask); - return __intel_cx0_read(i915, port, lane, addr); + return __intel_cx0_read(encoder, lane, addr); } -static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port, +static int __intel_cx0_write_once(struct intel_encoder *encoder, int lane, u16 
addr, u8 data, bool committed) { - enum phy phy = intel_port_to_phy(i915, port); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + enum phy phy = intel_encoder_to_phy(encoder); int ack; u32 val; @@ -258,7 +271,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port, XELPDP_MSGBUS_TIMEOUT_SLOW)) { drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy)); - intel_cx0_bus_reset(i915, port, lane); + intel_cx0_bus_reset(encoder, lane); return -ETIMEDOUT; } @@ -274,45 +287,46 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port, XELPDP_MSGBUS_TIMEOUT_SLOW)) { drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy)); - intel_cx0_bus_reset(i915, port, lane); + intel_cx0_bus_reset(encoder, lane); return -ETIMEDOUT; } if (committed) { - ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val); + ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val); if (ack < 0) return ack; } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane)) & XELPDP_PORT_P2M_ERROR_SET)) { drm_dbg_kms(&i915->drm, "PHY %c Error occurred during write command.\n", phy_name(phy)); - intel_cx0_bus_reset(i915, port, lane); + intel_cx0_bus_reset(encoder, lane); return -EINVAL; } - intel_clear_response_ready_flag(i915, port, lane); + intel_clear_response_ready_flag(encoder, lane); /* * FIXME: Workaround to let HW to settle * down and let the message bus to end up * in a known state */ - intel_cx0_bus_reset(i915, port, lane); + intel_cx0_bus_reset(encoder, lane); return 0; } -static void __intel_cx0_write(struct drm_i915_private *i915, enum port port, +static void __intel_cx0_write(struct intel_encoder *encoder, int lane, u16 addr, u8 data, bool committed) { - enum phy phy = intel_port_to_phy(i915, port); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_encoder_to_phy(encoder); int i, status; assert_dc_off(i915); /* 3 tries is assumed to be enough to write successfully */ for (i = 0; i < 3; i++) { - status = __intel_cx0_write_once(i915, port, lane, addr, data, committed); + status = __intel_cx0_write_once(encoder, lane, addr, data, committed); if (status == 0) return; @@ -322,63 +336,66 @@ static void __intel_cx0_write(struct drm_i915_private *i915, enum port port, "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i); } -static void intel_cx0_write(struct drm_i915_private *i915, enum port port, +static void intel_cx0_write(struct intel_encoder *encoder, u8 lane_mask, u16 addr, u8 data, bool committed) { int lane; for_each_cx0_lane_in_mask(lane_mask, lane) - __intel_cx0_write(i915, port, lane, addr, data, committed); + __intel_cx0_write(encoder, lane, addr, data, committed); } -static void intel_c20_sram_write(struct drm_i915_private *i915, enum port port, +static void intel_c20_sram_write(struct intel_encoder *encoder, int lane, u16 addr, u16 data) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + assert_dc_off(i915); - intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0); - intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0); + intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0); + intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0); - intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_H, data 
>> 8, 0); - intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_L, data & 0xff, 1); + intel_cx0_write(encoder, lane, PHY_C20_WR_DATA_H, data >> 8, 0); + intel_cx0_write(encoder, lane, PHY_C20_WR_DATA_L, data & 0xff, 1); } -static u16 intel_c20_sram_read(struct drm_i915_private *i915, enum port port, +static u16 intel_c20_sram_read(struct intel_encoder *encoder, int lane, u16 addr) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); u16 val; assert_dc_off(i915); - intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0); - intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1); + intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0); + intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1); - val = intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_H); + val = intel_cx0_read(encoder, lane, PHY_C20_RD_DATA_H); val <<= 8; - val |= intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_L); + val |= intel_cx0_read(encoder, lane, PHY_C20_RD_DATA_L); return val; } -static void __intel_cx0_rmw(struct drm_i915_private *i915, enum port port, +static void __intel_cx0_rmw(struct intel_encoder *encoder, int lane, u16 addr, u8 clear, u8 set, bool committed) { u8 old, val; - old = __intel_cx0_read(i915, port, lane, addr); + old = __intel_cx0_read(encoder, lane, addr); val = (old & ~clear) | set; if (val != old) - __intel_cx0_write(i915, port, lane, addr, val, committed); + __intel_cx0_write(encoder, lane, addr, val, committed); } -static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port, +static void intel_cx0_rmw(struct intel_encoder *encoder, u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed) { u8 lane; for_each_cx0_lane_in_mask(lane_mask, lane) - __intel_cx0_rmw(i915, port, lane, addr, clear, set, committed); + __intel_cx0_rmw(encoder, lane, addr, clear, set, committed); } static u8 intel_c10_get_tx_vboost_lvl(const struct intel_crtc_state *crtc_state) @@ -414,7 +431,6 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_ddi_buf_trans *trans; - enum phy phy = intel_port_to_phy(i915, encoder->port); u8 owned_lane_mask; intel_wakeref_t wakeref; int n_entries, ln; @@ -423,7 +439,7 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, if (intel_tc_port_in_tbt_alt_mode(dig_port)) return; - owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder); + owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); wakeref = intel_cx0_phy_transaction_begin(encoder); @@ -433,14 +449,14 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, return; } - if (intel_is_c10phy(i915, phy)) { - intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1), + if (intel_encoder_is_c10phy(encoder)) { + intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED); - intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CMN(3), + intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CMN(3), C10_CMN3_TXVBOOST_MASK, C10_CMN3_TXVBOOST(intel_c10_get_tx_vboost_lvl(crtc_state)), MB_WRITE_UNCOMMITTED); - intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_TX(1), + intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_TX(1), C10_TX1_TERMCTL_MASK, C10_TX1_TERMCTL(intel_c10_get_tx_term_ctl(crtc_state)), MB_WRITE_COMMITTED); @@ -455,27 +471,27 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, if (!(lane_mask & 
owned_lane_mask)) continue; - intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 0), + intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 0), C10_PHY_OVRD_LEVEL_MASK, C10_PHY_OVRD_LEVEL(trans->entries[level].snps.pre_cursor), MB_WRITE_COMMITTED); - intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 1), + intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 1), C10_PHY_OVRD_LEVEL_MASK, C10_PHY_OVRD_LEVEL(trans->entries[level].snps.vswing), MB_WRITE_COMMITTED); - intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 2), + intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 2), C10_PHY_OVRD_LEVEL_MASK, C10_PHY_OVRD_LEVEL(trans->entries[level].snps.post_cursor), MB_WRITE_COMMITTED); } /* Write Override enables in 0xD71 */ - intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_OVRD, + intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_OVRD, 0, PHY_C10_VDR_OVRD_TX1 | PHY_C10_VDR_OVRD_TX2, MB_WRITE_COMMITTED); - if (intel_is_c10phy(i915, phy)) - intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1), + if (intel_encoder_is_c10phy(encoder)) + intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED); intel_cx0_phy_transaction_end(encoder, wakeref); @@ -1811,7 +1827,7 @@ static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_cx0pll_state *pll_state = &crtc_state->cx0pll_state; + struct intel_cx0pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll; int i; if (intel_crtc_has_dp_encoder(crtc_state)) { @@ -1843,7 +1859,7 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state, for (i = 0; tables[i]; i++) { if (crtc_state->port_clock == tables[i]->clock) { - crtc_state->cx0pll_state.c10 = *tables[i]; + crtc_state->dpll_hw_state.cx0pll.c10 = *tables[i]; intel_c10pll_update_pll(crtc_state, encoder); return 0; @@ -1856,7 +1872,6 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state, static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c10pll_state *pll_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); u8 lane = INTEL_CX0_LANE0; intel_wakeref_t wakeref; int i; @@ -1867,16 +1882,15 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, * According to C10 VDR Register programming Sequence we need * to do this to read PHY internal registers from MsgBus. 
*/ - intel_cx0_rmw(i915, encoder->port, lane, PHY_C10_VDR_CONTROL(1), + intel_cx0_rmw(encoder, lane, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED); for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++) - pll_state->pll[i] = intel_cx0_read(i915, encoder->port, lane, - PHY_C10_VDR_PLL(i)); + pll_state->pll[i] = intel_cx0_read(encoder, lane, PHY_C10_VDR_PLL(i)); - pll_state->cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0)); - pll_state->tx = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0)); + pll_state->cmn = intel_cx0_read(encoder, lane, PHY_C10_VDR_CMN(0)); + pll_state->tx = intel_cx0_read(encoder, lane, PHY_C10_VDR_TX(0)); intel_cx0_phy_transaction_end(encoder, wakeref); } @@ -1885,31 +1899,31 @@ static void intel_c10_pll_program(struct drm_i915_private *i915, const struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - const struct intel_c10pll_state *pll_state = &crtc_state->cx0pll_state.c10; + const struct intel_c10pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c10; int i; - intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), + intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED); /* Custom width needs to be programmed to 0 for both the phy lanes */ - intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH, + intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH, C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10, MB_WRITE_COMMITTED); - intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), + intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED); /* Program the pll values only for the master lane */ for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++) - intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i), + intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i), pll_state->pll[i], (i % 4) ? 
MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED); - intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED); - intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED); + intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED); + intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED); - intel_cx0_rmw(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1), + intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED); } @@ -2037,10 +2051,8 @@ static int intel_c20_phy_check_hdmi_link_rate(int clock) int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock) { struct intel_digital_port *dig_port = hdmi_to_dig_port(hdmi); - struct drm_i915_private *i915 = intel_hdmi_to_i915(hdmi); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); - if (intel_is_c10phy(i915, phy)) + if (intel_encoder_is_c10phy(&dig_port->base)) return intel_c10_phy_check_hdmi_link_rate(clock); return intel_c20_phy_check_hdmi_link_rate(clock); } @@ -2067,7 +2079,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state, /* try computed C20 HDMI tables before using consolidated tables */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { if (intel_c20_compute_hdmi_tmds_pll(crtc_state->port_clock, - &crtc_state->cx0pll_state.c20) == 0) + &crtc_state->dpll_hw_state.cx0pll.c20) == 0) return 0; } @@ -2077,7 +2089,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state, for (i = 0; tables[i]; i++) { if (crtc_state->port_clock == tables[i]->clock) { - crtc_state->cx0pll_state.c20 = *tables[i]; + crtc_state->dpll_hw_state.cx0pll.c20 = *tables[i]; return 0; } } @@ -2088,10 +2100,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state, int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); - - if (intel_is_c10phy(i915, phy)) + if (intel_encoder_is_c10phy(encoder)) return intel_c10pll_calc_state(crtc_state, encoder); return intel_c20pll_calc_state(crtc_state, encoder); } @@ -2149,7 +2158,6 @@ static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c20pll_state *pll_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); bool cntx; intel_wakeref_t wakeref; int i; @@ -2157,25 +2165,25 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, wakeref = intel_cx0_phy_transaction_begin(encoder); /* 1. 
Read current context selection */ - cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE; + cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE; /* Read Tx configuration */ for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) { if (cntx) - pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i)); else - pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i)); } /* Read common configuration */ for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) { if (cntx) - pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i)); else - pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i)); } @@ -2183,20 +2191,20 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, /* MPLLB configuration */ for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) { if (cntx) - pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, PHY_C20_B_MPLLB_CNTX_CFG(i)); else - pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, PHY_C20_A_MPLLB_CNTX_CFG(i)); } } else { /* MPLLA configuration */ for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) { if (cntx) - pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, PHY_C20_B_MPLLA_CNTX_CFG(i)); else - pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, + pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0, PHY_C20_A_MPLLA_CNTX_CFG(i)); } } @@ -2327,7 +2335,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, const struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - const struct intel_c20pll_state *pll_state = &crtc_state->cx0pll_state.c20; + const struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20; bool dp = false; int lane = crtc_state->lane_count > 2 ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0; u32 clock = crtc_state->port_clock; @@ -2338,7 +2346,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, dp = true; /* 1. Read current context selection */ - cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0); + cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0); /* * 2. 
If there is a protocol switch from HDMI to DP or vice versa, clear @@ -2347,7 +2355,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, */ if (intel_c20_protocol_switch_valid(encoder)) { for (i = 0; i < 4; i++) - intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0); + intel_c20_sram_write(encoder, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0); usleep_range(4000, 4100); } @@ -2355,63 +2363,63 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, /* 3.1 Tx configuration */ for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) { if (cntx) - intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]); + intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]); else - intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]); + intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]); } /* 3.2 common configuration */ for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) { if (cntx) - intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]); + intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]); else - intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]); + intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]); } /* 3.3 mpllb or mplla configuration */ if (intel_c20phy_use_mpllb(pll_state)) { for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) { if (cntx) - intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, + intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_A_MPLLB_CNTX_CFG(i), pll_state->mpllb[i]); else - intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, + intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_B_MPLLB_CNTX_CFG(i), pll_state->mpllb[i]); } } else { for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) { if (cntx) - intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, + intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_A_MPLLA_CNTX_CFG(i), pll_state->mplla[i]); else - intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, + intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_B_MPLLA_CNTX_CFG(i), pll_state->mplla[i]); } } /* 4. Program custom width to match the link protocol */ - intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_WIDTH, + intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_WIDTH, PHY_C20_CUSTOM_WIDTH_MASK, PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(clock, dp)), MB_WRITE_COMMITTED); /* 5. For DP or 6. For HDMI */ if (dp) { - intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, + intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, BIT(6) | PHY_C20_CUSTOM_SERDES_MASK, BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(clock)), MB_WRITE_COMMITTED); } else { - intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, + intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, BIT(7) | PHY_C20_CUSTOM_SERDES_MASK, is_hdmi_frl(clock) ? 
BIT(7) : 0, MB_WRITE_COMMITTED); - intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE, + intel_cx0_write(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE, intel_c20_get_hdmi_rate(clock), MB_WRITE_COMMITTED); } @@ -2420,7 +2428,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, * 7. Write Vendor specific registers to toggle context setting to load * the updated programming toggle context bit */ - intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, + intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED); } @@ -2476,9 +2484,9 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder, /* TODO: HDMI FRL */ /* DP2.0 10G and 20G rates enable MPLLA*/ if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000) - val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0; + val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0; else - val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0; + val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0; intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE | @@ -2508,11 +2516,12 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state) return val; } -static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915, - enum port port, +static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder, u8 lane_mask, u8 state) { - enum phy phy = intel_port_to_phy(i915, port); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + enum phy phy = intel_encoder_to_phy(encoder); i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(i915, port); int lane; @@ -2528,7 +2537,7 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915, drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. 
Reset the bus.\n", phy_name(phy)); - intel_cx0_bus_reset(i915, port, lane); + intel_cx0_bus_reset(encoder, lane); } intel_de_rmw(i915, buf_ctl2_reg, @@ -2536,15 +2545,18 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915, intel_cx0_get_powerdown_update(lane_mask)); /* Update Timeout Value */ - if (__intel_de_wait_for_register(i915, buf_ctl2_reg, - intel_cx0_get_powerdown_update(lane_mask), 0, - XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL)) + if (intel_de_wait_custom(i915, buf_ctl2_reg, + intel_cx0_get_powerdown_update(lane_mask), 0, + XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL)) drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n", phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); } -static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port) +static void intel_cx0_setup_powerdown(struct intel_encoder *encoder) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), XELPDP_POWER_STATE_READY_MASK, XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY)); @@ -2577,13 +2589,13 @@ static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask) return val; } -static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915, - struct intel_encoder *encoder, +static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder, bool lane_reversal) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; - enum phy phy = intel_port_to_phy(i915, port); - u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder); + enum phy phy = intel_encoder_to_phy(encoder); + u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); u8 lane_mask = lane_reversal ? INTEL_CX0_LANE1 : INTEL_CX0_LANE0; u32 lane_pipe_reset = owned_lane_mask == INTEL_CX0_BOTH_LANES ? 
XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1) @@ -2593,19 +2605,19 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915, XELPDP_LANE_PHY_CURRENT_STATUS(1)) : XELPDP_LANE_PHY_CURRENT_STATUS(0); - if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(i915, port), - XELPDP_PORT_BUF_SOC_PHY_READY, - XELPDP_PORT_BUF_SOC_PHY_READY, - XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL)) + if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL1(i915, port), + XELPDP_PORT_BUF_SOC_PHY_READY, + XELPDP_PORT_BUF_SOC_PHY_READY, + XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL)) drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n", phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US); intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, lane_pipe_reset); - if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(i915, port), - lane_phy_current_status, lane_phy_current_status, - XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL)) + if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL2(i915, port), + lane_phy_current_status, lane_phy_current_status, + XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL)) drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n", phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); @@ -2613,16 +2625,16 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915, intel_cx0_get_pclk_refclk_request(owned_lane_mask), intel_cx0_get_pclk_refclk_request(lane_mask)); - if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, port), - intel_cx0_get_pclk_refclk_ack(owned_lane_mask), - intel_cx0_get_pclk_refclk_ack(lane_mask), - XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL)) + if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, port), + intel_cx0_get_pclk_refclk_ack(owned_lane_mask), + intel_cx0_get_pclk_refclk_ack(lane_mask), + XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL)) drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n", phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US); - intel_cx0_powerdown_change_sequence(i915, port, INTEL_CX0_BOTH_LANES, + intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES, CX0_P2_STATE_RESET); - intel_cx0_setup_powerdown(i915, port); + intel_cx0_setup_powerdown(encoder); intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, 0); @@ -2640,11 +2652,10 @@ static void intel_cx0_program_phy_lane(struct drm_i915_private *i915, int i; u8 disables; bool dp_alt_mode = intel_tc_port_in_dp_alt_mode(enc_to_dig_port(encoder)); - u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder); - enum port port = encoder->port; + u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); - if (intel_is_c10phy(i915, intel_port_to_phy(i915, port))) - intel_cx0_rmw(i915, port, owned_lane_mask, + if (intel_encoder_is_c10phy(encoder)) + intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED); @@ -2666,14 +2677,14 @@ static void intel_cx0_program_phy_lane(struct drm_i915_private *i915, if (!(owned_lane_mask & lane_mask)) continue; - intel_cx0_rmw(i915, port, lane_mask, PHY_CX0_TX_CONTROL(tx, 2), + intel_cx0_rmw(encoder, lane_mask, PHY_CX0_TX_CONTROL(tx, 2), CONTROL2_DISABLE_SINGLE_TX, disables & BIT(i) ? 
CONTROL2_DISABLE_SINGLE_TX : 0, MB_WRITE_COMMITTED); } - if (intel_is_c10phy(i915, intel_port_to_phy(i915, port))) - intel_cx0_rmw(i915, port, owned_lane_mask, + if (intel_encoder_is_c10phy(encoder)) + intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED); @@ -2705,7 +2716,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 : @@ -2719,13 +2730,13 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal); /* 2. Bring PHY out of reset. */ - intel_cx0_phy_lane_reset(i915, encoder, lane_reversal); + intel_cx0_phy_lane_reset(encoder, lane_reversal); /* * 3. Change Phy power state to Ready. * TODO: For DP alt mode use only one lane. */ - intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES, + intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES, CX0_P2_STATE_READY); /* @@ -2735,7 +2746,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, */ /* 5. Program PHY internal PLL internal registers. */ - if (intel_is_c10phy(i915, phy)) + if (intel_encoder_is_c10phy(encoder)) intel_c10_pll_program(i915, crtc_state, encoder); else intel_c20_pll_program(i915, crtc_state, encoder); @@ -2767,10 +2778,10 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder, intel_cx0_get_pclk_pll_request(maxpclk_lane)); /* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN == "1". */ - if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), - intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES), - intel_cx0_get_pclk_pll_ack(maxpclk_lane), - XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL)) + if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES), + intel_cx0_get_pclk_pll_ack(maxpclk_lane), + XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL)) drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n", phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US); @@ -2831,7 +2842,7 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); u32 val = 0; /* @@ -2858,10 +2869,10 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), val); /* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". 
*/ - if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), - XELPDP_TBT_CLOCK_ACK, - XELPDP_TBT_CLOCK_ACK, - 100, 0, NULL)) + if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + XELPDP_TBT_CLOCK_ACK, + XELPDP_TBT_CLOCK_ACK, + 100, 0, NULL)) drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n", encoder->base.base.id, encoder->base.name, phy_name(phy)); @@ -2892,12 +2903,12 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder, static void intel_cx0pll_disable(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); - bool is_c10 = intel_is_c10phy(i915, phy); + enum phy phy = intel_encoder_to_phy(encoder); + bool is_c10 = intel_encoder_is_c10phy(encoder); intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder); /* 1. Change owned PHY lane power to Disable state. */ - intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES, + intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES, is_c10 ? CX0_P2PG_STATE_DISABLE : CX0_P4PG_STATE_DISABLE); @@ -2920,10 +2931,10 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder) /* * 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN == "0". */ - if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), - intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) | - intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0, - XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL)) + if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) | + intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0, + XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL)) drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n", phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US); @@ -2944,7 +2955,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder) static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); /* * 1. Follow the Display Voltage Frequency Switching Sequence Before @@ -2958,8 +2969,8 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) XELPDP_TBT_CLOCK_REQUEST, 0); /* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". 
*/ - if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), - XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL)) + if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), + XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL)) drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n", encoder->base.base.id, encoder->base.name, phy_name(phy)); @@ -3014,7 +3025,7 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state, struct intel_c10pll_state *mpllb_hw_state) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); - const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10; + const struct intel_c10pll_state *mpllb_sw_state = &state->dpll_hw_state.cx0pll.c10; int i; if (intel_crtc_needs_fastset(state)) @@ -3043,10 +3054,7 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state, void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder, struct intel_cx0pll_state *pll_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); - - if (intel_is_c10phy(i915, phy)) + if (intel_encoder_is_c10phy(encoder)) intel_c10pll_readout_hw_state(encoder, &pll_state->c10); else intel_c20pll_readout_hw_state(encoder, &pll_state->c20); @@ -3055,10 +3063,7 @@ void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder, int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder, const struct intel_cx0pll_state *pll_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); - - if (intel_is_c10phy(i915, phy)) + if (intel_encoder_is_c10phy(encoder)) return intel_c10pll_calc_port_clock(encoder, &pll_state->c10); return intel_c20pll_calc_port_clock(encoder, &pll_state->c20); @@ -3070,7 +3075,7 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state, struct intel_c20pll_state *mpll_hw_state) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); - const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20; + const struct intel_c20pll_state *mpll_sw_state = &state->dpll_hw_state.cx0pll.c20; bool sw_use_mpllb = intel_c20phy_use_mpllb(mpll_sw_state); bool hw_use_mpllb = intel_c20phy_use_mpllb(mpll_hw_state); int i; @@ -3124,7 +3129,6 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder; struct intel_cx0pll_state mpll_hw_state = {}; - enum phy phy; if (DISPLAY_VER(i915) < 14) return; @@ -3138,14 +3142,13 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state, return; encoder = intel_get_crtc_new_encoder(state, new_crtc_state); - phy = intel_port_to_phy(i915, encoder->port); if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) return; intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state); - if (intel_is_c10phy(i915, phy)) + if (intel_encoder_is_c10phy(encoder)) intel_c10pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c10); else intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20); diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h index c668267725..3e03af3e00 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h @@ -11,7 +11,6 @@ #include enum icl_port_dpll_id; -enum phy; struct drm_i915_private; struct intel_atomic_state; struct intel_c10pll_state; @@ -22,7 +21,7 @@ struct 
intel_crtc_state; struct intel_encoder; struct intel_hdmi; -bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy); +bool intel_encoder_is_c10phy(struct intel_encoder *encoder); void intel_mtl_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_mtl_pll_disable(struct intel_encoder *encoder); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index c17462b4c2..6bff169fa8 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -200,10 +200,10 @@ void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, port_name(port)); } -static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv, - enum port port) +static void intel_wait_ddi_buf_active(struct intel_encoder *encoder) { - enum phy phy = intel_port_to_phy(dev_priv, port); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; int timeout_us; int ret; @@ -218,7 +218,7 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv, } else if (IS_DG2(dev_priv)) { timeout_us = 1200; } else if (DISPLAY_VER(dev_priv) >= 12) { - if (intel_phy_is_tc(dev_priv, phy)) + if (intel_encoder_is_tc(encoder)) timeout_us = 3000; else timeout_us = 1000; @@ -331,7 +331,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder, struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - enum phy phy = intel_port_to_phy(i915, encoder->port); /* DDI_BUF_CTL_ENABLE will be set by intel_ddi_prepare_link_retrain() later */ intel_dp->DP = dig_port->saved_port_bits | @@ -345,7 +344,7 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder, intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT; } - if (IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) { + if (IS_ALDERLAKE_P(i915) && intel_encoder_is_tc(encoder)) { intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock); if (!intel_tc_port_in_tbt_alt_mode(dig_port)) intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP; @@ -632,6 +631,7 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder, void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; @@ -662,10 +662,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl); - if (intel_has_quirk(dev_priv, QUIRK_INCREASE_DDI_DISABLED_TIME) && + if (intel_has_quirk(display, QUIRK_INCREASE_DDI_DISABLED_TIME) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - drm_dbg_kms(&dev_priv->drm, - "Quirk Increase DDI disabled time\n"); + drm_dbg_kms(display->drm, "Quirk Increase DDI disabled time\n"); /* Quirk time at 100ms for reliable operation */ msleep(100); } @@ -895,7 +894,6 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); /* * ICL+ HW requires corresponding AUX IOs to be powered up for PSR with @@ -914,7 +912,7 @@ 
intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port, return intel_display_power_aux_io_domain(i915, dig_port->aux_ch); else if (DISPLAY_VER(i915) < 14 && (intel_crtc_has_dp_encoder(crtc_state) || - intel_phy_is_tc(i915, phy))) + intel_encoder_is_tc(&dig_port->base))) return intel_aux_power_domain(dig_port); else return POWER_DOMAIN_INVALID; @@ -984,7 +982,7 @@ void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); u32 val; if (cpu_transcoder == TRANSCODER_EDP) @@ -1113,7 +1111,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); const struct intel_ddi_buf_trans *trans; - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); int n_entries, ln; u32 val; @@ -1176,7 +1174,7 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); u32 val; int ln; @@ -1227,7 +1225,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); + enum tc_port tc_port = intel_encoder_to_tc(encoder); const struct intel_ddi_buf_trans *trans; int n_entries, ln; @@ -1328,7 +1326,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); + enum tc_port tc_port = intel_encoder_to_tc(encoder); const struct intel_ddi_buf_trans *trans; int n_entries, ln; @@ -1526,7 +1524,7 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder, { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); if (drm_WARN_ON(&i915->drm, !pll)) return; @@ -1540,7 +1538,7 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder, static void adls_ddi_disable_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); _icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy), ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); @@ -1549,7 +1547,7 @@ static void adls_ddi_disable_clock(struct intel_encoder *encoder) static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy), ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); @@ -1558,7 +1556,7 @@ static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder) static struct intel_shared_dpll 
*adls_ddi_get_pll(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); return _icl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy), ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy), @@ -1570,7 +1568,7 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder, { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); if (drm_WARN_ON(&i915->drm, !pll)) return; @@ -1584,7 +1582,7 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder, static void rkl_ddi_disable_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0, RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); @@ -1593,7 +1591,7 @@ static void rkl_ddi_disable_clock(struct intel_encoder *encoder) static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0, RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); @@ -1602,7 +1600,7 @@ static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder) static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0, RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), @@ -1614,7 +1612,7 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder, { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); if (drm_WARN_ON(&i915->drm, !pll)) return; @@ -1637,7 +1635,7 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder, static void dg1_ddi_disable_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); _icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy), DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); @@ -1646,7 +1644,7 @@ static void dg1_ddi_disable_clock(struct intel_encoder *encoder) static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy), DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); @@ -1655,7 +1653,7 @@ static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder) static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); enum intel_dpll_id id; u32 val; @@ -1680,7 +1678,7 @@ static void icl_ddi_combo_enable_clock(struct 
intel_encoder *encoder, { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); if (drm_WARN_ON(&i915->drm, !pll)) return; @@ -1694,7 +1692,7 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder, static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); @@ -1703,7 +1701,7 @@ static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder) static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); @@ -1712,7 +1710,7 @@ static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder) struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), @@ -1767,7 +1765,7 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder, { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; - enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); + enum tc_port tc_port = intel_encoder_to_tc(encoder); enum port port = encoder->port; if (drm_WARN_ON(&i915->drm, !pll)) @@ -1787,7 +1785,7 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder, static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); + enum tc_port tc_port = intel_encoder_to_tc(encoder); enum port port = encoder->port; mutex_lock(&i915->display.dpll.lock); @@ -1803,7 +1801,7 @@ static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder) static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); + enum tc_port tc_port = intel_encoder_to_tc(encoder); enum port port = encoder->port; u32 tmp; @@ -1820,7 +1818,7 @@ static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder) static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); + enum tc_port tc_port = intel_encoder_to_tc(encoder); enum port port = encoder->port; enum intel_dpll_id id; u32 tmp; @@ -2086,12 +2084,14 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); - enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); + enum tc_port tc_port = 
intel_encoder_to_tc(&dig_port->base); u32 ln0, ln1, pin_assignment; u8 width; - if (!intel_phy_is_tc(dev_priv, phy) || + if (DISPLAY_VER(dev_priv) >= 14) + return; + + if (!intel_encoder_is_tc(&dig_port->base) || intel_tc_port_in_tbt_alt_mode(dig_port)) return; @@ -2327,9 +2327,9 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder, { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - enum phy phy = intel_port_to_phy(i915, encoder->port); - if (intel_phy_is_combo(i915, phy)) { + if (intel_encoder_is_combo(encoder)) { + enum phy phy = intel_encoder_to_phy(encoder); bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; @@ -2339,10 +2339,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder, } } -/* Splitter enable for eDP MSO is limited to certain pipes. */ +/* + * Splitter enable for eDP MSO is limited to certain pipes, on certain + * platforms. + */ static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915) { - if (IS_ALDERLAKE_P(i915)) + if (DISPLAY_VER(i915) > 20) + return ~0; + else if (IS_ALDERLAKE_P(i915)) return BIT(PIPE_A) | BIT(PIPE_B); else return BIT(PIPE_A); @@ -2812,15 +2817,14 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (HAS_DP20(dev_priv)) { + if (HAS_DP20(dev_priv)) intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder), crtc_state); - if (crtc_state->has_panel_replay) - drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, - DP_PANEL_REPLAY_ENABLE); - } + + /* Panel replay has to be enabled in sink dpcd before link training. 
*/ + if (crtc_state->has_panel_replay) + intel_psr_enable_sink(enc_to_intel_dp(encoder), crtc_state); if (DISPLAY_VER(dev_priv) >= 14) mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); @@ -3095,39 +3099,48 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); } -static void intel_ddi_post_disable(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state) +static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *slave_crtc; + struct intel_crtc *pipe_crtc; + + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(old_crtc_state)) { + const struct intel_crtc_state *old_pipe_crtc_state = + intel_atomic_get_old_crtc_state(state, pipe_crtc); + + intel_crtc_vblank_off(old_pipe_crtc_state); + } - if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) { - intel_crtc_vblank_off(old_crtc_state); + intel_disable_transcoder(old_crtc_state); - intel_disable_transcoder(old_crtc_state); + intel_ddi_disable_transcoder_func(old_crtc_state); - intel_ddi_disable_transcoder_func(old_crtc_state); + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(old_crtc_state)) { + const struct intel_crtc_state *old_pipe_crtc_state = + intel_atomic_get_old_crtc_state(state, pipe_crtc); - intel_dsc_disable(old_crtc_state); + intel_dsc_disable(old_pipe_crtc_state); if (DISPLAY_VER(dev_priv) >= 9) - skl_scaler_disable(old_crtc_state); + skl_scaler_disable(old_pipe_crtc_state); else - ilk_pfit_disable(old_crtc_state); + ilk_pfit_disable(old_pipe_crtc_state); } +} - for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc, - intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) { - const struct intel_crtc_state *old_slave_crtc_state = - intel_atomic_get_old_crtc_state(state, slave_crtc); - - intel_crtc_vblank_off(old_slave_crtc_state); - - intel_dsc_disable(old_slave_crtc_state); - skl_scaler_disable(old_slave_crtc_state); - } +static void intel_ddi_post_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) + intel_ddi_post_disable_hdmi_or_sst(state, encoder, old_crtc_state, + old_conn_state); /* * When called from DP MST code: @@ -3155,14 +3168,11 @@ static void intel_ddi_post_pll_disable(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - enum phy phy = intel_port_to_phy(i915, encoder->port); - bool is_tc_port = intel_phy_is_tc(i915, phy); main_link_aux_power_domain_put(dig_port, old_crtc_state); - if (is_tc_port) + if (intel_encoder_is_tc(encoder)) intel_tc_port_put_link(dig_port); } @@ -3263,7 +3273,6 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_connector *connector = conn_state->connector; enum port port = 
encoder->port; - enum phy phy = intel_port_to_phy(dev_priv, port); u32 buf_ctl; if (!intel_hdmi_handle_sink_scrambling(encoder, connector, @@ -3347,14 +3356,14 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, if (DISPLAY_VER(dev_priv) >= 20) buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE; - } else if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) { + } else if (IS_ALDERLAKE_P(dev_priv) && intel_encoder_is_tc(encoder)) { drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port)); buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP; } intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl); - intel_wait_ddi_buf_active(dev_priv, port); + intel_wait_ddi_buf_active(encoder); } static void intel_enable_ddi(struct intel_atomic_state *state, @@ -3362,10 +3371,10 @@ static void intel_enable_ddi(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_crtc *pipe_crtc; - if (!intel_crtc_is_bigjoiner_slave(crtc_state)) - intel_ddi_enable_transcoder_func(encoder, crtc_state); + intel_ddi_enable_transcoder_func(encoder, crtc_state); /* Enable/Disable DP2.0 SDP split config before transcoder */ intel_audio_sdp_split_update(crtc_state); @@ -3374,7 +3383,13 @@ static void intel_enable_ddi(struct intel_atomic_state *state, intel_ddi_wait_for_fec_status(encoder, crtc_state, true); - intel_crtc_vblank_on(crtc_state); + for_each_intel_crtc_in_pipe_mask_reverse(&i915->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(crtc_state)) { + const struct intel_crtc_state *pipe_crtc_state = + intel_atomic_get_new_crtc_state(state, pipe_crtc); + + intel_crtc_vblank_on(pipe_crtc_state); + } if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state); @@ -3470,19 +3485,17 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_crtc_state *crtc_state = + const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - struct intel_crtc *slave_crtc; - enum phy phy = intel_port_to_phy(i915, encoder->port); + struct intel_crtc *pipe_crtc; /* FIXME: Add MTL pll_mgr */ - if (DISPLAY_VER(i915) >= 14 || !intel_phy_is_tc(i915, phy)) + if (DISPLAY_VER(i915) >= 14 || !intel_encoder_is_tc(encoder)) return; - intel_update_active_dpll(state, crtc, encoder); - for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, - intel_crtc_bigjoiner_slave_pipes(crtc_state)) - intel_update_active_dpll(state, slave_crtc, encoder); + for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(crtc_state)) + intel_update_active_dpll(state, pipe_crtc, encoder); } static void @@ -3493,8 +3506,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - bool is_tc_port = intel_phy_is_tc(dev_priv, phy); + bool is_tc_port = intel_encoder_is_tc(encoder); if (is_tc_port) { struct intel_crtc *master_crtc = @@ -3513,14 +3525,14 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state, */ intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count); else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - 
bxt_ddi_phy_set_lane_optim_mask(encoder, - crtc_state->lane_lat_optim_mask); + bxt_dpio_phy_set_lane_optim_mask(encoder, + crtc_state->lane_lat_optim_mask); } static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); + enum tc_port tc_port = intel_encoder_to_tc(encoder); int ln; for (ln = 0; ln < 2; ln++) @@ -3574,7 +3586,7 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp, intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); /* 6.j Poll for PORT_BUF_CTL Idle Status == 0, timeout after 100 us */ - intel_wait_ddi_buf_active(dev_priv, port); + intel_wait_ddi_buf_active(encoder); } static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp, @@ -3624,7 +3636,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp, intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); - intel_wait_ddi_buf_active(dev_priv, port); + intel_wait_ddi_buf_active(encoder); } static void intel_ddi_set_link_train(struct intel_dp *intel_dp, @@ -3681,7 +3693,7 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp, if (intel_de_wait_for_set(dev_priv, dp_tp_status_reg(encoder, crtc_state), - DP_TP_STATUS_IDLE_DONE, 1)) + DP_TP_STATUS_IDLE_DONE, 2)) drm_err(&dev_priv->drm, "Timed out waiting for DP idle patterns\n"); } @@ -3946,7 +3958,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) pipe_config->lane_lat_optim_mask = - bxt_ddi_phy_get_lane_lat_optim_mask(encoder); + bxt_dpio_phy_get_lane_lat_optim_mask(encoder); intel_ddi_compute_min_voltage_level(pipe_config); @@ -3972,6 +3984,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA); intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC); + intel_read_dp_sdp(encoder, pipe_config, DP_SDP_ADAPTIVE_SYNC); intel_audio_codec_get_config(encoder, pipe_config); } @@ -4006,8 +4019,8 @@ static void mtl_ddi_get_config(struct intel_encoder *encoder, if (intel_tc_port_in_tbt_alt_mode(dig_port)) { crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder); } else { - intel_cx0pll_readout_hw_state(encoder, &crtc_state->cx0pll_state); - crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state); + intel_cx0pll_readout_hw_state(encoder, &crtc_state->dpll_hw_state.cx0pll); + crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll); } intel_ddi_get_config(encoder, crtc_state); @@ -4016,8 +4029,8 @@ static void mtl_ddi_get_config(struct intel_encoder *encoder, static void dg2_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - intel_mpllb_readout_hw_state(encoder, &crtc_state->mpllb_state); - crtc_state->port_clock = intel_mpllb_calc_port_clock(encoder, &crtc_state->mpllb_state); + intel_mpllb_readout_hw_state(encoder, &crtc_state->dpll_hw_state.mpllb); + crtc_state->port_clock = intel_mpllb_calc_port_clock(encoder, &crtc_state->dpll_hw_state.mpllb); intel_ddi_get_config(encoder, crtc_state); } @@ -4144,10 +4157,7 @@ void hsw_ddi_get_config(struct intel_encoder *encoder, static void intel_ddi_sync_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = 
intel_port_to_phy(i915, encoder->port); - - if (intel_phy_is_tc(i915, phy)) + if (intel_encoder_is_tc(encoder)) intel_tc_port_sanitize_mode(enc_to_dig_port(encoder), crtc_state); @@ -4159,10 +4169,9 @@ static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); bool fastset = true; - if (intel_phy_is_tc(i915, phy)) { + if (intel_encoder_is_tc(encoder)) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n", encoder->base.base.id, encoder->base.name); crtc_state->uapi.mode_changed = true; @@ -4226,7 +4235,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) pipe_config->lane_lat_optim_mask = - bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); + bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); intel_ddi_compute_min_voltage_level(pipe_config); @@ -4353,10 +4362,9 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->dev); struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); intel_dp_encoder_flush_work(encoder); - if (intel_phy_is_tc(i915, phy)) + if (intel_encoder_is_tc(&dig_port->base)) intel_tc_port_cleanup(dig_port); intel_display_power_flush_work(i915); @@ -4367,16 +4375,14 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) static void intel_ddi_encoder_reset(struct drm_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->dev); struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); intel_dp->reset_link_params = true; intel_pps_encoder_reset(intel_dp); - if (intel_phy_is_tc(i915, phy)) + if (intel_encoder_is_tc(&dig_port->base)) intel_tc_port_init_mode(dig_port); } @@ -4543,11 +4549,9 @@ static enum intel_hotplug_state intel_ddi_hotplug(struct intel_encoder *encoder, struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_dp *intel_dp = &dig_port->dp; - enum phy phy = intel_port_to_phy(i915, encoder->port); - bool is_tc = intel_phy_is_tc(i915, phy); + bool is_tc = intel_encoder_is_tc(encoder); struct drm_modeset_acquire_ctx ctx; enum intel_hotplug_state state; int ret; @@ -4829,10 +4833,7 @@ static bool port_strap_detected(struct drm_i915_private *i915, enum port port) static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); - - return init_dp || intel_phy_is_tc(i915, phy); + return init_dp || intel_encoder_is_tc(encoder); } static bool assert_has_icl_dsi(struct drm_i915_private *i915) @@ -5076,17 +5077,17 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, } else if (IS_DG2(dev_priv)) { encoder->set_signal_levels = intel_snps_phy_set_signal_levels; } else if (DISPLAY_VER(dev_priv) >= 12) { - if (intel_phy_is_combo(dev_priv, phy)) + if (intel_encoder_is_combo(encoder)) encoder->set_signal_levels = icl_combo_phy_set_signal_levels; else encoder->set_signal_levels = 
tgl_dkl_phy_set_signal_levels; } else if (DISPLAY_VER(dev_priv) >= 11) { - if (intel_phy_is_combo(dev_priv, phy)) + if (intel_encoder_is_combo(encoder)) encoder->set_signal_levels = icl_combo_phy_set_signal_levels; else encoder->set_signal_levels = icl_mg_phy_set_signal_levels; } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - encoder->set_signal_levels = bxt_ddi_phy_set_signal_levels; + encoder->set_signal_levels = bxt_dpio_phy_set_signal_levels; } else { encoder->set_signal_levels = hsw_set_signal_levels; } @@ -5131,7 +5132,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, goto err; } - if (intel_phy_is_tc(dev_priv, phy)) { + if (intel_encoder_is_tc(encoder)) { bool is_legacy = !intel_bios_encoder_supports_typec_usb(devdata) && !intel_bios_encoder_supports_tbt(devdata); @@ -5160,7 +5161,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port); if (DISPLAY_VER(dev_priv) >= 11) { - if (intel_phy_is_tc(dev_priv, phy)) + if (intel_encoder_is_tc(encoder)) dig_port->connected = intel_tc_port_connected; else dig_port->connected = lpt_digital_port_connected; diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index de809e2d9c..4d21ce7343 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -1691,14 +1691,11 @@ mtl_get_cx0_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); - if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->port_clock >= 1000000) return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries); - else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_is_c10phy(i915, phy))) + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_encoder_is_c10phy(encoder))) return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries); - else if (!intel_is_c10phy(i915, phy)) + else if (!intel_encoder_is_c10phy(encoder)) return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries); else return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries); @@ -1707,14 +1704,13 @@ mtl_get_cx0_buf_trans(struct intel_encoder *encoder, void intel_ddi_buf_trans_init(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); if (DISPLAY_VER(i915) >= 14) { encoder->get_buf_trans = mtl_get_cx0_buf_trans; } else if (IS_DG2(i915)) { encoder->get_buf_trans = dg2_get_snps_buf_trans; } else if (IS_ALDERLAKE_P(i915)) { - if (intel_phy_is_combo(i915, phy)) + if (intel_encoder_is_combo(encoder)) encoder->get_buf_trans = adlp_get_combo_buf_trans; else encoder->get_buf_trans = adlp_get_dkl_buf_trans; @@ -1725,16 +1721,16 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder) } else if (IS_DG1(i915)) { encoder->get_buf_trans = dg1_get_combo_buf_trans; } else if (DISPLAY_VER(i915) >= 12) { - if (intel_phy_is_combo(i915, phy)) + if (intel_encoder_is_combo(encoder)) encoder->get_buf_trans = tgl_get_combo_buf_trans; else encoder->get_buf_trans = tgl_get_dkl_buf_trans; } else if (DISPLAY_VER(i915) == 11) { - if (IS_PLATFORM(i915, INTEL_JASPERLAKE)) + if (IS_JASPERLAKE(i915)) encoder->get_buf_trans = jsl_get_combo_buf_trans; - else if (IS_PLATFORM(i915, INTEL_ELKHARTLAKE)) + else if 
(IS_ELKHARTLAKE(i915)) encoder->get_buf_trans = ehl_get_combo_buf_trans; - else if (intel_phy_is_combo(i915, phy)) + else if (intel_encoder_is_combo(encoder)) encoder->get_buf_trans = icl_get_combo_buf_trans; else encoder->get_buf_trans = icl_get_mg_buf_trans; diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h index 42552d8c15..e881bfeafb 100644 --- a/drivers/gpu/drm/i915/display/intel_de.h +++ b/drivers/gpu/drm/i915/display/intel_de.h @@ -10,80 +10,185 @@ #include "i915_trace.h" #include "intel_uncore.h" +static inline struct intel_uncore *__to_uncore(struct intel_display *display) +{ + return &to_i915(display->drm)->uncore; +} + static inline u32 -intel_de_read(struct drm_i915_private *i915, i915_reg_t reg) +__intel_de_read(struct intel_display *display, i915_reg_t reg) { - return intel_uncore_read(&i915->uncore, reg); + u32 val; + + intel_dmc_wl_get(display, reg); + + val = intel_uncore_read(__to_uncore(display), reg); + + intel_dmc_wl_put(display, reg); + + return val; } +#define intel_de_read(p,...) __intel_de_read(__to_intel_display(p), __VA_ARGS__) static inline u8 -intel_de_read8(struct drm_i915_private *i915, i915_reg_t reg) +__intel_de_read8(struct intel_display *display, i915_reg_t reg) { - return intel_uncore_read8(&i915->uncore, reg); + u8 val; + + intel_dmc_wl_get(display, reg); + + val = intel_uncore_read8(__to_uncore(display), reg); + + intel_dmc_wl_put(display, reg); + + return val; } +#define intel_de_read8(p,...) __intel_de_read8(__to_intel_display(p), __VA_ARGS__) static inline u64 -intel_de_read64_2x32(struct drm_i915_private *i915, - i915_reg_t lower_reg, i915_reg_t upper_reg) +__intel_de_read64_2x32(struct intel_display *display, + i915_reg_t lower_reg, i915_reg_t upper_reg) { - return intel_uncore_read64_2x32(&i915->uncore, lower_reg, upper_reg); + u64 val; + + intel_dmc_wl_get(display, lower_reg); + intel_dmc_wl_get(display, upper_reg); + + val = intel_uncore_read64_2x32(__to_uncore(display), lower_reg, + upper_reg); + + intel_dmc_wl_put(display, upper_reg); + intel_dmc_wl_put(display, lower_reg); + + return val; } +#define intel_de_read64_2x32(p,...) __intel_de_read64_2x32(__to_intel_display(p), __VA_ARGS__) static inline void -intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg) +__intel_de_posting_read(struct intel_display *display, i915_reg_t reg) { - intel_uncore_posting_read(&i915->uncore, reg); + intel_dmc_wl_get(display, reg); + + intel_uncore_posting_read(__to_uncore(display), reg); + + intel_dmc_wl_put(display, reg); } +#define intel_de_posting_read(p,...) __intel_de_posting_read(__to_intel_display(p), __VA_ARGS__) static inline void -intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val) +__intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val) { - intel_uncore_write(&i915->uncore, reg, val); + intel_dmc_wl_get(display, reg); + + intel_uncore_write(__to_uncore(display), reg, val); + + intel_dmc_wl_put(display, reg); } +#define intel_de_write(p,...) __intel_de_write(__to_intel_display(p), __VA_ARGS__) static inline u32 -intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set) +____intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg, + u32 clear, u32 set) { - return intel_uncore_rmw(&i915->uncore, reg, clear, set); + return intel_uncore_rmw(__to_uncore(display), reg, clear, set); } +#define __intel_de_rmw_nowl(p,...) 
____intel_de_rmw_nowl(__to_intel_display(p), __VA_ARGS__) + +static inline u32 +__intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, + u32 set) +{ + u32 val; + + intel_dmc_wl_get(display, reg); + + val = __intel_de_rmw_nowl(display, reg, clear, set); + + intel_dmc_wl_put(display, reg); + + return val; +} +#define intel_de_rmw(p,...) __intel_de_rmw(__to_intel_display(p), __VA_ARGS__) static inline int -intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg, - u32 mask, u32 value, unsigned int timeout) +____intel_de_wait_for_register_nowl(struct intel_display *display, + i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout) { - return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout); + return intel_wait_for_register(__to_uncore(display), reg, mask, + value, timeout); } +#define __intel_de_wait_for_register_nowl(p,...) ____intel_de_wait_for_register_nowl(__to_intel_display(p), __VA_ARGS__) static inline int -intel_de_wait_for_register_fw(struct drm_i915_private *i915, i915_reg_t reg, - u32 mask, u32 value, unsigned int timeout) +__intel_de_wait(struct intel_display *display, i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout) { - return intel_wait_for_register_fw(&i915->uncore, reg, mask, value, timeout); + int ret; + + intel_dmc_wl_get(display, reg); + + ret = __intel_de_wait_for_register_nowl(display, reg, mask, value, + timeout); + + intel_dmc_wl_put(display, reg); + + return ret; } +#define intel_de_wait(p,...) __intel_de_wait(__to_intel_display(p), __VA_ARGS__) static inline int -__intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg, - u32 mask, u32 value, - unsigned int fast_timeout_us, - unsigned int slow_timeout_ms, u32 *out_value) +__intel_de_wait_fw(struct intel_display *display, i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout) { - return __intel_wait_for_register(&i915->uncore, reg, mask, value, - fast_timeout_us, slow_timeout_ms, out_value); + int ret; + + intel_dmc_wl_get(display, reg); + + ret = intel_wait_for_register_fw(__to_uncore(display), reg, mask, + value, timeout); + + intel_dmc_wl_put(display, reg); + + return ret; } +#define intel_de_wait_fw(p,...) __intel_de_wait_fw(__to_intel_display(p), __VA_ARGS__) static inline int -intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg, - u32 mask, unsigned int timeout) +__intel_de_wait_custom(struct intel_display *display, i915_reg_t reg, + u32 mask, u32 value, + unsigned int fast_timeout_us, + unsigned int slow_timeout_ms, u32 *out_value) { - return intel_de_wait_for_register(i915, reg, mask, mask, timeout); + int ret; + + intel_dmc_wl_get(display, reg); + + ret = __intel_wait_for_register(__to_uncore(display), reg, mask, + value, + fast_timeout_us, slow_timeout_ms, out_value); + + intel_dmc_wl_put(display, reg); + + return ret; } +#define intel_de_wait_custom(p,...) __intel_de_wait_custom(__to_intel_display(p), __VA_ARGS__) static inline int -intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg, +__intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg, u32 mask, unsigned int timeout) { - return intel_de_wait_for_register(i915, reg, mask, 0, timeout); + return intel_de_wait(display, reg, mask, mask, timeout); +} +#define intel_de_wait_for_set(p,...) 
__intel_de_wait_for_set(__to_intel_display(p), __VA_ARGS__) + +static inline int +__intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg, + u32 mask, unsigned int timeout) +{ + return intel_de_wait(display, reg, mask, 0, timeout); } +#define intel_de_wait_for_clear(p,...) __intel_de_wait_for_clear(__to_intel_display(p), __VA_ARGS__) /* * Unlocked mmio-accessors, think carefully before using these. @@ -94,33 +199,38 @@ intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg, * a more localised lock guarding all access to that bank of registers. */ static inline u32 -intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg) +__intel_de_read_fw(struct intel_display *display, i915_reg_t reg) { u32 val; - val = intel_uncore_read_fw(&i915->uncore, reg); + val = intel_uncore_read_fw(__to_uncore(display), reg); trace_i915_reg_rw(false, reg, val, sizeof(val), true); return val; } +#define intel_de_read_fw(p,...) __intel_de_read_fw(__to_intel_display(p), __VA_ARGS__) static inline void -intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val) +__intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val) { trace_i915_reg_rw(true, reg, val, sizeof(val), true); - intel_uncore_write_fw(&i915->uncore, reg, val); + intel_uncore_write_fw(__to_uncore(display), reg, val); } +#define intel_de_write_fw(p,...) __intel_de_write_fw(__to_intel_display(p), __VA_ARGS__) static inline u32 -intel_de_read_notrace(struct drm_i915_private *i915, i915_reg_t reg) +__intel_de_read_notrace(struct intel_display *display, i915_reg_t reg) { - return intel_uncore_read_notrace(&i915->uncore, reg); + return intel_uncore_read_notrace(__to_uncore(display), reg); } +#define intel_de_read_notrace(p,...) __intel_de_read_notrace(__to_intel_display(p), __VA_ARGS__) static inline void -intel_de_write_notrace(struct drm_i915_private *i915, i915_reg_t reg, u32 val) +__intel_de_write_notrace(struct intel_display *display, i915_reg_t reg, + u32 val) { - intel_uncore_write_notrace(&i915->uncore, reg, val); + intel_uncore_write_notrace(__to_uncore(display), reg, val); } +#define intel_de_write_notrace(p,...) 
__intel_de_write_notrace(__to_intel_display(p), __VA_ARGS__) #endif /* __INTEL_DE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 8af9e61282..e53d3e900b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -85,7 +85,6 @@ #include "intel_dvo.h" #include "intel_fb.h" #include "intel_fbc.h" -#include "intel_fbdev.h" #include "intel_fdi.h" #include "intel_fifo_underrun.h" #include "intel_frontbuffer.h" @@ -120,6 +119,7 @@ #include "skl_scaler.h" #include "skl_universal_plane.h" #include "skl_watermark.h" +#include "vlv_dpio_phy_regs.h" #include "vlv_dsi.h" #include "vlv_dsi_pll.h" #include "vlv_dsi_regs.h" @@ -275,6 +275,13 @@ static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state) return hweight8(crtc_state->bigjoiner_pipes); } +u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + return BIT(crtc->pipe) | crtc_state->bigjoiner_pipes; +} + struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); @@ -383,8 +390,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, break; } - if (intel_de_wait_for_register(dev_priv, dpll_reg, - port_mask, expected_mask, 1000)) + if (intel_de_wait(dev_priv, dpll_reg, port_mask, expected_mask, 1000)) drm_WARN(&dev_priv->drm, 1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n", dig_port->base.base.base.id, dig_port->base.base.name, @@ -430,6 +436,18 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state) intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe), 0, PIPE_ARB_USE_PROG_SLOTS); + if (DISPLAY_VER(dev_priv) >= 14) { + u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA; + u32 set = 0; + + if (DISPLAY_VER(dev_priv) == 14) + set |= DP_FEC_BS_JITTER_WA; + + intel_de_rmw(dev_priv, + hsw_chicken_trans_reg(dev_priv, cpu_transcoder), + clear, set); + } + val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); if (val & TRANSCONF_ENABLE) { /* we keep both pipes enabled on 830 */ @@ -437,6 +455,14 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state) return; } + /* Wa_1409098942:adlp+ */ + if (DISPLAY_VER(dev_priv) >= 13 && + new_crtc_state->dsc.compression_enable) { + val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK; + val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK, + TRANSCONF_PIXEL_COUNT_SCALING_X4); + } + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val | TRANSCONF_ENABLE); intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); @@ -483,6 +509,11 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) if (!IS_I830(dev_priv)) val &= ~TRANSCONF_ENABLE; + /* Wa_1409098942:adlp+ */ + if (DISPLAY_VER(dev_priv) >= 13 && + old_crtc_state->dsc.compression_enable) + val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK; + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); if (DISPLAY_VER(dev_priv) >= 12) @@ -535,7 +566,7 @@ bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) struct drm_i915_private *dev_priv = to_i915(plane->base.dev); return DISPLAY_VER(dev_priv) < 4 || - (plane->fbc && + (plane->fbc && !plane_state->no_fbc_reason && plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL); } @@ -1552,18 +1583,21 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, 
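The intel_de.h changes above follow one shape throughout: each accessor grows a double-underscore worker that takes struct intel_display and brackets the MMIO access with intel_dmc_wl_get()/intel_dmc_wl_put(), while the old name becomes a variadic macro that first converts whatever pointer the caller has via __to_intel_display(). A minimal standalone sketch of that wrap-and-dispatch shape, using invented names (struct ctx, my_wl_get/my_wl_put, ctx_read) rather than the real i915 symbols:

#include <stdio.h>

struct ctx { int wl_refs; unsigned regs[16]; };

/* stand-ins for the wakelock get/put calls */
static void my_wl_get(struct ctx *c) { c->wl_refs++; }
static void my_wl_put(struct ctx *c) { c->wl_refs--; }

/* double-underscore worker takes the canonical context type ... */
static unsigned __ctx_read(struct ctx *c, unsigned reg)
{
	unsigned val;

	my_wl_get(c);		/* hold the reference across the access */
	val = c->regs[reg];
	my_wl_put(c);

	return val;
}

/* ... and the old name becomes a macro that converts the caller's
 * pointer first, mirroring __to_intel_display(p) in the hunks above.
 * The conversion is trivial here; in i915 it is type-dispatching. */
#define to_ctx(p) (p)
#define ctx_read(p, ...) __ctx_read(to_ctx(p), __VA_ARGS__)

int main(void)
{
	struct ctx c = { .regs = { [3] = 0xcafe } };

	printf("reg3=0x%x refs=%d\n", ctx_read(&c, 3), c.wl_refs);
	return 0;
}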
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); } -static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, - enum pipe pipe, bool apply) +/* Display WA #1180: WaDisableScalarClockGating: glk */ +static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state) { - u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)); - u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - if (apply) - val |= mask; - else - val &= ~mask; + return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled; +} - intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val); +static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; + + intel_de_rmw(i915, CLKGATE_DIS_PSL(crtc->pipe), + mask, enable ? mask : 0); } static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state) @@ -1586,24 +1620,6 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1)); } -static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, - const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *master_crtc = intel_master_crtc(crtc_state); - - /* - * Enable sequence steps 1-7 on bigjoiner master - */ - if (intel_crtc_is_bigjoiner_slave(crtc_state)) - intel_encoders_pre_pll_enable(state, master_crtc); - - if (crtc_state->shared_dpll) - intel_enable_shared_dpll(crtc_state); - - if (intel_crtc_is_bigjoiner_slave(crtc_state)) - intel_encoders_pre_enable(state, master_crtc); -} - static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1639,90 +1655,107 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe, hsw_workaround_pipe; enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; - bool psl_clkgate_wa; + struct intel_crtc *pipe_crtc; if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; - intel_dmc_enable_pipe(dev_priv, crtc->pipe); + for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(new_crtc_state)) + intel_dmc_enable_pipe(dev_priv, pipe_crtc->pipe); - if (!new_crtc_state->bigjoiner_pipes) { - intel_encoders_pre_pll_enable(state, crtc); + intel_encoders_pre_pll_enable(state, crtc); - if (new_crtc_state->shared_dpll) - intel_enable_shared_dpll(new_crtc_state); + for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(new_crtc_state)) { + const struct intel_crtc_state *pipe_crtc_state = + intel_atomic_get_new_crtc_state(state, pipe_crtc); - intel_encoders_pre_enable(state, crtc); - } else { - icl_ddi_bigjoiner_pre_enable(state, new_crtc_state); + if (pipe_crtc_state->shared_dpll) + intel_enable_shared_dpll(pipe_crtc_state); } - intel_dsc_enable(new_crtc_state); + intel_encoders_pre_enable(state, crtc); - if (DISPLAY_VER(dev_priv) >= 13) - intel_uncompressed_joiner_enable(new_crtc_state); + for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(new_crtc_state)) { + const struct intel_crtc_state 
*pipe_crtc_state = + intel_atomic_get_new_crtc_state(state, pipe_crtc); - intel_set_pipe_src_size(new_crtc_state); - if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - bdw_set_pipe_misc(new_crtc_state); + intel_dsc_enable(pipe_crtc_state); + + if (DISPLAY_VER(dev_priv) >= 13) + intel_uncompressed_joiner_enable(pipe_crtc_state); + + intel_set_pipe_src_size(pipe_crtc_state); - if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) && - !transcoder_is_dsi(cpu_transcoder)) + if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) + bdw_set_pipe_misc(pipe_crtc_state); + } + + if (!transcoder_is_dsi(cpu_transcoder)) hsw_configure_cpu_transcoder(new_crtc_state); - crtc->active = true; + for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(new_crtc_state)) { + const struct intel_crtc_state *pipe_crtc_state = + intel_atomic_get_new_crtc_state(state, pipe_crtc); - /* Display WA #1180: WaDisableScalarClockGating: glk */ - psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 && - new_crtc_state->pch_pfit.enabled; - if (psl_clkgate_wa) - glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); + pipe_crtc->active = true; - if (DISPLAY_VER(dev_priv) >= 9) - skl_pfit_enable(new_crtc_state); - else - ilk_pfit_enable(new_crtc_state); + if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) + glk_pipe_scaler_clock_gating_wa(pipe_crtc, true); - /* - * On ILK+ LUT must be loaded before the pipe is running but with - * clocks enabled - */ - intel_color_load_luts(new_crtc_state); - intel_color_commit_noarm(new_crtc_state); - intel_color_commit_arm(new_crtc_state); - /* update DSPCNTR to configure gamma/csc for pipe bottom color */ - if (DISPLAY_VER(dev_priv) < 9) - intel_disable_primary_plane(new_crtc_state); + if (DISPLAY_VER(dev_priv) >= 9) + skl_pfit_enable(pipe_crtc_state); + else + ilk_pfit_enable(pipe_crtc_state); - hsw_set_linetime_wm(new_crtc_state); + /* + * On ILK+ LUT must be loaded before the pipe is running but with + * clocks enabled + */ + intel_color_load_luts(pipe_crtc_state); + intel_color_commit_noarm(pipe_crtc_state); + intel_color_commit_arm(pipe_crtc_state); + /* update DSPCNTR to configure gamma/csc for pipe bottom color */ + if (DISPLAY_VER(dev_priv) < 9) + intel_disable_primary_plane(pipe_crtc_state); - if (DISPLAY_VER(dev_priv) >= 11) - icl_set_pipe_chicken(new_crtc_state); + hsw_set_linetime_wm(pipe_crtc_state); - intel_initial_watermarks(state, crtc); + if (DISPLAY_VER(dev_priv) >= 11) + icl_set_pipe_chicken(pipe_crtc_state); - if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) - intel_crtc_vblank_on(new_crtc_state); + intel_initial_watermarks(state, pipe_crtc); + } intel_encoders_enable(state, crtc); - if (psl_clkgate_wa) { - intel_crtc_wait_for_next_vblank(crtc); - glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); - } + for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(new_crtc_state)) { + const struct intel_crtc_state *pipe_crtc_state = + intel_atomic_get_new_crtc_state(state, pipe_crtc); + enum pipe hsw_workaround_pipe; - /* If we change the relative order between pipe/planes enabling, we need - * to change the workaround. 
*/ - hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe; - if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { - struct intel_crtc *wa_crtc; + if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) { + intel_crtc_wait_for_next_vblank(pipe_crtc); + glk_pipe_scaler_clock_gating_wa(pipe_crtc, false); + } - wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe); + /* + * If we change the relative order between pipe/planes + * enabling, we need to change the workaround. + */ + hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe; + if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { + struct intel_crtc *wa_crtc = + intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe); - intel_crtc_wait_for_next_vblank(wa_crtc); - intel_crtc_wait_for_next_vblank(wa_crtc); + intel_crtc_wait_for_next_vblank(wa_crtc); + intel_crtc_wait_for_next_vblank(wa_crtc); + } } } @@ -1786,29 +1819,28 @@ static void hsw_crtc_disable(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_crtc *pipe_crtc; /* * FIXME collapse everything to one hook. * Need care with mst->ddi interactions. */ - if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) { - intel_encoders_disable(state, crtc); - intel_encoders_post_disable(state, crtc); - } - - intel_disable_shared_dpll(old_crtc_state); + intel_encoders_disable(state, crtc); + intel_encoders_post_disable(state, crtc); - if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) { - struct intel_crtc *slave_crtc; + for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(old_crtc_state)) { + const struct intel_crtc_state *old_pipe_crtc_state = + intel_atomic_get_old_crtc_state(state, pipe_crtc); - intel_encoders_post_pll_disable(state, crtc); + intel_disable_shared_dpll(old_pipe_crtc_state); + } - intel_dmc_disable_pipe(i915, crtc->pipe); + intel_encoders_post_pll_disable(state, crtc); - for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, - intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) - intel_dmc_disable_pipe(i915, slave_crtc->pipe); - } + for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(old_crtc_state)) + intel_dmc_disable_pipe(i915, pipe_crtc->pipe); } static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) @@ -1836,6 +1868,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0); } +/* Prefer intel_encoder_is_combo() */ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) { if (phy == PHY_NONE) @@ -1857,6 +1890,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) return false; } +/* Prefer intel_encoder_is_tc() */ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) { /* @@ -1877,6 +1911,7 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) return false; } +/* Prefer intel_encoder_is_snps() */ bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy) { /* @@ -1886,6 +1921,7 @@ bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy) return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E; } +/* Prefer intel_encoder_to_phy() */ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) { if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD) @@ -1903,6 +1939,7 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum 
port port) return PHY_A + port - PORT_A; } +/* Prefer intel_encoder_to_tc() */ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) { if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) @@ -1914,6 +1951,41 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) return TC_PORT_1 + port - PORT_C; } +enum phy intel_encoder_to_phy(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + return intel_port_to_phy(i915, encoder->port); +} + +bool intel_encoder_is_combo(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + return intel_phy_is_combo(i915, intel_encoder_to_phy(encoder)); +} + +bool intel_encoder_is_snps(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + return intel_phy_is_snps(i915, intel_encoder_to_phy(encoder)); +} + +bool intel_encoder_is_tc(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder)); +} + +enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + return intel_port_to_tc(i915, encoder->port); +} + enum intel_display_power_domain intel_aux_power_domain(struct intel_digital_port *dig_port) { @@ -2381,7 +2453,7 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; - int clock_limit = i915->max_dotclk_freq; + int clock_limit = i915->display.cdclk.max_dotclk_freq; /* * Start with the adjusted_mode crtc timings, which @@ -2405,7 +2477,7 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) */ if (intel_crtc_supports_double_wide(crtc) && pipe_mode->crtc_clock > clock_limit) { - clock_limit = i915->max_dotclk_freq; + clock_limit = i915->display.cdclk.max_dotclk_freq; crtc_state->double_wide = true; } } @@ -2999,19 +3071,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, i9xx_get_pfit_config(pipe_config); + i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state); + if (DISPLAY_VER(dev_priv) >= 4) { - /* No way to read it out on pipes B and C */ - if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) - tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe]; - else - tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe)); + tmp = pipe_config->dpll_hw_state.i9xx.dpll_md; pipe_config->pixel_multiplier = ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; - pipe_config->dpll_hw_state.dpll_md = tmp; } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { - tmp = intel_de_read(dev_priv, DPLL(crtc->pipe)); + tmp = pipe_config->dpll_hw_state.i9xx.dpll; pipe_config->pixel_multiplier = ((tmp & SDVO_MULTIPLIER_MASK) >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; @@ -3021,26 +3090,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, * function. 
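The intel_encoder_to_phy()/intel_encoder_is_combo()/intel_encoder_is_tc() helpers added above let the call sites in this patch drop their local "enum phy phy = intel_port_to_phy(...)" temporaries, since the encoder pointer already carries both the device and the port. A rough standalone sketch of that call-site simplification, with made-up device/widget types standing in for drm_i915_private and intel_encoder:

#include <stdbool.h>
#include <stdio.h>

enum phy { PHY_A, PHY_B, PHY_C };

struct device { int gen; };
struct widget { struct device *dev; int port; };

/* old-style helpers: the caller must supply device and port itself */
static enum phy port_to_phy(struct device *dev, int port)
{
	(void)dev;
	return (enum phy)port;
}

static bool phy_is_tc(struct device *dev, enum phy phy)
{
	return dev->gen >= 12 && phy >= PHY_C;
}

/* new-style wrappers derive everything from the object, mirroring
 * intel_encoder_to_phy()/intel_encoder_is_tc() in the hunk above */
static enum phy widget_to_phy(struct widget *w)
{
	return port_to_phy(w->dev, w->port);
}

static bool widget_is_tc(struct widget *w)
{
	return phy_is_tc(w->dev, widget_to_phy(w));
}

int main(void)
{
	struct device dev = { .gen = 14 };
	struct widget w = { .dev = &dev, .port = 2 };

	/* call sites lose their local "enum phy phy = ..." temporaries */
	printf("is_tc=%d\n", widget_is_tc(&w));
	return 0;
}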
*/ pipe_config->pixel_multiplier = 1; } - pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv, - DPLL(crtc->pipe)); - if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { - pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv, - FP0(crtc->pipe)); - pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv, - FP1(crtc->pipe)); - } else { - /* Mask out read-only status bits. */ - pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | - DPLL_PORTC_READY_MASK | - DPLL_PORTB_READY_MASK); - } if (IS_CHERRYVIEW(dev_priv)) - chv_crtc_clock_get(crtc, pipe_config); + chv_crtc_clock_get(pipe_config); else if (IS_VALLEYVIEW(dev_priv)) - vlv_crtc_clock_get(crtc, pipe_config); + vlv_crtc_clock_get(pipe_config); else - i9xx_crtc_clock_get(crtc, pipe_config); + i9xx_crtc_clock_get(pipe_config); /* * Normally the dotclock is filled in by the encoder .get_config() @@ -3666,8 +3722,8 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config, struct intel_display_power_domain_set *power_domain_set) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder; enum port port; u32 tmp; @@ -3693,11 +3749,11 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, break; /* XXX: this works for video mode only */ - tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)); + tmp = intel_de_read(display, BXT_MIPI_PORT_CTRL(port)); if (!(tmp & DPI_ENABLE)) continue; - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); + tmp = intel_de_read(display, MIPI_CTRL(display, port)); if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) continue; @@ -4714,8 +4770,6 @@ intel_modeset_pipe_config_late(struct intel_atomic_state *state, struct drm_connector *connector; int i; - intel_bigjoiner_adjust_pipe_src(crtc_state); - for_each_new_connector_in_state(&state->base, connector, conn_state, i) { struct intel_encoder *encoder = @@ -4782,42 +4836,92 @@ intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a, a->content_type == b->content_type; } +static bool +intel_compare_dp_as_sdp(const struct drm_dp_as_sdp *a, + const struct drm_dp_as_sdp *b) +{ + return a->vtotal == b->vtotal && + a->target_rr == b->target_rr && + a->duration_incr_ms == b->duration_incr_ms && + a->duration_decr_ms == b->duration_decr_ms && + a->mode == b->mode; +} + static bool intel_compare_buffer(const u8 *a, const u8 *b, size_t len) { return memcmp(a, b, len) == 0; } +static void __printf(5, 6) +pipe_config_mismatch(struct drm_printer *p, bool fastset, + const struct intel_crtc *crtc, + const char *name, const char *format, ...) 
+{ + struct va_format vaf; + va_list args; + + va_start(args, format); + vaf.fmt = format; + vaf.va = &args; + + if (fastset) + drm_printf(p, "[CRTC:%d:%s] fastset requirement not met in %s %pV\n", + crtc->base.base.id, crtc->base.name, name, &vaf); + else + drm_printf(p, "[CRTC:%d:%s] mismatch in %s %pV\n", + crtc->base.base.id, crtc->base.name, name, &vaf); + + va_end(args); +} + static void -pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, - bool fastset, const char *name, +pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset, + const struct intel_crtc *crtc, + const char *name, const union hdmi_infoframe *a, const union hdmi_infoframe *b) { + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + const char *loglevel; + if (fastset) { if (!drm_debug_enabled(DRM_UT_KMS)) return; - drm_dbg_kms(&dev_priv->drm, - "fastset requirement not met in %s infoframe\n", name); - drm_dbg_kms(&dev_priv->drm, "expected:\n"); - hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); - drm_dbg_kms(&dev_priv->drm, "found:\n"); - hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); + loglevel = KERN_DEBUG; } else { - drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name); - drm_err(&dev_priv->drm, "expected:\n"); - hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); - drm_err(&dev_priv->drm, "found:\n"); - hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); + loglevel = KERN_ERR; } + + pipe_config_mismatch(p, fastset, crtc, name, "infoframe"); + + drm_printf(p, "expected:\n"); + hdmi_infoframe_log(loglevel, i915->drm.dev, a); + drm_printf(p, "found:\n"); + hdmi_infoframe_log(loglevel, i915->drm.dev, b); } static void -pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *i915, - bool fastset, const char *name, +pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset, + const struct intel_crtc *crtc, + const char *name, const struct drm_dp_vsc_sdp *a, const struct drm_dp_vsc_sdp *b) +{ + pipe_config_mismatch(p, fastset, crtc, name, "dp sdp"); + + drm_printf(p, "expected:\n"); + drm_dp_vsc_sdp_log(p, a); + drm_printf(p, "found:\n"); + drm_dp_vsc_sdp_log(p, b); +} + +static void +pipe_config_dp_as_sdp_mismatch(struct drm_i915_private *i915, + bool fastset, const char *name, + const struct drm_dp_as_sdp *a, + const struct drm_dp_as_sdp *b) { struct drm_printer p; @@ -4832,9 +4936,9 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *i915, } drm_printf(&p, "expected:\n"); - drm_dp_vsc_sdp_log(&p, a); + drm_dp_as_sdp_log(&p, a); drm_printf(&p, "found:\n"); - drm_dp_vsc_sdp_log(&p, b); + drm_dp_as_sdp_log(&p, b); } /* Returns the length up to and including the last differing byte */ @@ -4852,64 +4956,35 @@ memcmp_diff_len(const u8 *a, const u8 *b, size_t len) } static void -pipe_config_buffer_mismatch(bool fastset, const struct intel_crtc *crtc, +pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset, + const struct intel_crtc *crtc, const char *name, const u8 *a, const u8 *b, size_t len) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const char *loglevel; if (fastset) { if (!drm_debug_enabled(DRM_UT_KMS)) return; - /* only dump up to the last difference */ - len = memcmp_diff_len(a, b, len); - - drm_dbg_kms(&dev_priv->drm, - "[CRTC:%d:%s] fastset requirement not met in %s buffer\n", - crtc->base.base.id, crtc->base.name, name); - print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE, - 16, 0, a, len, false); - print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE, - 16, 0, b, len, false); + loglevel = KERN_DEBUG; } 
else { - /* only dump up to the last difference */ - len = memcmp_diff_len(a, b, len); - - drm_err(&dev_priv->drm, "[CRTC:%d:%s] mismatch in %s buffer\n", - crtc->base.base.id, crtc->base.name, name); - print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE, - 16, 0, a, len, false); - print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE, - 16, 0, b, len, false); + loglevel = KERN_ERR; } -} -static void __printf(4, 5) -pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, - const char *name, const char *format, ...) -{ - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - struct va_format vaf; - va_list args; + pipe_config_mismatch(p, fastset, crtc, name, "buffer"); - va_start(args, format); - vaf.fmt = format; - vaf.va = &args; + /* only dump up to the last difference */ + len = memcmp_diff_len(a, b, len); - if (fastset) - drm_dbg_kms(&i915->drm, - "[CRTC:%d:%s] fastset requirement not met in %s %pV\n", - crtc->base.base.id, crtc->base.name, name, &vaf); - else - drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n", - crtc->base.base.id, crtc->base.name, name, &vaf); - - va_end(args); + print_hex_dump(loglevel, "expected: ", DUMP_PREFIX_NONE, + 16, 0, a, len, false); + print_hex_dump(loglevel, "found: ", DUMP_PREFIX_NONE, + 16, 0, b, len, false); } static void -pipe_config_pll_mismatch(bool fastset, +pipe_config_pll_mismatch(struct drm_printer *p, bool fastset, const struct intel_crtc *crtc, const char *name, const struct intel_dpll_hw_state *a, @@ -4917,25 +4992,12 @@ pipe_config_pll_mismatch(bool fastset, { struct drm_i915_private *i915 = to_i915(crtc->base.dev); - if (fastset) { - if (!drm_debug_enabled(DRM_UT_KMS)) - return; + pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */ - drm_dbg_kms(&i915->drm, - "[CRTC:%d:%s] fastset requirement not met in %s\n", - crtc->base.base.id, crtc->base.name, name); - drm_dbg_kms(&i915->drm, "expected:\n"); - intel_dpll_dump_hw_state(i915, a); - drm_dbg_kms(&i915->drm, "found:\n"); - intel_dpll_dump_hw_state(i915, b); - } else { - drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s buffer\n", - crtc->base.base.id, crtc->base.name, name); - drm_err(&i915->drm, "expected:\n"); - intel_dpll_dump_hw_state(i915, a); - drm_err(&i915->drm, "found:\n"); - intel_dpll_dump_hw_state(i915, b); - } + drm_printf(p, "expected:\n"); + intel_dpll_dump_hw_state(i915, p, a); + drm_printf(p, "found:\n"); + intel_dpll_dump_hw_state(i915, p, b); } bool @@ -4945,13 +5007,19 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, { struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + struct drm_printer p; bool ret = true; + if (fastset) + p = drm_dbg_printer(&dev_priv->drm, DRM_UT_KMS, NULL); + else + p = drm_err_printer(&dev_priv->drm, NULL); + #define PIPE_CONF_CHECK_X(name) do { \ if (current_config->name != pipe_config->name) { \ BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ __stringify(name) " is bool"); \ - pipe_config_mismatch(fastset, crtc, __stringify(name), \ + pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ "(expected 0x%08x, found 0x%08x)", \ current_config->name, \ pipe_config->name); \ @@ -4963,7 +5031,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \ BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ __stringify(name) " is bool"); \ - 
pipe_config_mismatch(fastset, crtc, __stringify(name), \ + pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ "(expected 0x%08x, found 0x%08x)", \ current_config->name & (mask), \ pipe_config->name & (mask)); \ @@ -4975,7 +5043,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, if (current_config->name != pipe_config->name) { \ BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ __stringify(name) " is bool"); \ - pipe_config_mismatch(fastset, crtc, __stringify(name), \ + pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ "(expected %i, found %i)", \ current_config->name, \ pipe_config->name); \ @@ -4987,7 +5055,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, if (current_config->name != pipe_config->name) { \ BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \ __stringify(name) " is not bool"); \ - pipe_config_mismatch(fastset, crtc, __stringify(name), \ + pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ "(expected %s, found %s)", \ str_yes_no(current_config->name), \ str_yes_no(pipe_config->name)); \ @@ -4997,7 +5065,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_P(name) do { \ if (current_config->name != pipe_config->name) { \ - pipe_config_mismatch(fastset, crtc, __stringify(name), \ + pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ "(expected %p, found %p)", \ current_config->name, \ pipe_config->name); \ @@ -5008,7 +5076,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_M_N(name) do { \ if (!intel_compare_link_m_n(¤t_config->name, \ &pipe_config->name)) { \ - pipe_config_mismatch(fastset, crtc, __stringify(name), \ + pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ "(expected tu %i data %i/%i link %i/%i, " \ "found tu %i, data %i/%i link %i/%i)", \ current_config->name.tu, \ @@ -5028,7 +5096,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_PLL(name) do { \ if (!intel_dpll_compare_hw_state(dev_priv, ¤t_config->name, \ &pipe_config->name)) { \ - pipe_config_pll_mismatch(fastset, crtc, __stringify(name), \ + pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \ ¤t_config->name, \ &pipe_config->name); \ ret = false; \ @@ -5061,7 +5129,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ if ((current_config->name ^ pipe_config->name) & (mask)) { \ - pipe_config_mismatch(fastset, crtc, __stringify(name), \ + pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \ "(%x) (expected %i, found %i)", \ (mask), \ current_config->name & (mask), \ @@ -5073,7 +5141,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ if (!intel_compare_infoframe(¤t_config->infoframes.name, \ &pipe_config->infoframes.name)) { \ - pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \ + pipe_config_infoframe_mismatch(&p, fastset, crtc, __stringify(name), \ ¤t_config->infoframes.name, \ &pipe_config->infoframes.name); \ ret = false; \ @@ -5083,7 +5151,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \ if (!intel_compare_dp_vsc_sdp(¤t_config->infoframes.name, \ &pipe_config->infoframes.name)) { \ - pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \ + 
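With pipe_config_mismatch() now taking a struct drm_printer, intel_pipe_config_compare() picks the output sink once (drm_dbg_printer() for fastset checks, drm_err_printer() otherwise) and every mismatch helper simply drm_printf()s into it instead of branching on fastset itself. A small standalone sketch of that select-once, pass-down pattern, with a hand-rolled printer type in place of struct drm_printer:

#include <stdarg.h>
#include <stdio.h>

/* stand-in for struct drm_printer: one output routine chosen up front */
struct printer { void (*print)(const char *fmt, va_list args); };

static void to_stdout(const char *fmt, va_list args) { vprintf(fmt, args); }
static void to_stderr(const char *fmt, va_list args) { vfprintf(stderr, fmt, args); }

static void p_printf(struct printer *p, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	p->print(fmt, args);
	va_end(args);
}

/* helpers no longer branch on "fastset"; they just print */
static void report_mismatch(struct printer *p, const char *name, int a, int b)
{
	p_printf(p, "mismatch in %s (expected %d, found %d)\n", name, a, b);
}

int main(void)
{
	int fastset = 1;
	struct printer p = { fastset ? to_stdout : to_stderr };

	report_mismatch(&p, "port_clock", 270000, 540000);
	return 0;
}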
pipe_config_dp_vsc_sdp_mismatch(&p, fastset, crtc, __stringify(name), \ + ¤t_config->infoframes.name, \ + &pipe_config->infoframes.name); \ + ret = false; \ + } \ +} while (0) + +#define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \ + if (!intel_compare_dp_as_sdp(¤t_config->infoframes.name, \ + &pipe_config->infoframes.name)) { \ + pipe_config_dp_as_sdp_mismatch(dev_priv, fastset, __stringify(name), \ ¤t_config->infoframes.name, \ &pipe_config->infoframes.name); \ ret = false; \ @@ -5094,7 +5172,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, BUILD_BUG_ON(sizeof(current_config->name) != (len)); \ BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \ if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \ - pipe_config_buffer_mismatch(fastset, crtc, __stringify(name), \ + pipe_config_buffer_mismatch(&p, fastset, crtc, __stringify(name), \ current_config->name, \ pipe_config->name, \ (len)); \ @@ -5107,7 +5185,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, !intel_color_lut_equal(current_config, \ current_config->lut, pipe_config->lut, \ is_pre_csc_lut)) { \ - pipe_config_mismatch(fastset, crtc, __stringify(lut), \ + pipe_config_mismatch(&p, fastset, crtc, __stringify(lut), \ "hw_state doesn't match sw_state"); \ ret = false; \ } \ @@ -5236,6 +5314,20 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_CSC(output_csc); } + /* + * Panel replay has to be enabled before link training. PSR doesn't have + * this requirement -> check these only if using panel replay + */ + if (current_config->active_planes && + (current_config->has_panel_replay || + pipe_config->has_panel_replay)) { + PIPE_CONF_CHECK_BOOL(has_psr); + PIPE_CONF_CHECK_BOOL(has_sel_update); + PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch); + PIPE_CONF_CHECK_BOOL(enable_psr2_su_region_et); + PIPE_CONF_CHECK_BOOL(has_panel_replay); + } + PIPE_CONF_CHECK_BOOL(double_wide); if (dev_priv->display.dpll.mgr) @@ -5271,6 +5363,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_INFOFRAME(hdmi); PIPE_CONF_CHECK_INFOFRAME(drm); PIPE_CONF_CHECK_DP_VSC_SDP(vsc); + PIPE_CONF_CHECK_DP_AS_SDP(as_sdp); PIPE_CONF_CHECK_X(sync_mode_slaves_mask); PIPE_CONF_CHECK_I(master_transcoder); @@ -5322,6 +5415,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(vrr.flipline); PIPE_CONF_CHECK_I(vrr.pipeline_full); PIPE_CONF_CHECK_I(vrr.guardband); + PIPE_CONF_CHECK_I(vrr.vsync_start); + PIPE_CONF_CHECK_I(vrr.vsync_end); } #undef PIPE_CONF_CHECK_X @@ -5567,14 +5662,16 @@ static int intel_modeset_checks(struct intel_atomic_state *state) static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { - struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); /* only allow LRR when the timings stay within the VRR range */ if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range) new_crtc_state->update_lrr = false; if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) - drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n"); + drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n", + crtc->base.base.id, crtc->base.name); else new_crtc_state->uapi.mode_changed = false; @@ -6228,27 +6325,37 
@@ static int intel_atomic_check_config(struct intel_atomic_state *state, continue; } - if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) { - drm_WARN_ON(&i915->drm, new_crtc_state->uapi.enable); + if (drm_WARN_ON(&i915->drm, intel_crtc_is_bigjoiner_slave(new_crtc_state))) continue; - } ret = intel_crtc_prepare_cleared_state(state, crtc); if (ret) - break; + goto fail; if (!new_crtc_state->hw.enable) continue; ret = intel_modeset_pipe_config(state, crtc, limits); if (ret) - break; + goto fail; + } - ret = intel_atomic_check_bigjoiner(state, crtc); + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + if (!intel_crtc_needs_modeset(new_crtc_state)) + continue; + + if (drm_WARN_ON(&i915->drm, intel_crtc_is_bigjoiner_slave(new_crtc_state))) + continue; + + if (!new_crtc_state->hw.enable) + continue; + + ret = intel_modeset_pipe_config_late(state, crtc); if (ret) - break; + goto fail; } +fail: if (ret) *failed_pipe = crtc->pipe; @@ -6344,16 +6451,26 @@ int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + if (!intel_crtc_needs_modeset(new_crtc_state)) + continue; + + if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) { + drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable); + continue; + } + + ret = intel_atomic_check_bigjoiner(state, crtc); + if (ret) + goto fail; + } + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!intel_crtc_needs_modeset(new_crtc_state)) continue; - if (new_crtc_state->hw.enable) { - ret = intel_modeset_pipe_config_late(state, crtc); - if (ret) - goto fail; - } + intel_bigjoiner_adjust_pipe_src(new_crtc_state); intel_crtc_check_fastset(old_crtc_state, new_crtc_state); } @@ -6635,17 +6752,21 @@ static void intel_enable_crtc(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + struct intel_crtc *pipe_crtc; if (!intel_crtc_needs_modeset(new_crtc_state)) return; - /* VRR will be enable later, if required */ - intel_crtc_update_active_timings(new_crtc_state, false); + for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(new_crtc_state)) { + const struct intel_crtc_state *pipe_crtc_state = + intel_atomic_get_new_crtc_state(state, pipe_crtc); - dev_priv->display.funcs.display->crtc_enable(state, crtc); + /* VRR will be enable later, if required */ + intel_crtc_update_active_timings(pipe_crtc_state, false); + } - if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) - return; + dev_priv->display.funcs.display->crtc_enable(state, crtc); /* vblanks work again, re-enable pipe CRC. */ intel_crtc_enable_pipe_crc(crtc); @@ -6737,31 +6858,42 @@ static void intel_update_crtc(struct intel_atomic_state *state, } static void intel_old_crtc_state_disables(struct intel_atomic_state *state, - struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_crtc *pipe_crtc; /* * We need to disable pipe CRC before disabling the pipe, * or we race against vblank off. 
*/ - intel_crtc_disable_pipe_crc(crtc); + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(old_crtc_state)) + intel_crtc_disable_pipe_crc(pipe_crtc); dev_priv->display.funcs.display->crtc_disable(state, crtc); - crtc->active = false; - intel_fbc_disable(crtc); - if (!new_crtc_state->hw.active) - intel_initial_watermarks(state, crtc); + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(old_crtc_state)) { + const struct intel_crtc_state *new_pipe_crtc_state = + intel_atomic_get_new_crtc_state(state, pipe_crtc); + + pipe_crtc->active = false; + intel_fbc_disable(pipe_crtc); + + if (!new_pipe_crtc_state->hw.active) + intel_initial_watermarks(state, pipe_crtc); + } } static void intel_commit_modeset_disables(struct intel_atomic_state *state) { - struct intel_crtc_state *new_crtc_state, *old_crtc_state; + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; - u32 handled = 0; + u8 disable_pipes = 0; int i; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, @@ -6769,21 +6901,31 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) if (!intel_crtc_needs_modeset(new_crtc_state)) continue; + /* + * Needs to be done even for pipes + * that weren't enabled previously. + */ intel_pre_plane_update(state, crtc); if (!old_crtc_state->hw.active) continue; + disable_pipes |= BIT(crtc->pipe); + } + + for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) { + if ((disable_pipes & BIT(crtc->pipe)) == 0) + continue; + intel_crtc_disable_planes(state, crtc); } /* Only disable port sync and MST slaves */ - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - if (!intel_crtc_needs_modeset(new_crtc_state)) + for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) { + if ((disable_pipes & BIT(crtc->pipe)) == 0) continue; - if (!old_crtc_state->hw.active) + if (intel_crtc_is_bigjoiner_slave(old_crtc_state)) continue; /* In case of Transcoder port Sync master slave CRTCs can be @@ -6792,28 +6934,28 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) * Slave vblanks are masked till Master Vblanks. 
*/ if (!is_trans_port_sync_slave(old_crtc_state) && - !intel_dp_mst_is_slave_trans(old_crtc_state) && - !intel_crtc_is_bigjoiner_slave(old_crtc_state)) + !intel_dp_mst_is_slave_trans(old_crtc_state)) continue; - intel_old_crtc_state_disables(state, old_crtc_state, - new_crtc_state, crtc); - handled |= BIT(crtc->pipe); + intel_old_crtc_state_disables(state, crtc); + + disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state); } /* Disable everything else left on */ - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - if (!intel_crtc_needs_modeset(new_crtc_state) || - (handled & BIT(crtc->pipe))) + for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) { + if ((disable_pipes & BIT(crtc->pipe)) == 0) continue; - if (!old_crtc_state->hw.active) + if (intel_crtc_is_bigjoiner_slave(old_crtc_state)) continue; - intel_old_crtc_state_disables(state, old_crtc_state, - new_crtc_state, crtc); + intel_old_crtc_state_disables(state, crtc); + + disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state); } + + drm_WARN_ON(&i915->drm, disable_pipes); } static void intel_commit_modeset_enables(struct intel_atomic_state *state) @@ -6880,9 +7022,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) intel_pre_update_crtc(state, crtc); } + intel_dbuf_mbus_pre_ddb_update(state); + while (update_pipes) { - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { + /* + * Commit in reverse order to make bigjoiner master + * send the uapi events after slaves are done. + */ + for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state, + new_crtc_state, i) { enum pipe pipe = crtc->pipe; if ((update_pipes & BIT(pipe)) == 0) @@ -6910,6 +7058,8 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) } } + intel_dbuf_mbus_post_ddb_update(state); + update_pipes = modeset_pipes; /* @@ -6922,12 +7072,14 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) if ((modeset_pipes & BIT(pipe)) == 0) continue; + if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) + continue; + if (intel_dp_mst_is_slave_trans(new_crtc_state) || - is_trans_port_sync_master(new_crtc_state) || - intel_crtc_is_bigjoiner_master(new_crtc_state)) + is_trans_port_sync_master(new_crtc_state)) continue; - modeset_pipes &= ~BIT(pipe); + modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state); intel_enable_crtc(state, crtc); } @@ -6942,7 +7094,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) if ((modeset_pipes & BIT(pipe)) == 0) continue; - modeset_pipes &= ~BIT(pipe); + if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) + continue; + + modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state); intel_enable_crtc(state, crtc); } @@ -6959,7 +7114,11 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) intel_pre_update_crtc(state, crtc); } - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + /* + * Commit in reverse order to make bigjoiner master + * send the uapi events after slaves are done. 
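The intel_commit_modeset_disables() rework above replaces the incremental "handled" mask with a disable_pipes bitmask: pipes needing a modeset are collected first, whole joined-pipe groups are then retired together, and a final drm_WARN_ON() catches anything left over. A minimal stand-alone sketch of that bookkeeping pattern follows; the names and the grouping rule are invented for illustration and are not taken from the patch.

/*
 * Sketch only: collect work into a bitmask, retire one group per
 * iteration by clearing the group's whole mask, assert nothing is left.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical: mask of pipes that belong to the same joined group */
static uint8_t joined_group_mask(int pipe)
{
        /* assume pipes 0 and 1 form one group, the rest stand alone */
        return (pipe <= 1) ? 0x3 : (uint8_t)(1u << pipe);
}

int main(void)
{
        uint8_t pending = 0;

        /* pass 1: collect the pipes that need work */
        for (int pipe = 0; pipe < 4; pipe++)
                pending |= (uint8_t)(1u << pipe);

        /* pass 2: handle one group per iteration, clearing the whole group */
        for (int pipe = 0; pipe < 4; pipe++) {
                if (!(pending & (1u << pipe)))
                        continue;       /* already retired as part of a group */

                printf("disabling group led by pipe %d\n", pipe);
                pending &= (uint8_t)~joined_group_mask(pipe);
        }

        assert(pending == 0);   /* mirrors the final drm_WARN_ON() */
        return 0;
}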
+ */ + for_each_new_intel_crtc_in_state_reverse(state, crtc, new_crtc_state, i) { enum pipe pipe = crtc->pipe; if ((update_pipes & BIT(pipe)) == 0) @@ -7156,7 +7315,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_encoders_update_prepare(state); intel_dbuf_pre_plane_update(state); - intel_mbus_dbox_update(state); for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->do_async_flip) @@ -7681,7 +7839,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv) static int max_dotclock(struct drm_i915_private *i915) { - int max_dotclock = i915->max_dotclk_freq; + int max_dotclock = i915->display.cdclk.max_dotclk_freq; /* icl+ might use bigjoiner */ if (DISPLAY_VER(i915) >= 11) diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index f4a0773f0f..56d1c0e3e6 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -280,6 +280,12 @@ enum phy_fia { base.head) \ for_each_if((pipe_mask) & BIT(intel_crtc->pipe)) +#define for_each_intel_crtc_in_pipe_mask_reverse(dev, intel_crtc, pipe_mask) \ + list_for_each_entry_reverse((intel_crtc), \ + &(dev)->mode_config.crtc_list, \ + base.head) \ + for_each_if((pipe_mask) & BIT((intel_crtc)->pipe)) + #define for_each_intel_encoder(dev, intel_encoder) \ list_for_each_entry(intel_encoder, \ &(dev)->mode_config.encoder_list, \ @@ -344,6 +350,14 @@ enum phy_fia { (__i)++) \ for_each_if(crtc) +#define for_each_new_intel_crtc_in_state_reverse(__state, crtc, new_crtc_state, __i) \ + for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \ + (__i) >= 0 && \ + ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \ + (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \ + (__i)--) \ + for_each_if(crtc) + #define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->base.dev->mode_config.num_total_plane && \ @@ -408,6 +422,7 @@ intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915, enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port); bool is_trans_port_sync_mode(const struct intel_crtc_state *state); bool is_trans_port_sync_master(const struct intel_crtc_state *state); +u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state); bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state); bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state); u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state); @@ -448,6 +463,13 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy); bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy); enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port); + +enum phy intel_encoder_to_phy(struct intel_encoder *encoder); +bool intel_encoder_is_combo(struct intel_encoder *encoder); +bool intel_encoder_is_snps(struct intel_encoder *encoder); +bool intel_encoder_is_tc(struct intel_encoder *encoder); +enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder); + int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.h b/drivers/gpu/drm/i915/display/intel_display_conversion.h new file mode 100644 index 0000000000..ad8545c805 --- /dev/null +++ 
b/drivers/gpu/drm/i915/display/intel_display_conversion.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2024 Intel Corporation */ + +/* + * This header is for transitional struct intel_display conversion helpers only. + */ + +#ifndef __INTEL_DISPLAY_CONVERSION__ +#define __INTEL_DISPLAY_CONVERSION__ + +/* + * Transitional macro to optionally convert struct drm_i915_private * to struct + * intel_display *, also accepting the latter. + */ +#define __to_intel_display(p) \ + _Generic(p, \ + const struct drm_i915_private *: (&((const struct drm_i915_private *)(p))->display), \ + struct drm_i915_private *: (&((struct drm_i915_private *)(p))->display), \ + const struct intel_display *: (p), \ + struct intel_display *: (p)) + +#endif /* __INTEL_DISPLAY_CONVERSION__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index 2167dbee5e..7715fc3290 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -26,6 +26,7 @@ #include "intel_global_state.h" #include "intel_gmbus.h" #include "intel_opregion.h" +#include "intel_dmc_wl.h" #include "intel_wm_types.h" struct task_struct; @@ -282,6 +283,9 @@ struct intel_wm { }; struct intel_display { + /* drm device backpointer */ + struct drm_device *drm; + /* Display functions */ struct { /* Top level crtc-ish functions */ @@ -345,6 +349,8 @@ struct intel_display { struct intel_global_obj obj; unsigned int max_cdclk_freq; + unsigned int max_dotclk_freq; + unsigned int skl_preferred_vco_freq; } cdclk; struct { @@ -445,6 +451,16 @@ struct intel_display { bool false_color; } ips; + struct { + bool display_irqs_enabled; + + /* For i915gm/i945gm vblank irq workaround */ + u8 vblank_enabled; + + u32 de_irq_mask[I915_MAX_PIPES]; + u32 pipestat_irq_mask[I915_MAX_PIPES]; + } irq; + struct { wait_queue_head_t waitqueue; @@ -534,6 +550,7 @@ struct intel_display { struct intel_overlay *overlay; struct intel_display_params params; struct intel_vbt_data vbt; + struct intel_dmc_wl wl; struct intel_wm wm; }; diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index b99c024b09..35f9f86ef7 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -31,6 +31,7 @@ #include "intel_hdmi.h" #include "intel_hotplug.h" #include "intel_panel.h" +#include "intel_pps.h" #include "intel_psr.h" #include "intel_psr_regs.h" #include "intel_wm.h" @@ -191,7 +192,7 @@ static void intel_hdcp_info(struct seq_file *m, struct intel_connector *intel_connector, bool remote_req) { - bool hdcp_cap, hdcp2_cap; + bool hdcp_cap = false, hdcp2_cap = false; if (!intel_connector->hdcp.shim) { seq_puts(m, "No Connector Support"); @@ -252,9 +253,6 @@ static void intel_connector_info(struct seq_file *m, struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); - const struct drm_connector_state *conn_state = connector->state; - struct intel_encoder *encoder = - to_intel_encoder(conn_state->best_encoder); const struct drm_display_mode *mode; seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n", @@ -271,28 +269,23 @@ static void intel_connector_info(struct seq_file *m, drm_get_subpixel_order_name(connector->display_info.subpixel_order)); seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); - if (!encoder) - return; - switch (connector->connector_type) { case 
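The new intel_display_conversion.h added above uses C11 _Generic so a caller may pass either a struct drm_i915_private * or a struct intel_display * and always receive the latter. A self-contained toy version of the same dispatch, with invented types standing in for the real ones, could look like this:

/* Sketch only: type-based dispatch that tolerates either pointer type. */
#include <stdio.h>

struct display { int ver; };
struct driver  { int id; struct display display; };

#define to_display(p)                                                   \
        _Generic((p),                                                   \
                 struct driver *:  (&((struct driver *)(void *)(p))->display), \
                 struct display *: ((struct display *)(void *)(p)))

int main(void)
{
        struct driver drv = { .id = 1, .display = { .ver = 14 } };

        struct display *d1 = to_display(&drv);          /* driver * accepted */
        struct display *d2 = to_display(&drv.display);  /* display * accepted too */

        printf("%d %d\n", d1->ver, d2->ver);
        return 0;
}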
DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_eDP: - if (encoder->type == INTEL_OUTPUT_DP_MST) + if (intel_connector->mst_port) intel_dp_mst_info(m, intel_connector); else intel_dp_info(m, intel_connector); break; case DRM_MODE_CONNECTOR_HDMIA: - if (encoder->type == INTEL_OUTPUT_HDMI || - encoder->type == INTEL_OUTPUT_DDI) - intel_hdmi_info(m, intel_connector); + intel_hdmi_info(m, intel_connector); break; default: break; } seq_puts(m, "\tHDCP version: "); - if (intel_encoder_is_mst(encoder)) { + if (intel_connector->mst_port) { intel_hdcp_info(m, intel_connector, true); seq_puts(m, "\tMST Hub HDCP version: "); } @@ -645,51 +638,24 @@ static int i915_display_capabilities(struct seq_file *m, void *unused) static int i915_shared_dplls_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_printer p = drm_seq_file_printer(m); struct intel_shared_dpll *pll; int i; drm_modeset_lock_all(&dev_priv->drm); - seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n", + drm_printf(&p, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n", dev_priv->display.dpll.ref_clks.nssc, dev_priv->display.dpll.ref_clks.ssc); for_each_shared_dpll(dev_priv, pll, i) { - seq_printf(m, "DPLL%i: %s, id: %i\n", pll->index, + drm_printf(&p, "DPLL%i: %s, id: %i\n", pll->index, pll->info->name, pll->info->id); - seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n", + drm_printf(&p, " pipe_mask: 0x%x, active: 0x%x, on: %s\n", pll->state.pipe_mask, pll->active_mask, str_yes_no(pll->on)); - seq_printf(m, " tracked hardware state:\n"); - seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); - seq_printf(m, " dpll_md: 0x%08x\n", - pll->state.hw_state.dpll_md); - seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); - seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); - seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); - seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0); - seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1); - seq_printf(m, " div0: 0x%08x\n", pll->state.hw_state.div0); - seq_printf(m, " mg_refclkin_ctl: 0x%08x\n", - pll->state.hw_state.mg_refclkin_ctl); - seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n", - pll->state.hw_state.mg_clktop2_coreclkctl1); - seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n", - pll->state.hw_state.mg_clktop2_hsclkctl); - seq_printf(m, " mg_pll_div0: 0x%08x\n", - pll->state.hw_state.mg_pll_div0); - seq_printf(m, " mg_pll_div1: 0x%08x\n", - pll->state.hw_state.mg_pll_div1); - seq_printf(m, " mg_pll_lf: 0x%08x\n", - pll->state.hw_state.mg_pll_lf); - seq_printf(m, " mg_pll_frac_lock: 0x%08x\n", - pll->state.hw_state.mg_pll_frac_lock); - seq_printf(m, " mg_pll_ssc: 0x%08x\n", - pll->state.hw_state.mg_pll_ssc); - seq_printf(m, " mg_pll_bias: 0x%08x\n", - pll->state.hw_state.mg_pll_bias); - seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n", - pll->state.hw_state.mg_pll_tdc_coldst_bias); + drm_printf(&p, " tracked hardware state:\n"); + intel_dpll_dump_hw_state(dev_priv, &p, &pll->state.hw_state); } drm_modeset_unlock_all(&dev_priv->drm); @@ -1103,27 +1069,6 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) intel_display_debugfs_params(i915); } -static int i915_panel_show(struct seq_file *m, void *data) -{ - struct intel_connector *connector = m->private; - struct intel_dp *intel_dp = intel_attached_dp(connector); - - if (connector->base.status != connector_status_connected) - return -ENODEV; - - seq_printf(m, "Panel power up delay: %d\n", - 
intel_dp->pps.panel_power_up_delay); - seq_printf(m, "Panel power down delay: %d\n", - intel_dp->pps.panel_power_down_delay); - seq_printf(m, "Backlight on delay: %d\n", - intel_dp->pps.backlight_on_delay); - seq_printf(m, "Backlight off delay: %d\n", - intel_dp->pps.backlight_off_delay); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(i915_panel); - static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) { struct intel_connector *connector = m->private; @@ -1402,20 +1347,6 @@ out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); return ret; } -static int i915_bigjoiner_enable_show(struct seq_file *m, void *data) -{ - struct intel_connector *connector = m->private; - struct drm_crtc *crtc; - - crtc = connector->base.state->crtc; - if (connector->base.status != connector_status_connected || !crtc) - return -ENODEV; - - seq_printf(m, "Bigjoiner enable: %d\n", connector->force_bigjoiner_enable); - - return 0; -} - static ssize_t i915_dsc_output_format_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) @@ -1437,30 +1368,6 @@ static ssize_t i915_dsc_output_format_write(struct file *file, return len; } -static ssize_t i915_bigjoiner_enable_write(struct file *file, - const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct intel_connector *connector = m->private; - struct drm_crtc *crtc; - bool bigjoiner_en = 0; - int ret; - - crtc = connector->base.state->crtc; - if (connector->base.status != connector_status_connected || !crtc) - return -ENODEV; - - ret = kstrtobool_from_user(ubuf, len, &bigjoiner_en); - if (ret < 0) - return ret; - - connector->force_bigjoiner_enable = bigjoiner_en; - *offp += len; - - return len; -} - static int i915_dsc_output_format_open(struct inode *inode, struct file *file) { @@ -1554,8 +1461,6 @@ static const struct file_operations i915_dsc_fractional_bpp_fops = { .write = i915_dsc_fractional_bpp_write }; -DEFINE_SHOW_STORE_ATTRIBUTE(i915_bigjoiner_enable); - /* * Returns the Current CRTC's bpc. 
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc @@ -1608,12 +1513,9 @@ void intel_connector_debugfs_add(struct intel_connector *connector) return; intel_drrs_connector_debugfs_add(connector); + intel_pps_connector_debugfs_add(connector); intel_psr_connector_debugfs_add(connector); - if (connector_type == DRM_MODE_CONNECTOR_eDP) - debugfs_create_file("i915_panel_timings", 0444, root, - connector, &i915_panel_fops); - if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector_type == DRM_MODE_CONNECTOR_HDMIA || connector_type == DRM_MODE_CONNECTOR_HDMIB) { @@ -1640,8 +1542,8 @@ void intel_connector_debugfs_add(struct intel_connector *connector) if (DISPLAY_VER(i915) >= 11 && (connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector_type == DRM_MODE_CONNECTOR_eDP)) { - debugfs_create_file("i915_bigjoiner_force_enable", 0644, root, - connector, &i915_bigjoiner_enable_fops); + debugfs_create_bool("i915_bigjoiner_force_enable", 0644, root, + &connector->force_bigjoiner_enable); } if (connector_type == DRM_MODE_CONNECTOR_DSI || diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c index c02d79b500..120e209ee7 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.c +++ b/drivers/gpu/drm/i915/display/intel_display_device.c @@ -17,6 +17,9 @@ #include "intel_display_reg_defs.h" #include "intel_fbc.h" +__diag_push(); +__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for display info"); + static const struct intel_display_device_info no_display = {}; #define PIPE_A_OFFSET 0x70000 @@ -768,6 +771,8 @@ static const struct intel_display_device_info xe2_lpd_display = { BIT(INTEL_FBC_C) | BIT(INTEL_FBC_D), }; +__diag_pop(); + /* * Separate detection for no display cases to keep the display id array simple. * @@ -922,6 +927,9 @@ void intel_display_device_probe(struct drm_i915_private *i915) const struct intel_display_device_info *info; u16 ver, rel, step; + /* Add drm device backpointer as early as possible. 
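In the debugfs hunk above, the hand-rolled show/write fops for i915_bigjoiner_force_enable give way to a single debugfs_create_bool() call, which exposes a bool directly (reads report Y/N, writes accept 0/1/y/n). A hypothetical minimal module using the same pattern, sketched for illustration only:

/* Sketch only: expose a bool under debugfs without custom file_operations. */
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/module.h>

static bool force_enable;
static struct dentry *demo_dir;

static int __init demo_init(void)
{
        demo_dir = debugfs_create_dir("demo", NULL);
        /* creates <debugfs>/demo/force_enable backed by the bool above */
        debugfs_create_bool("force_enable", 0644, demo_dir, &force_enable);
        return 0;
}

static void __exit demo_exit(void)
{
        debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");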
*/ + i915->display.drm = &i915->drm; + if (HAS_GMD_ID(i915)) info = probe_gmdid_display(i915, &ver, &rel, &step); else diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index 9b1bce2624..17ddf82f0b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -8,6 +8,7 @@ #include +#include "intel_display_conversion.h" #include "intel_display_limits.h" struct drm_i915_private; @@ -69,6 +70,7 @@ struct drm_printer; #define HAS_TRANSCODER(i915, trans) ((DISPLAY_RUNTIME_INFO(i915)->cpu_transcoder_mask & \ BIT(trans)) != 0) #define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11) +#define HAS_AS_SDP(i915) (DISPLAY_VER(i915) >= 13) #define INTEL_NUM_PIPES(i915) (hweight8(DISPLAY_RUNTIME_INFO(i915)->pipe_mask)) #define I915_HAS_HOTPLUG(i915) (DISPLAY_INFO(i915)->has_hotplug) #define OVERLAY_NEEDS_PHYSICAL(i915) (DISPLAY_INFO(i915)->overlay_needs_physical) @@ -99,8 +101,8 @@ struct drm_printer; (IS_DISPLAY_IP_RANGE((__i915), (ipver), (ipver)) && \ IS_DISPLAY_STEP((__i915), (from), (until))) -#define DISPLAY_INFO(i915) ((i915)->display.info.__device_info) -#define DISPLAY_RUNTIME_INFO(i915) (&(i915)->display.info.__runtime_info) +#define DISPLAY_INFO(i915) (__to_intel_display(i915)->info.__device_info) +#define DISPLAY_RUNTIME_INFO(i915) (&__to_intel_display(i915)->info.__runtime_info) #define DISPLAY_VER(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver) #define DISPLAY_VER_FULL(i915) IP_VER(DISPLAY_RUNTIME_INFO(i915)->ip.ver, \ diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 6da5e85abe..794b4af380 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -98,7 +99,6 @@ void intel_display_driver_init_hw(struct drm_i915_private *i915) static const struct drm_mode_config_funcs intel_mode_funcs = { .fb_create = intel_user_framebuffer_create, .get_format_info = intel_fb_get_format_info, - .output_poll_changed = intel_fbdev_output_poll_changed, .mode_valid = intel_mode_valid, .atomic_check = intel_atomic_check, .atomic_commit = intel_atomic_commit, @@ -198,11 +198,13 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915) intel_dpll_init_clock_hook(i915); intel_init_display_hooks(i915); intel_fdi_init_hook(i915); + intel_dmc_wl_init(&i915->display); } /* part #1: call before irq install */ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; int ret; if (i915_inject_probe_failure(i915)) @@ -261,7 +263,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) if (ret) goto cleanup_vga_client_pw_domain_dmc; - intel_init_quirks(i915); + intel_init_quirks(display); intel_fbc_init(i915); @@ -514,10 +516,6 @@ int intel_display_driver_probe(struct drm_i915_private *i915) intel_overlay_setup(i915); - ret = intel_fbdev_init(&i915->drm); - if (ret) - return ret; - /* Only enable hotplug handling once the fbdev is fully set up. */ intel_hpd_init(i915); @@ -546,16 +544,6 @@ void intel_display_driver_register(struct drm_i915_private *i915) intel_display_debugfs_register(i915); - /* - * Some ports require correctly set-up hpd registers for - * detection to work properly (leading to ghost connected - * connector status), e.g. VGA on gm45. 
Hence we can only set - * up the initial fbdev config after hpd irqs are fully - * enabled. We do it last so that the async config cannot run - * before the connectors are registered. - */ - intel_fbdev_initial_config_async(i915); - /* * We need to coordinate the hotplugs with the asynchronous * fbdev configuration, for which we use the @@ -564,6 +552,8 @@ void intel_display_driver_register(struct drm_i915_private *i915) drm_kms_helper_poll_init(&i915->drm); intel_hpd_poll_disable(i915); + intel_fbdev_setup(i915); + intel_display_device_info_print(DISPLAY_INFO(i915), DISPLAY_RUNTIME_INFO(i915), &p); } @@ -599,9 +589,6 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915) */ intel_hpd_poll_fini(i915); - /* poll work can call into fbdev, hence clean that up afterwards */ - intel_fbdev_fini(i915); - intel_unregister_dsm_handler(); /* flush any delayed tasks or pending work */ @@ -640,7 +627,8 @@ void intel_display_driver_unregister(struct drm_i915_private *i915) if (!HAS_DISPLAY(i915)) return; - intel_fbdev_unregister(i915); + drm_client_dev_unregister(&i915->drm); + /* * After flushing the fbdev (incl. a late async config which * will have delayed queuing of a hotplug event), then flush diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index f846c5b108..c337e05975 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -117,13 +117,14 @@ static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) return; - new_val = dev_priv->de_irq_mask[pipe]; + new_val = dev_priv->display.irq.de_irq_mask[pipe]; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); - if (new_val != dev_priv->de_irq_mask[pipe]) { - dev_priv->de_irq_mask[pipe] = new_val; - intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); + if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) { + dev_priv->display.irq.de_irq_mask[pipe] = new_val; + intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), + dev_priv->display.irq.de_irq_mask[pipe]); intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe)); } } @@ -179,7 +180,7 @@ void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits) u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, enum pipe pipe) { - u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; + u32 status_mask = dev_priv->display.irq.pipestat_irq_mask[pipe]; u32 enable_mask = status_mask << 16; lockdep_assert_held(&dev_priv->irq_lock); @@ -233,10 +234,10 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv, lockdep_assert_held(&dev_priv->irq_lock); drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); - if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) + if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask) return; - dev_priv->pipestat_irq_mask[pipe] |= status_mask; + dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask; enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); @@ -256,10 +257,10 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv, lockdep_assert_held(&dev_priv->irq_lock); drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); - if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) + if 
((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0) return; - dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; + dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask; enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); @@ -401,7 +402,7 @@ void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS); - dev_priv->pipestat_irq_mask[pipe] = 0; + dev_priv->display.irq.pipestat_irq_mask[pipe] = 0; } } @@ -412,7 +413,7 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, spin_lock(&dev_priv->irq_lock); - if (!dev_priv->display_irqs_enabled) { + if (!dev_priv->display.irq.display_irqs_enabled) { spin_unlock(&dev_priv->irq_lock); return; } @@ -445,7 +446,7 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, break; } if (iir & iir_bit) - status_mask |= dev_priv->pipestat_irq_mask[pipe]; + status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe]; if (!status_mask) continue; @@ -1203,7 +1204,7 @@ int i8xx_enable_vblank(struct drm_crtc *crtc) int i915gm_enable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct drm_i915_private *i915 = to_i915(crtc->dev); /* * Vblank interrupts fail to wake the device up from C2+. @@ -1211,8 +1212,8 @@ int i915gm_enable_vblank(struct drm_crtc *crtc) * the problem. There is a small power cost so we do this * only when vblank interrupts are actually enabled. */ - if (dev_priv->vblank_enabled++ == 0) - intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); + if (i915->display.irq.vblank_enabled++ == 0) + intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); return i8xx_enable_vblank(crtc); } @@ -1315,12 +1316,12 @@ void i8xx_disable_vblank(struct drm_crtc *crtc) void i915gm_disable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct drm_i915_private *i915 = to_i915(crtc->dev); i8xx_disable_vblank(crtc); - if (--dev_priv->vblank_enabled == 0) - intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); + if (--i915->display.irq.vblank_enabled == 0) + intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); } void i965_disable_vblank(struct drm_crtc *crtc) @@ -1497,8 +1498,8 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, for_each_pipe_masked(dev_priv, pipe, pipe_mask) GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, - dev_priv->de_irq_mask[pipe], - ~dev_priv->de_irq_mask[pipe] | extra_ier); + dev_priv->display.irq.de_irq_mask[pipe], + ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier); spin_unlock_irq(&dev_priv->irq_lock); } @@ -1558,10 +1559,10 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) { lockdep_assert_held(&dev_priv->irq_lock); - if (dev_priv->display_irqs_enabled) + if (dev_priv->display.irq.display_irqs_enabled) return; - dev_priv->display_irqs_enabled = true; + dev_priv->display.irq.display_irqs_enabled = true; if (intel_irqs_enabled(dev_priv)) { vlv_display_irq_reset(dev_priv); @@ -1573,10 +1574,10 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) { lockdep_assert_held(&dev_priv->irq_lock); - if (!dev_priv->display_irqs_enabled) + if (!dev_priv->display.irq.display_irqs_enabled) return; - dev_priv->display_irqs_enabled = false; + 
dev_priv->display.irq.display_irqs_enabled = false; if (intel_irqs_enabled(dev_priv)) vlv_display_irq_reset(dev_priv); @@ -1694,12 +1695,12 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) } for_each_pipe(dev_priv, pipe) { - dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; + dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked; if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, - dev_priv->de_irq_mask[pipe], + dev_priv->display.irq.de_irq_mask[pipe], de_pipe_enables); } @@ -1770,9 +1771,9 @@ void intel_display_irq_init(struct drm_i915_private *i915) * domain. We defer setting up the display irqs in this case to the * runtime pm. */ - i915->display_irqs_enabled = true; + i915->display.irq.display_irqs_enabled = true; if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) - i915->display_irqs_enabled = false; + i915->display.irq.display_irqs_enabled = false; intel_hotplug_irq_init(i915); } diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c index 11e03cfb77..1799a66431 100644 --- a/drivers/gpu/drm/i915/display/intel_display_params.c +++ b/drivers/gpu/drm/i915/display/intel_display_params.c @@ -27,6 +27,10 @@ static struct intel_display_params intel_display_modparams __read_mostly = { * debugfs mode to 0. */ +intel_display_param_named_unsafe(dmc_firmware_path, charp, 0400, + "DMC firmware path to use instead of the default one. " + "Use /dev/null to disable DMC and runtime PM."); + intel_display_param_named_unsafe(vbt_firmware, charp, 0400, "Load VBT from specified file under /lib/firmware"); @@ -116,6 +120,11 @@ intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400, "(0=disabled, 1=enabled) " "Default: 1"); +intel_display_param_named_unsafe(enable_dmc_wl, bool, 0400, + "Enable DMC wakelock " + "(0=disabled, 1=enabled) " + "Default: 0"); + __maybe_unused static void _param_print_bool(struct drm_printer *p, const char *driver_name, const char *name, bool val) diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h index 6206cc51df..1208a62c16 100644 --- a/drivers/gpu/drm/i915/display/intel_display_params.h +++ b/drivers/gpu/drm/i915/display/intel_display_params.h @@ -24,6 +24,7 @@ struct drm_i915_private; * debugfs file */ #define INTEL_DISPLAY_PARAMS_FOR_EACH(param) \ + param(char *, dmc_firmware_path, NULL, 0400) \ param(char *, vbt_firmware, NULL, 0400) \ param(int, lvds_channel_mode, 0, 0400) \ param(int, panel_use_ssc, -1, 0600) \ @@ -46,6 +47,7 @@ struct drm_i915_private; param(int, enable_psr, -1, 0600) \ param(bool, psr_safest_params, false, 0400) \ param(bool, enable_psr2_sel_fetch, true, 0400) \ + param(bool, enable_dmc_wl, false, 0400) \ #define MEMBER(T, member, ...) T member; struct intel_display_params { diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 6fd4fa5225..03dc7edcc4 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -640,13 +640,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, enum intel_display_power_domain domain; intel_wakeref_t wakeref; - /* - * The caller must hold already raw wakeref, upgrade that to a proper - * wakeref to make the state checker happy about the HW access during - * power well disabling. 
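The dmc_firmware_path and enable_dmc_wl parameters added above hook into INTEL_DISPLAY_PARAMS_FOR_EACH(), an X-macro list: a single param(...) entry generates the struct member, its default and the related boilerplate in lockstep. A toy, self-contained illustration of the X-macro technique, with names invented here:

/* Sketch only: one list, expanded twice with different per-entry macros. */
#include <stdio.h>

#define PARAMS_FOR_EACH(param)                  \
        param(int,    verbosity, 1)             \
        param(char *, fw_path,   "default.bin")

/* expansion 1: struct definition */
#define MEMBER(T, name, def) T name;
struct params { PARAMS_FOR_EACH(MEMBER) };
#undef MEMBER

/* expansion 2: default initialisation */
#define DEFAULT(T, name, def) .name = (def),
static struct params params = { PARAMS_FOR_EACH(DEFAULT) };
#undef DEFAULT

int main(void)
{
        printf("verbosity=%d fw_path=%s\n", params.verbosity, params.fw_path);
        return 0;
}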
- */ - assert_rpm_raw_wakeref_held(rpm); - wakeref = intel_runtime_pm_get(rpm); + wakeref = intel_runtime_pm_get_noresume(rpm); for_each_power_domain(domain, mask) { /* Clear before put, so put's sanity check is happy. */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 06900ff307..83f616097a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -17,6 +17,7 @@ #include "intel_dkl_phy.h" #include "intel_dkl_phy_regs.h" #include "intel_dmc.h" +#include "intel_dmc_wl.h" #include "intel_dp_aux_regs.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" @@ -26,6 +27,7 @@ #include "intel_tc.h" #include "intel_vga.h" #include "skl_watermark.h" +#include "vlv_dpio_phy_regs.h" #include "vlv_sideband.h" #include "vlv_sideband_reg.h" @@ -199,6 +201,9 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); } +#define ICL_AUX_PW_TO_PHY(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + PHY_A) + #define ICL_AUX_PW_TO_CH(pw_idx) \ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) @@ -217,27 +222,22 @@ static struct intel_digital_port * aux_ch_to_digital_port(struct drm_i915_private *dev_priv, enum aux_ch aux_ch) { - struct intel_digital_port *dig_port = NULL; struct intel_encoder *encoder; for_each_intel_encoder(&dev_priv->drm, encoder) { + struct intel_digital_port *dig_port; + /* We'll check the MST primary port */ if (encoder->type == INTEL_OUTPUT_DP_MST) continue; dig_port = enc_to_dig_port(encoder); - if (!dig_port) - continue; - - if (dig_port->aux_ch != aux_ch) { - dig_port = NULL; - continue; - } - break; + if (dig_port && dig_port->aux_ch == aux_ch) + return dig_port; } - return dig_port; + return NULL; } static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915, @@ -253,7 +253,7 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915, * as HDMI-only and routed to a combo PHY, the encoder either won't be * present at all or it will not have an aux_ch assigned. */ - return dig_port ? intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE; + return dig_port ? 
intel_encoder_to_phy(&dig_port->base) : PHY_NONE; } static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, @@ -396,17 +396,11 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv, hsw_wait_for_power_well_disable(dev_priv, power_well); } -static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port) +static bool intel_aux_ch_is_edp(struct drm_i915_private *i915, enum aux_ch aux_ch) { - struct intel_encoder *encoder; - - for_each_intel_encoder(&i915->drm, encoder) { - if (encoder->type == INTEL_OUTPUT_EDP && - encoder->port == port) - return true; - } + struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch); - return false; + return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP; } static void @@ -415,24 +409,25 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; - enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); - /* FIXME this is a mess */ - if (phy != PHY_NONE) - intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), - 0, ICL_LANE_ENABLE_AUX); + /* + * FIXME not sure if we should derive the PHY from the pw_idx, or + * from the VBT defined AUX_CH->DDI->PHY mapping. + */ + intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)), + 0, ICL_LANE_ENABLE_AUX); hsw_wait_for_power_well_enable(dev_priv, power_well, false); /* Display WA #1178: icl */ if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && - !intel_port_is_edp(dev_priv, (enum port)phy)) - intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), - 0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS); + !intel_aux_ch_is_edp(dev_priv, ICL_AUX_PW_TO_CH(pw_idx))) + intel_de_rmw(dev_priv, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)), + 0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI); } static void @@ -441,14 +436,15 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; - enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); - /* FIXME this is a mess */ - if (phy != PHY_NONE) - intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), - ICL_LANE_ENABLE_AUX, 0); + /* + * FIXME not sure if we should derive the PHY from the pw_idx, or + * from the VBT defined AUX_CH->DDI->PHY mapping. 
+ */ + intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)), + ICL_LANE_ENABLE_AUX, 0); intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); @@ -827,6 +823,8 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv) intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, SKL_SELECT_ALTERNATE_DC_EXIT); + intel_dmc_wl_enable(&dev_priv->display); + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); } @@ -856,6 +854,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv) intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, SKL_SELECT_ALTERNATE_DC_EXIT); + intel_dmc_wl_enable(&dev_priv->display); + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); } @@ -906,39 +906,39 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + bxt_dpio_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy); } static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + bxt_dpio_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy); } static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + return bxt_dpio_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy); } -static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) +static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv) { struct i915_power_well *power_well; power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); if (intel_power_well_refcount(power_well) > 0) - bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); if (intel_power_well_refcount(power_well) > 0) - bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); if (IS_GEMINILAKE(dev_priv)) { power_well = lookup_power_well(dev_priv, GLK_DISP_PW_DPIO_CMN_C); if (intel_power_well_refcount(power_well) > 0) - bxt_ddi_phy_verify_state(dev_priv, - i915_power_well_instance(power_well)->bxt.phy); + bxt_dpio_phy_verify_state(dev_priv, + i915_power_well_instance(power_well)->bxt.phy); } } @@ -976,16 +976,18 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; + intel_dmc_wl_disable(&dev_priv->display); + intel_cdclk_get_cdclk(dev_priv, &cdclk_config); /* Can't read out voltage_level so can't use intel_cdclk_changed() */ drm_WARN_ON(&dev_priv->drm, - intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw, + intel_cdclk_clock_changed(&dev_priv->display.cdclk.hw, &cdclk_config)); gen9_assert_dbuf_enabled(dev_priv); if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - bxt_verify_ddi_phy_power_wells(dev_priv); + bxt_verify_dpio_phy_power_wells(dev_priv); if (DISPLAY_VER(dev_priv) >= 11) /* @@ -1396,8 +1398,8 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) * The PHY may be busy with some initial calibration and whatnot, * so the power state can take a 
while to actually change. */ - if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS, - phy_status_mask, phy_status, 10)) + if (intel_de_wait(dev_priv, DISPLAY_PHY_STATUS, + phy_status_mask, phy_status, 10)) drm_err(&dev_priv->drm, "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, @@ -1441,9 +1443,9 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp); if (id == VLV_DISP_PW_DPIO_CMN_BC) { - tmp = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW6_CH1); + tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW6_CH1); tmp |= DPIO_DYNPWRDOWNEN_CH1; - vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW6_CH1, tmp); + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW6_CH1, tmp); } else { /* * Force the non-existing CL2 off. BXT does this @@ -1519,9 +1521,9 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi return; if (ch == DPIO_CH0) - reg = _CHV_CMN_DW0_CH0; + reg = CHV_CMN_DW0_CH0; else - reg = _CHV_CMN_DW6_CH1; + reg = CHV_CMN_DW6_CH1; vlv_dpio_get(dev_priv); val = vlv_dpio_read(dev_priv, phy, reg); @@ -1552,10 +1554,11 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi } if (ch == DPIO_CH0) - actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; + actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH0 | + DPIO_ALLDL_POWERDOWN_CH0, val); else - actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; - actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; + actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 | + DPIO_ALLDL_POWERDOWN_CH1, val); drm_WARN(&dev_priv->drm, actual != expected, "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", diff --git a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h index 2f07b7afa3..b83ad06f2e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h +++ b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h @@ -29,21 +29,21 @@ #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) #define _MMIO_PHY(phy, a, b) _MMIO(_PHY(phy, a, b)) -#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c)) -#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c)) +#define _MMIO_BASE_PIPE3(base, pipe, a, b, c) _MMIO((base) + _PICK_EVEN_2RANGES(pipe, 1, a, a, b, c)) +#define _MMIO_BASE_PORT3(base, pipe, a, b, c) _MMIO((base) + _PICK_EVEN_2RANGES(pipe, 1, a, a, b, c)) /* * Device info offset array based helpers for groups of registers with unevenly * spaced base offsets. 
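The assert_chv_phy_powergate() hunk above swaps open-coded shift-and-mask arithmetic for REG_FIELD_GET(), which derives the shift from the mask itself. A stand-alone approximation of that idea, using hypothetical register bits and a GCC/Clang builtin:

/* Sketch only: extract a register field given only its mask. */
#include <stdint.h>
#include <stdio.h>

#define ANYDL_POWERDOWN_CH0  0x00000040u        /* hypothetical bit layout */
#define ALLDL_POWERDOWN_CH0  0x00000080u

/* extract the field selected by mask, shifted down to bit 0 */
static inline uint32_t field_get(uint32_t mask, uint32_t val)
{
        return (val & mask) >> __builtin_ctz(mask);     /* mask must be non-zero */
}

int main(void)
{
        uint32_t reg = 0x000000c0;      /* both powerdown bits set */
        uint32_t actual = field_get(ANYDL_POWERDOWN_CH0 | ALLDL_POWERDOWN_CH0, reg);

        printf("powerdown field = 0x%x\n", actual);     /* prints 0x3 */
        return 0;
}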
*/ -#define _MMIO_PIPE2(pipe, reg) _MMIO(DISPLAY_INFO(dev_priv)->pipe_offsets[(pipe)] - \ - DISPLAY_INFO(dev_priv)->pipe_offsets[PIPE_A] + \ - DISPLAY_MMIO_BASE(dev_priv) + (reg)) -#define _MMIO_TRANS2(tran, reg) _MMIO(DISPLAY_INFO(dev_priv)->trans_offsets[(tran)] - \ - DISPLAY_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + \ - DISPLAY_MMIO_BASE(dev_priv) + (reg)) -#define _MMIO_CURSOR2(pipe, reg) _MMIO(DISPLAY_INFO(dev_priv)->cursor_offsets[(pipe)] - \ - DISPLAY_INFO(dev_priv)->cursor_offsets[PIPE_A] + \ - DISPLAY_MMIO_BASE(dev_priv) + (reg)) +#define _MMIO_PIPE2(display, pipe, reg) _MMIO(DISPLAY_INFO(display)->pipe_offsets[(pipe)] - \ + DISPLAY_INFO(display)->pipe_offsets[PIPE_A] + \ + DISPLAY_MMIO_BASE(display) + (reg)) +#define _MMIO_TRANS2(display, tran, reg) _MMIO(DISPLAY_INFO(display)->trans_offsets[(tran)] - \ + DISPLAY_INFO(display)->trans_offsets[TRANSCODER_A] + \ + DISPLAY_MMIO_BASE(display) + (reg)) +#define _MMIO_CURSOR2(display, pipe, reg) _MMIO(DISPLAY_INFO(display)->cursor_offsets[(pipe)] - \ + DISPLAY_INFO(display)->cursor_offsets[PIPE_A] + \ + DISPLAY_MMIO_BASE(display) + (reg)) #endif /* __INTEL_DISPLAY_REG_DEFS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h index 7862e7cefe..49a5e6d9dc 100644 --- a/drivers/gpu/drm/i915/display/intel_display_trace.h +++ b/drivers/gpu/drm/i915/display/intel_display_trace.h @@ -34,7 +34,7 @@ TRACE_EVENT(intel_pipe_enable, TP_fast_assign( struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc *it__; - __assign_str(dev, __dev_name_kms(crtc)); + __assign_str(dev); for_each_intel_crtc(&dev_priv->drm, it__) { __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); @@ -63,7 +63,7 @@ TRACE_EVENT(intel_pipe_disable, TP_fast_assign( struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc *it__; - __assign_str(dev, __dev_name_kms(crtc)); + __assign_str(dev); for_each_intel_crtc(&dev_priv->drm, it__) { __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); @@ -91,7 +91,7 @@ TRACE_EVENT(intel_pipe_crc, ), TP_fast_assign( - __assign_str(dev, __dev_name_kms(crtc)); + __assign_str(dev); __entry->pipe = crtc->pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -119,7 +119,7 @@ TRACE_EVENT(intel_cpu_fifo_underrun, TP_fast_assign( struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); - __assign_str(dev, __dev_name_kms(crtc)); + __assign_str(dev); __entry->pipe = pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -144,7 +144,7 @@ TRACE_EVENT(intel_pch_fifo_underrun, TP_fast_assign( enum pipe pipe = pch_transcoder; struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); - __assign_str(dev, __dev_name_i915(dev_priv)); + __assign_str(dev); __entry->pipe = pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -169,7 +169,7 @@ TRACE_EVENT(intel_memory_cxsr, TP_fast_assign( struct intel_crtc *crtc; - __assign_str(dev, __dev_name_i915(dev_priv)); + __assign_str(dev); for_each_intel_crtc(&dev_priv->drm, crtc) { __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); @@ -209,7 +209,7 @@ TRACE_EVENT(g4x_wm, ), TP_fast_assign( - 
__assign_str(dev, __dev_name_kms(crtc)); + __assign_str(dev); __entry->pipe = crtc->pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -256,7 +256,7 @@ TRACE_EVENT(vlv_wm, ), TP_fast_assign( - __assign_str(dev, __dev_name_kms(crtc)); + __assign_str(dev); __entry->pipe = crtc->pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -293,7 +293,7 @@ TRACE_EVENT(vlv_fifo_size, ), TP_fast_assign( - __assign_str(dev, __dev_name_kms(crtc)); + __assign_str(dev); __entry->pipe = crtc->pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -323,8 +323,8 @@ TRACE_EVENT(intel_plane_update_noarm, ), TP_fast_assign( - __assign_str(dev, __dev_name_kms(plane)); - __assign_str(name, plane->base.name); + __assign_str(dev); + __assign_str(name); __entry->pipe = crtc->pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -354,8 +354,8 @@ TRACE_EVENT(intel_plane_update_arm, ), TP_fast_assign( - __assign_str(dev, __dev_name_kms(plane)); - __assign_str(name, plane->base.name); + __assign_str(dev); + __assign_str(name); __entry->pipe = crtc->pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -383,8 +383,8 @@ TRACE_EVENT(intel_plane_disable_arm, ), TP_fast_assign( - __assign_str(dev, __dev_name_kms(plane)); - __assign_str(name, plane->base.name); + __assign_str(dev); + __assign_str(name); __entry->pipe = crtc->pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -410,8 +410,8 @@ TRACE_EVENT(intel_fbc_activate, TP_fast_assign( struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), plane->pipe); - __assign_str(dev, __dev_name_kms(plane)); - __assign_str(name, plane->base.name); + __assign_str(dev); + __assign_str(name); __entry->pipe = crtc->pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -437,8 +437,8 @@ TRACE_EVENT(intel_fbc_deactivate, TP_fast_assign( struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), plane->pipe); - __assign_str(dev, __dev_name_kms(plane)); - __assign_str(name, plane->base.name); + __assign_str(dev); + __assign_str(name); __entry->pipe = crtc->pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -464,8 +464,8 @@ TRACE_EVENT(intel_fbc_nuke, TP_fast_assign( struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), plane->pipe); - __assign_str(dev, __dev_name_kms(plane)); - __assign_str(name, plane->base.name); + __assign_str(dev); + __assign_str(name); __entry->pipe = crtc->pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -488,7 +488,7 @@ TRACE_EVENT(intel_crtc_vblank_work_start, ), TP_fast_assign( - __assign_str(dev, __dev_name_kms(crtc)); + __assign_str(dev); __entry->pipe = crtc->pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -511,7 +511,7 @@ TRACE_EVENT(intel_crtc_vblank_work_end, ), TP_fast_assign( - __assign_str(dev, __dev_name_kms(crtc)); + __assign_str(dev); __entry->pipe = crtc->pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -536,7 +536,7 @@ 
TRACE_EVENT(intel_pipe_update_start, ), TP_fast_assign( - __assign_str(dev, __dev_name_kms(crtc)); + __assign_str(dev); __entry->pipe = crtc->pipe; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); @@ -564,7 +564,7 @@ TRACE_EVENT(intel_pipe_update_vblank_evaded, ), TP_fast_assign( - __assign_str(dev, __dev_name_kms(crtc)); + __assign_str(dev); __entry->pipe = crtc->pipe; __entry->frame = crtc->debug.start_vbl_count; __entry->scanline = crtc->debug.scanline_start; @@ -590,7 +590,7 @@ TRACE_EVENT(intel_pipe_update_end, ), TP_fast_assign( - __assign_str(dev, __dev_name_kms(crtc)); + __assign_str(dev); __entry->pipe = crtc->pipe; __entry->frame = frame; __entry->scanline = scanline_end; @@ -613,7 +613,7 @@ TRACE_EVENT(intel_frontbuffer_invalidate, ), TP_fast_assign( - __assign_str(dev, __dev_name_i915(i915)); + __assign_str(dev); __entry->frontbuffer_bits = frontbuffer_bits; __entry->origin = origin; ), @@ -634,7 +634,7 @@ TRACE_EVENT(intel_frontbuffer_flush, ), TP_fast_assign( - __assign_str(dev, __dev_name_i915(i915)); + __assign_str(dev); __entry->frontbuffer_bits = frontbuffer_bits; __entry->origin = origin; ), diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index bf3f942e19..6747c10da2 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -661,7 +661,8 @@ struct intel_digital_connector_state { int broadcast_rgb; }; -#define to_intel_digital_connector_state(x) container_of(x, struct intel_digital_connector_state, base) +#define to_intel_digital_connector_state(conn_state) \ + container_of_const((conn_state), struct intel_digital_connector_state, base) struct dpll { /* given values */ @@ -1003,18 +1004,6 @@ enum intel_output_format { INTEL_OUTPUT_FORMAT_YCBCR444, }; -struct intel_mpllb_state { - u32 clock; /* in KHz */ - u32 ref_control; - u32 mpllb_cp; - u32 mpllb_div; - u32 mpllb_div2; - u32 mpllb_fracn1; - u32 mpllb_fracn2; - u32 mpllb_sscen; - u32 mpllb_sscstep; -}; - /* Used by dp and fdi links */ struct intel_link_m_n { u32 tu; @@ -1030,31 +1019,6 @@ struct intel_csc_matrix { u16 postoff[3]; }; -struct intel_c10pll_state { - u32 clock; /* in KHz */ - u8 tx; - u8 cmn; - u8 pll[20]; -}; - -struct intel_c20pll_state { - u32 clock; /* in kHz */ - u16 tx[3]; - u16 cmn[4]; - union { - u16 mplla[10]; - u16 mpllb[11]; - }; -}; - -struct intel_cx0pll_state { - union { - struct intel_c10pll_state c10; - struct intel_c20pll_state c20; - }; - bool ssc_enabled; -}; - struct intel_crtc_state { /* * uapi (drm) state. This is the software state shown to userspace. @@ -1199,11 +1163,7 @@ struct intel_crtc_state { struct intel_shared_dpll *shared_dpll; /* Actual register state of the dpll, for shared dpll cross-checking. */ - union { - struct intel_dpll_hw_state dpll_hw_state; - struct intel_mpllb_state mpllb_state; - struct intel_cx0pll_state cx0pll_state; - }; + struct intel_dpll_hw_state dpll_hw_state; /* * ICL reserved DPLLs for the CRTC/port. 
The active PLL is selected by @@ -1229,7 +1189,7 @@ struct intel_crtc_state { /* PSR is supported but might not be enabled due the lack of enabled planes */ bool has_psr; - bool has_psr2; + bool has_sel_update; bool enable_psr2_sel_fetch; bool enable_psr2_su_region_et; bool req_psr2_sdp_prior_scanline; @@ -1346,6 +1306,7 @@ struct intel_crtc_state { union hdmi_infoframe hdmi; union hdmi_infoframe drm; struct drm_dp_vsc_sdp vsc; + struct drm_dp_as_sdp as_sdp; } infoframes; u8 eld[MAX_ELD_BYTES]; @@ -1432,6 +1393,7 @@ struct intel_crtc_state { bool enable, in_range; u8 pipeline_full; u16 flipline, vmin, vmax, guardband; + u32 vsync_end, vsync_start; } vrr; /* Stream Splitter for eDP MSO */ @@ -1620,12 +1582,17 @@ struct intel_watermark_params { #define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base) #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) -#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi) #define to_intel_connector(x) container_of(x, struct intel_connector, base) #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) -#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) #define to_intel_plane(x) container_of(x, struct intel_plane, base) -#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, uapi) + +#define to_intel_crtc_state(crtc_state) \ + container_of_const((crtc_state), struct intel_crtc_state, uapi) +#define to_intel_plane_state(plane_state) \ + container_of_const((plane_state), struct intel_plane_state, uapi) +#define to_intel_framebuffer(fb) \ + container_of_const((fb), struct intel_framebuffer, base) + #define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL) struct intel_hdmi { @@ -1740,6 +1707,8 @@ struct intel_psr { /* LNL and beyond */ u8 check_entry_lines; + u8 silence_period_sym_clocks; + u8 lfps_half_cycle_num_of_syms; } alpm_parameters; ktime_t last_entry_attempt; @@ -1801,6 +1770,7 @@ struct intel_dp { bool is_mst; int active_mst_links; + enum drm_dp_mst_mode mst_detect; /* connector directly attached - won't be use for modeset in mst world */ struct intel_connector *attached_connector; @@ -2186,4 +2156,41 @@ static inline int to_bpp_x16(int bpp) return bpp << 4; } +/* + * Conversion functions/macros from various pointer types to struct + * intel_display pointer. + */ +#define __drm_device_to_intel_display(p) \ + (&to_i915(p)->display) +#define __intel_connector_to_intel_display(p) \ + __drm_device_to_intel_display((p)->base.dev) +#define __intel_crtc_to_intel_display(p) \ + __drm_device_to_intel_display((p)->base.dev) +#define __intel_crtc_state_to_intel_display(p) \ + __drm_device_to_intel_display((p)->uapi.crtc->dev) +#define __intel_digital_port_to_intel_display(p) \ + __drm_device_to_intel_display((p)->base.base.dev) +#define __intel_dp_to_intel_display(p) \ + __drm_device_to_intel_display(dp_to_dig_port(p)->base.base.dev) +#define __intel_encoder_to_intel_display(p) \ + __drm_device_to_intel_display((p)->base.dev) +#define __intel_hdmi_to_intel_display(p) \ + __drm_device_to_intel_display(hdmi_to_dig_port(p)->base.base.dev) + +/* Helper for generic association. Map types to conversion functions/macros. */ +#define __assoc(type, p) \ + struct type: __##type##_to_intel_display((struct type *)(p)) + +/* Convert various pointer types to struct intel_display pointer. 
*/ +#define to_intel_display(p) \ + _Generic(*p, \ + __assoc(drm_device, p), \ + __assoc(intel_connector, p), \ + __assoc(intel_crtc, p), \ + __assoc(intel_crtc_state, p), \ + __assoc(intel_digital_port, p), \ + __assoc(intel_dp, p), \ + __assoc(intel_encoder, p), \ + __assoc(intel_hdmi, p)) + #endif /* __INTEL_DISPLAY_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c index ac136fd992..e5a8022db6 100644 --- a/drivers/gpu/drm/i915/display/intel_display_wa.c +++ b/drivers/gpu/drm/i915/display/intel_display_wa.c @@ -10,20 +10,12 @@ static void gen11_display_wa_apply(struct drm_i915_private *i915) { - /* Wa_1409120013 */ - intel_de_write(i915, ILK_DPFC_CHICKEN(INTEL_FBC_A), - DPFC_CHICKEN_COMP_DUMMY_PIXEL); - /* Wa_14010594013 */ intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP); } static void xe_d_display_wa_apply(struct drm_i915_private *i915) { - /* Wa_1409120013 */ - intel_de_write(i915, ILK_DPFC_CHICKEN(INTEL_FBC_A), - DPFC_CHICKEN_COMP_DUMMY_PIXEL); - /* Wa_14013723622 */ intel_de_rmw(i915, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0); } diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 8357816244..cbd2ac5671 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -22,6 +22,7 @@ * */ +#include #include #include "i915_drv.h" @@ -38,6 +39,8 @@ * low-power state and comes back to normal. */ +#define INTEL_DMC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git" + enum intel_dmc_id { DMC_FW_MAIN = 0, DMC_FW_PIPEA, @@ -71,6 +74,21 @@ static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915) return i915->display.dmc.dmc; } +static const char *dmc_firmware_param(struct drm_i915_private *i915) +{ + const char *p = i915->display.params.dmc_firmware_path; + + return p && *p ? 
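The new to_intel_display() macro above uses C11 _Generic to select a per-type conversion at compile time, so a single helper name works for several unrelated pointer types. A standalone sketch of the same dispatch pattern, with made-up types (struct dev, struct crtc, struct display) standing in for the driver structures:

#include <stdio.h>

struct display { int id; };
struct dev     { struct display disp; };
struct crtc    { struct dev *dev; };

static struct display *dev_to_display(struct dev *d)   { return &d->disp; }
static struct display *crtc_to_display(struct crtc *c) { return dev_to_display(c->dev); }

/* Map the pointee type to the right conversion helper at compile time. */
#define to_display(p)				\
	_Generic(*(p),				\
		 struct dev:  dev_to_display,	\
		 struct crtc: crtc_to_display)(p)

int main(void)
{
	struct dev d = { .disp = { .id = 7 } };
	struct crtc c = { .dev = &d };

	printf("%d %d\n", to_display(&d)->id, to_display(&c)->id);
	return 0;
}

Adding a new convertible type then only means adding one association line, which is what the driver's __assoc() helper spells out.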
p : NULL; +} + +static bool dmc_firmware_param_disabled(struct drm_i915_private *i915) +{ + const char *p = dmc_firmware_param(i915); + + /* Magic path to indicate disabled */ + return p && !strcmp(p, "/dev/null"); +} + #define DMC_VERSION(major, minor) ((major) << 16 | (minor)) #define DMC_VERSION_MAJOR(version) ((version) >> 16) #define DMC_VERSION_MINOR(version) ((version) & 0xffff) @@ -89,10 +107,14 @@ static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915) __stringify(major) "_" \ __stringify(minor) ".bin" +#define XE2LPD_DMC_MAX_FW_SIZE 0x8000 #define XELPDP_DMC_MAX_FW_SIZE 0x7000 #define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000 #define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE +#define XE2LPD_DMC_PATH DMC_PATH(xe2lpd) +MODULE_FIRMWARE(XE2LPD_DMC_PATH); + #define MTL_DMC_PATH DMC_PATH(mtl) MODULE_FIRMWARE(MTL_DMC_PATH); @@ -136,6 +158,59 @@ MODULE_FIRMWARE(SKL_DMC_PATH); #define BXT_DMC_MAX_FW_SIZE 0x3000 MODULE_FIRMWARE(BXT_DMC_PATH); +static const char *dmc_firmware_default(struct drm_i915_private *i915, u32 *size) +{ + const char *fw_path = NULL; + u32 max_fw_size = 0; + + if (DISPLAY_VER_FULL(i915) == IP_VER(20, 0)) { + fw_path = XE2LPD_DMC_PATH; + max_fw_size = XE2LPD_DMC_MAX_FW_SIZE; + } else if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) { + fw_path = MTL_DMC_PATH; + max_fw_size = XELPDP_DMC_MAX_FW_SIZE; + } else if (IS_DG2(i915)) { + fw_path = DG2_DMC_PATH; + max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; + } else if (IS_ALDERLAKE_P(i915)) { + fw_path = ADLP_DMC_PATH; + max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; + } else if (IS_ALDERLAKE_S(i915)) { + fw_path = ADLS_DMC_PATH; + max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; + } else if (IS_DG1(i915)) { + fw_path = DG1_DMC_PATH; + max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; + } else if (IS_ROCKETLAKE(i915)) { + fw_path = RKL_DMC_PATH; + max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; + } else if (IS_TIGERLAKE(i915)) { + fw_path = TGL_DMC_PATH; + max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; + } else if (DISPLAY_VER(i915) == 11) { + fw_path = ICL_DMC_PATH; + max_fw_size = ICL_DMC_MAX_FW_SIZE; + } else if (IS_GEMINILAKE(i915)) { + fw_path = GLK_DMC_PATH; + max_fw_size = GLK_DMC_MAX_FW_SIZE; + } else if (IS_KABYLAKE(i915) || + IS_COFFEELAKE(i915) || + IS_COMETLAKE(i915)) { + fw_path = KBL_DMC_PATH; + max_fw_size = KBL_DMC_MAX_FW_SIZE; + } else if (IS_SKYLAKE(i915)) { + fw_path = SKL_DMC_PATH; + max_fw_size = SKL_DMC_MAX_FW_SIZE; + } else if (IS_BROXTON(i915)) { + fw_path = BXT_DMC_PATH; + max_fw_size = BXT_DMC_MAX_FW_SIZE; + } + + *size = max_fw_size; + + return fw_path; +} + #define DMC_DEFAULT_FW_OFFSET 0xFFFFFFFF #define PACKAGE_MAX_FW_INFO_ENTRIES 20 #define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32 @@ -546,6 +621,8 @@ void intel_dmc_disable_program(struct drm_i915_private *i915) pipedmc_clock_gating_wa(i915, true); disable_all_event_handlers(i915); pipedmc_clock_gating_wa(i915, false); + + intel_dmc_wl_disable(&i915->display); } void assert_dmc_loaded(struct drm_i915_private *i915) @@ -845,7 +922,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc, return sizeof(struct intel_css_header); } -static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) +static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) { struct drm_i915_private *i915 = dmc->i915; struct intel_css_header *css_header; @@ -858,13 +935,13 @@ static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) u32 r, offset; if (!fw) - return; + return -EINVAL; /* Extract CSS Header information */ css_header = (struct 
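dmc_firmware_default() above now owns the per-platform firmware choice, and the dmc_firmware_path parameter is layered on top of it: an empty parameter keeps the default, "/dev/null" is the magic value meaning "no DMC at all", and anything else overrides the path. A small standalone sketch of that selection order; the firmware paths below are illustrative, not a statement about which blob a given platform needs.

#include <stdio.h>
#include <string.h>

/* Default, then the disable sentinel, then an explicit override. */
static const char *pick_fw_path(const char *platform_default, const char *param)
{
	if (param && *param) {
		if (!strcmp(param, "/dev/null"))
			return NULL;		/* firmware explicitly disabled */
		return param;			/* user-supplied override */
	}
	return platform_default;		/* built-in per-platform default */
}

int main(void)
{
	const char *p;

	printf("%s\n", pick_fw_path("i915/example_dmc.bin", ""));
	printf("%s\n", pick_fw_path("i915/example_dmc.bin", "i915/test_dmc.bin"));
	p = pick_fw_path("i915/example_dmc.bin", "/dev/null");
	printf("%s\n", p ? p : "(disabled)");
	return 0;
}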
intel_css_header *)fw->data; r = parse_dmc_fw_css(dmc, css_header, fw->size); if (!r) - return; + return -EINVAL; readcount += r; @@ -872,7 +949,7 @@ static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) package_header = (struct intel_package_header *)&fw->data[readcount]; r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount); if (!r) - return; + return -EINVAL; readcount += r; @@ -889,6 +966,13 @@ static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) dmc_header = (struct intel_dmc_header_base *)&fw->data[offset]; parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id); } + + if (!intel_dmc_has_payload(i915)) { + drm_err(&i915->drm, "DMC firmware main program not found\n"); + return -ENOENT; + } + + return 0; } static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915) @@ -923,7 +1007,7 @@ static void dmc_load_work_fn(struct work_struct *work) err = request_firmware(&fw, dmc->fw_path, i915->drm.dev); - if (err == -ENOENT && !i915->params.dmc_firmware_path) { + if (err == -ENOENT && !dmc_firmware_param(i915)) { fallback_path = dmc_fallback_path(i915); if (fallback_path) { drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n", @@ -934,24 +1018,31 @@ static void dmc_load_work_fn(struct work_struct *work) } } - parse_dmc_fw(dmc, fw); - - if (intel_dmc_has_payload(i915)) { - intel_dmc_load_program(i915); - intel_dmc_runtime_pm_put(i915); - - drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n", - dmc->fw_path, DMC_VERSION_MAJOR(dmc->version), - DMC_VERSION_MINOR(dmc->version)); - } else { + if (err) { drm_notice(&i915->drm, - "Failed to load DMC firmware %s." - " Disabling runtime power management.\n", - dmc->fw_path); + "Failed to load DMC firmware %s (%pe). Disabling runtime power management.\n", + dmc->fw_path, ERR_PTR(err)); drm_notice(&i915->drm, "DMC firmware homepage: %s", - INTEL_UC_FIRMWARE_URL); + INTEL_DMC_FIRMWARE_URL); + return; } + err = parse_dmc_fw(dmc, fw); + if (err) { + drm_notice(&i915->drm, + "Failed to parse DMC firmware %s (%pe). 
Disabling runtime power management.\n", + dmc->fw_path, ERR_PTR(err)); + goto out; + } + + intel_dmc_load_program(i915); + intel_dmc_runtime_pm_put(i915); + + drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n", + dmc->fw_path, DMC_VERSION_MAJOR(dmc->version), + DMC_VERSION_MINOR(dmc->version)); + +out: release_firmware(fw); } @@ -987,56 +1078,16 @@ void intel_dmc_init(struct drm_i915_private *i915) INIT_WORK(&dmc->work, dmc_load_work_fn); - if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) { - dmc->fw_path = MTL_DMC_PATH; - dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE; - } else if (IS_DG2(i915)) { - dmc->fw_path = DG2_DMC_PATH; - dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; - } else if (IS_ALDERLAKE_P(i915)) { - dmc->fw_path = ADLP_DMC_PATH; - dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; - } else if (IS_ALDERLAKE_S(i915)) { - dmc->fw_path = ADLS_DMC_PATH; - dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; - } else if (IS_DG1(i915)) { - dmc->fw_path = DG1_DMC_PATH; - dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; - } else if (IS_ROCKETLAKE(i915)) { - dmc->fw_path = RKL_DMC_PATH; - dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; - } else if (IS_TIGERLAKE(i915)) { - dmc->fw_path = TGL_DMC_PATH; - dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; - } else if (DISPLAY_VER(i915) == 11) { - dmc->fw_path = ICL_DMC_PATH; - dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE; - } else if (IS_GEMINILAKE(i915)) { - dmc->fw_path = GLK_DMC_PATH; - dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE; - } else if (IS_KABYLAKE(i915) || - IS_COFFEELAKE(i915) || - IS_COMETLAKE(i915)) { - dmc->fw_path = KBL_DMC_PATH; - dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE; - } else if (IS_SKYLAKE(i915)) { - dmc->fw_path = SKL_DMC_PATH; - dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE; - } else if (IS_BROXTON(i915)) { - dmc->fw_path = BXT_DMC_PATH; - dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE; - } - - if (i915->params.dmc_firmware_path) { - if (strlen(i915->params.dmc_firmware_path) == 0) { - drm_info(&i915->drm, - "Disabling DMC firmware and runtime PM\n"); - goto out; - } + dmc->fw_path = dmc_firmware_default(i915, &dmc->max_fw_size); - dmc->fw_path = i915->params.dmc_firmware_path; + if (dmc_firmware_param_disabled(i915)) { + drm_info(&i915->drm, "Disabling DMC firmware and runtime PM\n"); + goto out; } + if (dmc_firmware_param(i915)) + dmc->fw_path = dmc_firmware_param(i915); + if (!dmc->fw_path) { drm_dbg_kms(&i915->drm, "No known DMC firmware for platform, disabling runtime PM\n"); @@ -1072,6 +1123,8 @@ void intel_dmc_suspend(struct drm_i915_private *i915) if (dmc) flush_work(&dmc->work); + intel_dmc_wl_disable(&i915->display); + /* Drop the reference held in case DMC isn't loaded. 
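dmc_load_work_fn() above now keeps the two failure classes apart: a missing file is retried with the fallback blob and reported with the firmware homepage, while a parse failure releases the blob and gives up. A condensed sketch of that request/fallback/parse flow against the regular firmware API; struct my_dev, my_parse_fw() and my_program_fw() are placeholders for this example only.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firmware.h>

struct my_dev {
	struct device *dev;
	const char *fw_path;
	const char *fallback_path;
};

int my_parse_fw(struct my_dev *md, const struct firmware *fw);	/* placeholder */
void my_program_fw(struct my_dev *md);				/* placeholder */

static void fw_load_work(struct my_dev *md)
{
	const struct firmware *fw = NULL;
	int err;

	err = request_firmware(&fw, md->fw_path, md->dev);
	if (err == -ENOENT && md->fallback_path) {
		md->fw_path = md->fallback_path;	/* retry the older blob */
		err = request_firmware(&fw, md->fw_path, md->dev);
	}
	if (err) {
		dev_notice(md->dev, "firmware %s unavailable (%pe)\n",
			   md->fw_path, ERR_PTR(err));
		return;				/* nothing was loaded, nothing to free */
	}

	if (!my_parse_fw(md, fw))
		my_program_fw(md);		/* only program a fully parsed image */

	release_firmware(fw);
}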
*/ if (!intel_dmc_has_payload(i915)) intel_dmc_runtime_pm_put(i915); diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h index 90d0dbb41c..1bf446f96a 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h +++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h @@ -97,4 +97,10 @@ #define TGL_DMC_DEBUG3 _MMIO(0x101090) #define DG1_DMC_DEBUG3 _MMIO(0x13415c) +#define DMC_WAKELOCK_CFG _MMIO(0x8F1B0) +#define DMC_WAKELOCK_CFG_ENABLE REG_BIT(31) +#define DMC_WAKELOCK1_CTL _MMIO(0x8F140) +#define DMC_WAKELOCK_CTL_REQ REG_BIT(31) +#define DMC_WAKELOCK_CTL_ACK REG_BIT(15) + #endif /* __INTEL_DMC_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c new file mode 100644 index 0000000000..d9864b9cc4 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2024 Intel Corporation + */ + +#include + +#include "intel_de.h" +#include "intel_dmc.h" +#include "intel_dmc_regs.h" +#include "intel_dmc_wl.h" + +/** + * DOC: DMC wakelock support + * + * Wake lock is the mechanism to cause display engine to exit DC + * states to allow programming to registers that are powered down in + * those states. Previous projects exited DC states automatically when + * detecting programming. Now software controls the exit by + * programming the wake lock. This improves system performance and + * system interactions and better fits the flip queue style of + * programming. Wake lock is only required when DC5, DC6, or DC6v have + * been enabled in DC_STATE_EN and the wake lock mode of operation has + * been enabled. + * + * The wakelock mechanism in DMC allows the display engine to exit DC + * states explicitly before programming registers that may be powered + * down. In earlier hardware, this was done automatically and + * implicitly when the display engine accessed a register. With the + * wakelock implementation, the driver asserts a wakelock in DMC, + * which forces it to exit the DC state until the wakelock is + * deasserted. + * + * The mechanism can be enabled and disabled by writing to the + * DMC_WAKELOCK_CFG register. There are also 13 control registers + * that can be used to hold and release different wakelocks. In the + * current implementation, we only need one wakelock, so only + * DMC_WAKELOCK1_CTL is used. The other definitions are here for + * potential future use. 
+ */ + +#define DMC_WAKELOCK_CTL_TIMEOUT 5 +#define DMC_WAKELOCK_HOLD_TIME 50 + +struct intel_dmc_wl_range { + u32 start; + u32 end; +}; + +static struct intel_dmc_wl_range lnl_wl_range[] = { + { .start = 0x60000, .end = 0x7ffff }, +}; + +static void __intel_dmc_wl_release(struct intel_display *display) +{ + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_dmc_wl *wl = &display->wl; + + WARN_ON(refcount_read(&wl->refcount)); + + queue_delayed_work(i915->unordered_wq, &wl->work, + msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME)); +} + +static void intel_dmc_wl_work(struct work_struct *work) +{ + struct intel_dmc_wl *wl = + container_of(work, struct intel_dmc_wl, work.work); + struct intel_display *display = + container_of(wl, struct intel_display, wl); + unsigned long flags; + + spin_lock_irqsave(&wl->lock, flags); + + /* Bail out if refcount reached zero while waiting for the spinlock */ + if (!refcount_read(&wl->refcount)) + goto out_unlock; + + __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0); + + if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL, + DMC_WAKELOCK_CTL_ACK, 0, + DMC_WAKELOCK_CTL_TIMEOUT)) { + WARN_RATELIMIT(1, "DMC wakelock release timed out"); + goto out_unlock; + } + + wl->taken = false; + +out_unlock: + spin_unlock_irqrestore(&wl->lock, flags); +} + +static bool intel_dmc_wl_check_range(u32 address) +{ + int i; + bool wl_needed = false; + + for (i = 0; i < ARRAY_SIZE(lnl_wl_range); i++) { + if (address >= lnl_wl_range[i].start && + address <= lnl_wl_range[i].end) { + wl_needed = true; + break; + } + } + + return wl_needed; +} + +static bool __intel_dmc_wl_supported(struct intel_display *display) +{ + struct drm_i915_private *i915 = to_i915(display->drm); + + if (DISPLAY_VER(display) < 20 || + !intel_dmc_has_payload(i915) || + !display->params.enable_dmc_wl) + return false; + + return true; +} + +void intel_dmc_wl_init(struct intel_display *display) +{ + struct intel_dmc_wl *wl = &display->wl; + + /* don't call __intel_dmc_wl_supported(), DMC is not loaded yet */ + if (DISPLAY_VER(display) < 20 || !display->params.enable_dmc_wl) + return; + + INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work); + spin_lock_init(&wl->lock); + refcount_set(&wl->refcount, 0); +} + +void intel_dmc_wl_enable(struct intel_display *display) +{ + struct intel_dmc_wl *wl = &display->wl; + unsigned long flags; + + if (!__intel_dmc_wl_supported(display)) + return; + + spin_lock_irqsave(&wl->lock, flags); + + if (wl->enabled) + goto out_unlock; + + /* + * Enable wakelock in DMC. We shouldn't try to take the + * wakelock, because we're just enabling it, so call the + * non-locking version directly here. 
+ */ + __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE); + + wl->enabled = true; + wl->taken = false; + +out_unlock: + spin_unlock_irqrestore(&wl->lock, flags); +} + +void intel_dmc_wl_disable(struct intel_display *display) +{ + struct intel_dmc_wl *wl = &display->wl; + unsigned long flags; + + if (!__intel_dmc_wl_supported(display)) + return; + + flush_delayed_work(&wl->work); + + spin_lock_irqsave(&wl->lock, flags); + + if (!wl->enabled) + goto out_unlock; + + /* Disable wakelock in DMC */ + __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0); + + refcount_set(&wl->refcount, 0); + wl->enabled = false; + wl->taken = false; + +out_unlock: + spin_unlock_irqrestore(&wl->lock, flags); +} + +void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg) +{ + struct intel_dmc_wl *wl = &display->wl; + unsigned long flags; + + if (!__intel_dmc_wl_supported(display)) + return; + + if (!intel_dmc_wl_check_range(reg.reg)) + return; + + spin_lock_irqsave(&wl->lock, flags); + + if (!wl->enabled) + goto out_unlock; + + cancel_delayed_work(&wl->work); + + if (refcount_inc_not_zero(&wl->refcount)) + goto out_unlock; + + refcount_set(&wl->refcount, 1); + + /* + * Only try to take the wakelock if it's not marked as taken + * yet. It may be already taken at this point if we have + * already released the last reference, but the work has not + * run yet. + */ + if (!wl->taken) { + __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0, + DMC_WAKELOCK_CTL_REQ); + + if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL, + DMC_WAKELOCK_CTL_ACK, + DMC_WAKELOCK_CTL_ACK, + DMC_WAKELOCK_CTL_TIMEOUT)) { + WARN_RATELIMIT(1, "DMC wakelock ack timed out"); + goto out_unlock; + } + + wl->taken = true; + } + +out_unlock: + spin_unlock_irqrestore(&wl->lock, flags); +} + +void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg) +{ + struct intel_dmc_wl *wl = &display->wl; + unsigned long flags; + + if (!__intel_dmc_wl_supported(display)) + return; + + if (!intel_dmc_wl_check_range(reg.reg)) + return; + + spin_lock_irqsave(&wl->lock, flags); + + if (!wl->enabled) + goto out_unlock; + + if (WARN_RATELIMIT(!refcount_read(&wl->refcount), + "Tried to put wakelock with refcount zero\n")) + goto out_unlock; + + if (refcount_dec_and_test(&wl->refcount)) { + __intel_dmc_wl_release(display); + + goto out_unlock; + } + +out_unlock: + spin_unlock_irqrestore(&wl->lock, flags); +} diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.h b/drivers/gpu/drm/i915/display/intel_dmc_wl.h new file mode 100644 index 0000000000..adab51208d --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2024 Intel Corporation + */ + +#ifndef __INTEL_WAKELOCK_H__ +#define __INTEL_WAKELOCK_H__ + +#include +#include +#include + +#include "i915_reg_defs.h" + +struct intel_display; + +struct intel_dmc_wl { + spinlock_t lock; /* protects enabled, taken and refcount */ + bool enabled; + bool taken; + refcount_t refcount; + struct delayed_work work; +}; + +void intel_dmc_wl_init(struct intel_display *display); +void intel_dmc_wl_enable(struct intel_display *display); +void intel_dmc_wl_disable(struct intel_display *display); +void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg); +void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg); + +#endif /* __INTEL_WAKELOCK_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 
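intel_dmc_wl_get()/intel_dmc_wl_put() above follow a "refcount with lazy release" shape: the last put does not drop the hardware wakelock right away but schedules delayed work, so a quick put/get pair never toggles the REQ bit, and a pending release is cancelled by the next get. A stripped-down sketch of that pattern with generic names and no register access; hw_assert_lock() is a stand-in for the REQ/ACK handshake, and the delayed-work handler that performs the actual release is not shown.

#include <linux/jiffies.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct lazy_lock {
	spinlock_t lock;		/* protects refcount and taken */
	refcount_t refcount;
	struct delayed_work work;	/* performs the real release later */
	bool taken;
};

static void hw_assert_lock(void)
{
	/* would do the REQ write + ACK poll here */
}

static void lazy_lock_get(struct lazy_lock *ll)
{
	unsigned long flags;

	spin_lock_irqsave(&ll->lock, flags);

	cancel_delayed_work(&ll->work);		/* a pending release is now void */

	if (!refcount_inc_not_zero(&ll->refcount)) {
		refcount_set(&ll->refcount, 1);
		if (!ll->taken) {
			hw_assert_lock();
			ll->taken = true;
		}
	}

	spin_unlock_irqrestore(&ll->lock, flags);
}

static void lazy_lock_put(struct lazy_lock *ll)
{
	unsigned long flags;

	spin_lock_irqsave(&ll->lock, flags);

	if (refcount_dec_and_test(&ll->refcount))
		queue_delayed_work(system_unbound_wq, &ll->work,
				   msecs_to_jiffies(50));	/* drop it later */

	spin_unlock_irqrestore(&ll->lock, flags);
}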
950f86fb13..9c9e060476 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -89,6 +89,9 @@ #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 +/* Max DSC line buffer depth supported by HW. */ +#define INTEL_DP_DSC_MAX_LINE_BUF_DEPTH 13 + /* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */ #define DP_DSC_FEC_OVERHEAD_FACTOR 1028530 @@ -123,6 +126,14 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp) return dig_port->base.type == INTEL_OUTPUT_EDP; } +bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + return HAS_AS_SDP(i915) && + drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd); +} + static void intel_dp_unset_edid(struct intel_dp *intel_dp); /* Is link rate UHBR and thus 128b/132b? */ @@ -214,7 +225,7 @@ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp) * Sink rates for 128b/132b. If set, sink should support all 8b/10b * rates and 10 Gbps. */ - if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) { + if (drm_dp_128b132b_supported(intel_dp->dpcd)) { u8 uhbr_rates = 0; BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3); @@ -425,7 +436,7 @@ int intel_dp_max_link_data_rate(struct intel_dp *intel_dp, return max_rate; } -bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) +bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &intel_dig_port->base; @@ -447,11 +458,9 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp) static int icl_max_source_rate(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp)) + if (intel_encoder_is_combo(encoder) && !intel_dp_is_edp(intel_dp)) return 540000; return 810000; @@ -467,11 +476,9 @@ static int ehl_max_source_rate(struct intel_dp *intel_dp) static int mtl_max_source_rate(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - if (intel_is_c10phy(i915, phy)) + if (intel_encoder_is_c10phy(encoder)) return 810000; return 2000000; @@ -1202,15 +1209,15 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector, } bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp, + struct intel_connector *connector, int hdisplay, int clock) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct intel_connector *connector = intel_dp->attached_connector; - if (!intel_dp_can_bigjoiner(intel_dp)) + if (!intel_dp_has_bigjoiner(intel_dp)) return false; - return clock > i915->max_dotclk_freq || hdisplay > 5120 || + return clock > i915->display.cdclk.max_dotclk_freq || hdisplay > 5120 || connector->force_bigjoiner_enable; } @@ -1224,7 +1231,7 @@ intel_dp_mode_valid(struct drm_connector *_connector, const struct drm_display_mode *fixed_mode; int target_clock = mode->clock; int max_rate, mode_rate, max_lanes, max_link_clock; - int max_dotclk = dev_priv->max_dotclk_freq; + int 
max_dotclk = dev_priv->display.cdclk.max_dotclk_freq; u16 dsc_max_compressed_bpp = 0; u8 dsc_slice_count = 0; enum drm_mode_status status; @@ -1237,6 +1244,9 @@ intel_dp_mode_valid(struct drm_connector *_connector, if (mode->flags & DRM_MODE_FLAG_DBLCLK) return MODE_H_ILLEGAL; + if (mode->clock < 10000) + return MODE_CLOCK_LOW; + fixed_mode = intel_panel_fixed_mode(connector, mode); if (intel_dp_is_edp(intel_dp) && fixed_mode) { status = intel_panel_mode_valid(connector, mode); @@ -1246,10 +1256,8 @@ intel_dp_mode_valid(struct drm_connector *_connector, target_clock = fixed_mode->clock; } - if (mode->clock < 10000) - return MODE_CLOCK_LOW; - - if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) { + if (intel_dp_need_bigjoiner(intel_dp, connector, + mode->hdisplay, target_clock)) { bigjoiner = true; max_dotclk *= 2; } @@ -1310,11 +1318,7 @@ intel_dp_mode_valid(struct drm_connector *_connector, dsc = dsc_max_compressed_bpp && dsc_slice_count; } - /* - * Big joiner configuration needs DSC for TGL which is not true for - * XE_LPD where uncompressed joiner is supported. - */ - if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) + if (intel_dp_joiner_needs_dsc(dev_priv, bigjoiner) && !dsc) return MODE_CLOCK_HIGH; if (mode_rate > max_rate && !dsc) @@ -1708,7 +1712,6 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector, { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; - u8 line_buf_depth; int ret; /* @@ -1737,20 +1740,14 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector, connector->dp.dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & DP_DSC_RGB; - line_buf_depth = drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd); - if (!line_buf_depth) { + vdsc_cfg->line_buf_depth = min(INTEL_DP_DSC_MAX_LINE_BUF_DEPTH, + drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd)); + if (!vdsc_cfg->line_buf_depth) { drm_dbg_kms(&i915->drm, "DSC Sink Line Buffer Depth invalid\n"); return -EINVAL; } - if (vdsc_cfg->dsc_version_minor == 2) - vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? - DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; - else - vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? - DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; - vdsc_cfg->block_pred_enable = connector->dp.dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & DP_DSC_BLK_PREDICTION_IS_SUPPORTED; @@ -2405,6 +2402,16 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state) return intel_dp_link_required(adjusted_mode->crtc_clock, bpp); } +bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner) +{ + /* + * Pipe joiner needs compression up to display 12 due to bandwidth + * limitation. DG2 onwards pipe joiner can be enabled without + * compression. 
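The DSC hunk above replaces the per-DSC-version special cases with one clamp: use the sink-reported line buffer depth, but never more than the 13 bits the source hardware handles, and treat a zero report as invalid. A tiny standalone illustration, with the hardware limit taken from the new INTEL_DP_DSC_MAX_LINE_BUF_DEPTH define:

#include <stdio.h>

#define HW_MAX_LINE_BUF_DEPTH 13

/* Return the usable depth, or -1 for an invalid (zero) sink report. */
static int usable_line_buf_depth(int sink_depth)
{
	if (!sink_depth)
		return -1;
	return sink_depth < HW_MAX_LINE_BUF_DEPTH ? sink_depth : HW_MAX_LINE_BUF_DEPTH;
}

int main(void)
{
	printf("%d %d %d\n",
	       usable_line_buf_depth(0),	/* -1: invalid */
	       usable_line_buf_depth(11),	/* 11: sink is the limit */
	       usable_line_buf_depth(16));	/* 13: hardware is the limit */
	return 0;
}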
+ */ + return DISPLAY_VER(i915) < 13 && use_joiner; +} + static int intel_dp_compute_link_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, @@ -2413,30 +2420,25 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - const struct intel_connector *connector = + struct intel_connector *connector = to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct link_config_limits limits; - bool joiner_needs_dsc = false; - bool dsc_needed; + bool dsc_needed, joiner_needs_dsc; int ret = 0; if (pipe_config->fec_enable && !intel_dp_supports_fec(intel_dp, connector, pipe_config)) return -EINVAL; - if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay, + if (intel_dp_need_bigjoiner(intel_dp, connector, + adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_clock)) pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe); - /* - * Pipe joiner needs compression up to display 12 due to bandwidth - * limitation. DG2 onwards pipe joiner can be enabled without - * compression. - */ - joiner_needs_dsc = DISPLAY_VER(i915) < 13 && pipe_config->bigjoiner_pipes; + joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, pipe_config->bigjoiner_pipes); dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en || !intel_dp_compute_config_limits(intel_dp, pipe_config, @@ -2619,6 +2621,29 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; } +static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) +{ + struct drm_dp_as_sdp *as_sdp = &crtc_state->infoframes.as_sdp; + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + + if (!crtc_state->vrr.enable || + !intel_dp_as_sdp_supported(intel_dp)) + return; + + crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC); + + /* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */ + as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC; + as_sdp->length = 0x9; + as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL; + as_sdp->vtotal = adjusted_mode->vtotal; + as_sdp->target_rr = 0; + as_sdp->duration_incr_ms = 0; + as_sdp->duration_incr_ms = 0; +} + static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) @@ -2639,7 +2664,7 @@ static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, vsc); - } else if (crtc_state->has_psr2) { + } else if (crtc_state->has_sel_update) { /* * [PSR2 without colorimetry] * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 @@ -2974,6 +2999,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, g4x_dp_set_clock(encoder, pipe_config); intel_vrr_compute_config(pipe_config, conn_state); + intel_dp_compute_as_sdp(intel_dp, pipe_config); intel_psr_compute_config(intel_dp, pipe_config, conn_state); intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16); intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); @@ -3366,6 +3392,14 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, fastset = false; } + if (CAN_PANEL_REPLAY(intel_dp)) { + drm_dbg_kms(&i915->drm, 
+ "[ENCODER:%d:%s] Forcing full modeset to compute panel replay state\n", + encoder->base.base.id, encoder->base.name); + crtc_state->uapi.mode_changed = true; + fastset = false; + } + return fastset; } @@ -4049,39 +4083,84 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) intel_dp->downstream_ports) == 0; } -static bool -intel_dp_can_mst(struct intel_dp *intel_dp) +static const char *intel_dp_mst_mode_str(enum drm_dp_mst_mode mst_mode) +{ + if (mst_mode == DRM_DP_MST) + return "MST"; + else if (mst_mode == DRM_DP_SST_SIDEBAND_MSG) + return "SST w/ sideband messaging"; + else + return "SST"; +} + +static enum drm_dp_mst_mode +intel_dp_mst_mode_choose(struct intel_dp *intel_dp, + enum drm_dp_mst_mode sink_mst_mode) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); - return i915->display.params.enable_dp_mst && - intel_dp_mst_source_support(intel_dp) && - drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); + if (!i915->display.params.enable_dp_mst) + return DRM_DP_SST; + + if (!intel_dp_mst_source_support(intel_dp)) + return DRM_DP_SST; + + if (sink_mst_mode == DRM_DP_SST_SIDEBAND_MSG && + !(intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B)) + return DRM_DP_SST; + + return sink_mst_mode; } -static void -intel_dp_configure_mst(struct intel_dp *intel_dp) +static enum drm_dp_mst_mode +intel_dp_mst_detect(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct intel_encoder *encoder = - &dp_to_dig_port(intel_dp)->base; - bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + enum drm_dp_mst_mode sink_mst_mode; + enum drm_dp_mst_mode mst_detect; + + sink_mst_mode = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); + + mst_detect = intel_dp_mst_mode_choose(intel_dp, sink_mst_mode); drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", + "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s -> enable: %s\n", encoder->base.base.id, encoder->base.name, str_yes_no(intel_dp_mst_source_support(intel_dp)), - str_yes_no(sink_can_mst), - str_yes_no(i915->display.params.enable_dp_mst)); + intel_dp_mst_mode_str(sink_mst_mode), + str_yes_no(i915->display.params.enable_dp_mst), + intel_dp_mst_mode_str(mst_detect)); + return mst_detect; +} + +static void +intel_dp_mst_configure(struct intel_dp *intel_dp) +{ if (!intel_dp_mst_source_support(intel_dp)) return; - intel_dp->is_mst = sink_can_mst && - i915->display.params.enable_dp_mst; + intel_dp->is_mst = intel_dp->mst_detect != DRM_DP_SST; - drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, - intel_dp->is_mst); + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); + + /* Avoid stale info on the next detect cycle. 
*/ + intel_dp->mst_detect = DRM_DP_SST; +} + +static void +intel_dp_mst_disconnect(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + if (!intel_dp->is_mst) + return; + + drm_dbg_kms(&i915->drm, "MST device may have disappeared %d vs %d\n", + intel_dp->is_mst, intel_dp->mst_mgr.mst_state); + intel_dp->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); } static bool @@ -4129,6 +4208,32 @@ intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, return false; } +static ssize_t intel_dp_as_sdp_pack(const struct drm_dp_as_sdp *as_sdp, + struct dp_sdp *sdp, size_t size) +{ + size_t length = sizeof(struct dp_sdp); + + if (size < length) + return -ENOSPC; + + memset(sdp, 0, size); + + /* Prepare AS (Adaptive Sync) SDP Header */ + sdp->sdp_header.HB0 = 0; + sdp->sdp_header.HB1 = as_sdp->sdp_type; + sdp->sdp_header.HB2 = 0x02; + sdp->sdp_header.HB3 = as_sdp->length; + + /* Fill AS (Adaptive Sync) SDP Payload */ + sdp->db[0] = as_sdp->mode; + sdp->db[1] = as_sdp->vtotal & 0xFF; + sdp->db[2] = (as_sdp->vtotal >> 8) & 0xFF; + sdp->db[3] = as_sdp->target_rr & 0xFF; + sdp->db[4] = (as_sdp->target_rr >> 8) & 0x3; + + return length; +} + static ssize_t intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915, const struct hdmi_drm_infoframe *drm_infoframe, @@ -4228,6 +4333,10 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder, &crtc_state->infoframes.drm.drm, &sdp, sizeof(sdp)); break; + case DP_SDP_ADAPTIVE_SYNC: + len = intel_dp_as_sdp_pack(&crtc_state->infoframes.as_sdp, &sdp, + sizeof(sdp)); + break; default: MISSING_CASE(type); return; @@ -4249,6 +4358,10 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder, u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; + + if (HAS_AS_SDP(dev_priv)) + dip_enable |= VIDEO_DIP_ENABLE_AS_ADL; + u32 val = intel_de_read(dev_priv, reg) & ~dip_enable; /* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). 
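intel_dp_as_sdp_pack() above serializes the Adaptive Sync SDP as a 4-byte header plus payload, with the 16-bit vtotal split across DB1/DB2 and only the low 10 bits of the target refresh rate kept in DB3/DB4. A standalone sketch of that byte layout; the struct below is local to the example and the type value 0x22 is assumed for the Adaptive Sync SDP.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct as_sdp {			/* local stand-in for the DRM structure */
	uint8_t  type, length, mode;
	uint16_t vtotal;
	uint16_t target_rr;
};

static void pack_as_sdp(const struct as_sdp *as, uint8_t hdr[4], uint8_t db[32])
{
	memset(db, 0, 32);

	hdr[0] = 0;			/* HB0: reserved */
	hdr[1] = as->type;		/* HB1: SDP type */
	hdr[2] = 0x02;			/* HB2: revision */
	hdr[3] = as->length;		/* HB3: payload length */

	db[0] = as->mode;
	db[1] = as->vtotal & 0xff;	/* vtotal, low byte */
	db[2] = (as->vtotal >> 8) & 0xff;
	db[3] = as->target_rr & 0xff;		/* target refresh rate, bits 7:0 */
	db[4] = (as->target_rr >> 8) & 0x3;	/* bits 9:8 */
}

int main(void)
{
	struct as_sdp as = { .type = 0x22, .length = 9, .mode = 1,
			     .vtotal = 2250, .target_rr = 0 };
	uint8_t hdr[4], db[32];

	pack_as_sdp(&as, hdr, db);
	printf("HB %02x %02x %02x %02x  DB1/DB2 %02x %02x\n",
	       hdr[0], hdr[1], hdr[2], hdr[3], db[1], db[2]);
	return 0;
}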
*/ @@ -4266,10 +4379,42 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder, return; intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); + intel_write_dp_sdp(encoder, crtc_state, DP_SDP_ADAPTIVE_SYNC); intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); } +static +int intel_dp_as_sdp_unpack(struct drm_dp_as_sdp *as_sdp, + const void *buffer, size_t size) +{ + const struct dp_sdp *sdp = buffer; + + if (size < sizeof(struct dp_sdp)) + return -EINVAL; + + memset(as_sdp, 0, sizeof(*as_sdp)); + + if (sdp->sdp_header.HB0 != 0) + return -EINVAL; + + if (sdp->sdp_header.HB1 != DP_SDP_ADAPTIVE_SYNC) + return -EINVAL; + + if (sdp->sdp_header.HB2 != 0x02) + return -EINVAL; + + if ((sdp->sdp_header.HB3 & 0x3F) != 9) + return -EINVAL; + + as_sdp->length = sdp->sdp_header.HB3 & DP_ADAPTIVE_SYNC_SDP_LENGTH; + as_sdp->mode = sdp->db[0] & DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE; + as_sdp->vtotal = (sdp->db[2] << 8) | sdp->db[1]; + as_sdp->target_rr = (u64)sdp->db[3] | ((u64)sdp->db[4] & 0x3); + + return 0; +} + static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, const void *buffer, size_t size) { @@ -4340,6 +4485,29 @@ static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, return 0; } +static void +intel_read_dp_as_sdp(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_dp_as_sdp *as_sdp) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + unsigned int type = DP_SDP_ADAPTIVE_SYNC; + struct dp_sdp sdp = {}; + int ret; + + if ((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(type)) == 0) + return; + + dig_port->read_infoframe(encoder, crtc_state, type, &sdp, + sizeof(sdp)); + + ret = intel_dp_as_sdp_unpack(as_sdp, &sdp, sizeof(sdp)); + if (ret) + drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP AS SDP\n"); +} + static int intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, const void *buffer, size_t size) @@ -4446,6 +4614,10 @@ void intel_read_dp_sdp(struct intel_encoder *encoder, intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state, &crtc_state->infoframes.drm.drm); break; + case DP_SDP_ADAPTIVE_SYNC: + intel_read_dp_as_sdp(encoder, crtc_state, + &crtc_state->infoframes.as_sdp); + break; default: MISSING_CASE(type); break; @@ -5095,6 +5267,8 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, !intel_dp_mst_is_master_trans(crtc_state)) continue; + intel_dp->link_trained = false; + intel_dp_check_frl_training(intel_dp); intel_dp_pcon_dsc_configure(intel_dp, crtc_state); intel_dp_start_link_train(intel_dp, crtc_state); @@ -5373,6 +5547,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) if (!intel_dp_get_dpcd(intel_dp)) return connector_status_disconnected; + intel_dp->mst_detect = intel_dp_mst_detect(intel_dp); + /* if there's no downstream port, we're done */ if (!drm_dp_is_branch(dpcd)) return connector_status_connected; @@ -5384,7 +5560,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) connector_status_connected : connector_status_disconnected; } - if (intel_dp_can_mst(intel_dp)) + if (intel_dp->mst_detect == DRM_DP_MST) return connector_status_connected; /* If no HPD, poke DDC gently */ @@ -5689,15 +5865,7 @@ intel_dp_detect(struct drm_connector *connector, memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd)); intel_dp->psr.sink_panel_replay_support = false; - if (intel_dp->is_mst) { - drm_dbg_kms(&dev_priv->drm, - "MST device may have disappeared %d vs 
%d\n", - intel_dp->is_mst, - intel_dp->mst_mgr.mst_state); - intel_dp->is_mst = false; - drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, - intel_dp->is_mst); - } + intel_dp_mst_disconnect(intel_dp); intel_dp_tunnel_disconnect(intel_dp); @@ -5716,7 +5884,7 @@ intel_dp_detect(struct drm_connector *connector, intel_dp_detect_dsc_caps(intel_dp, intel_connector); - intel_dp_configure_mst(intel_dp); + intel_dp_mst_configure(intel_dp); /* * TODO: Reset link params when switching to MST mode, until MST @@ -6499,7 +6667,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_encoder->port; - enum phy phy = intel_port_to_phy(dev_priv, port); int type; /* Initialize the work for modeset in case of link train failure */ @@ -6524,7 +6691,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, * Currently we don't support eDP on TypeC ports, although in * theory it could work on TypeC legacy ports. */ - drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); + drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder)); type = DRM_MODE_CONNECTOR_eDP; intel_encoder->type = INTEL_OUTPUT_EDP; diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index c540d3a73f..106ecfde36 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -88,6 +88,7 @@ void intel_dp_audio_compute_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state); bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp); bool intel_dp_is_edp(struct intel_dp *intel_dp); +bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp); bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state); int intel_dp_link_symbol_size(int rate); int intel_dp_link_symbol_clock(int rate); @@ -119,7 +120,8 @@ int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, int bw_overhead); int intel_dp_max_link_data_rate(struct intel_dp *intel_dp, int max_dprx_rate, int max_dprx_lanes); -bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp); +bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner); +bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp); bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable, @@ -149,6 +151,7 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector, int mode_clock, int mode_hdisplay, bool bigjoiner); bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp, + struct intel_connector *connector, int hdisplay, int clock); static inline unsigned int intel_dp_unused_lane_mask(int lane_count) diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 4f4a0e3b31..b8a53bb174 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -61,9 +61,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp) u32 status; int ret; - ret = __intel_de_wait_for_register(i915, ch_ctl, - DP_AUX_CH_CTL_SEND_BUSY, 0, - 2, timeout_ms, &status); + ret = intel_de_wait_custom(i915, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY, 0, + 2, timeout_ms, &status); if (ret == -ETIMEDOUT) drm_err(&i915->drm, @@ -143,9 +142,15 @@ static int intel_dp_aux_sync_len(void) return precharge + preamble; } -static int intel_dp_aux_fw_sync_len(void) +int intel_dp_aux_fw_sync_len(void) 
{ - int precharge = 10; /* 10-16 */ + /* + * We faced some glitches on Dell Precision 5490 MTL laptop with panel: + * "Manufacturer: AUO, Model: 63898" when using HW default 18. Using 20 + * is fixing these problems with the panel. It is still within range + * mentioned in eDP specification. + */ + int precharge = 12; /* 10-16 */ int preamble = 8; return precharge + preamble; diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h index 8447f3e601..76d1f2ed7c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.h +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h @@ -20,5 +20,6 @@ enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder); void intel_dp_aux_irq_handler(struct drm_i915_private *i915); u32 intel_dp_aux_pack(const u8 *src, int src_bytes); +int intel_dp_aux_fw_sync_len(void); #endif /* __INTEL_DP_AUX_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c index 9db43bd81c..92b03073ac 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -769,11 +769,9 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector, return -EINVAL; /* Wait for encryption confirmation */ - if (intel_de_wait_for_register(i915, - HDCP_STATUS(i915, cpu_transcoder, port), - stream_enc_status, - enable ? stream_enc_status : 0, - HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { + if (intel_de_wait(i915, HDCP_STATUS(i915, cpu_transcoder, port), + stream_enc_status, enable ? stream_enc_status : 0, + HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled"); return -ETIMEDOUT; @@ -804,11 +802,10 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector, return ret; /* Wait for encryption confirmation */ - if (intel_de_wait_for_register(i915, - HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe), - STREAM_ENCRYPTION_STATUS, - enable ? STREAM_ENCRYPTION_STATUS : 0, - HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { + if (intel_de_wait(i915, HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe), + STREAM_ENCRYPTION_STATUS, + enable ? STREAM_ENCRYPTION_STATUS : 0, + HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled"); return -ETIMEDOUT; diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index fb84ca98bb..8cfc55f3d9 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -114,10 +114,24 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable) return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1; } -static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp) +{ + return intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] == + DP_PHY_REPEATER_MODE_TRANSPARENT; +} + +/* + * Read the LTTPR common capabilities and switch the LTTPR PHYs to + * non-transparent mode if this is supported. Preserve the + * transparent/non-transparent mode on an active link. 
+ * + * Return the number of detected LTTPRs in non-transparent mode or 0 if the + * LTTPRs are in transparent mode or the detection failed. + */ +static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { int lttpr_count; - int i; if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd)) return 0; @@ -131,6 +145,19 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI if (lttpr_count == 0) return 0; + /* + * Don't change the mode on an active link, to prevent a loss of link + * synchronization. See DP Standard v2.0 3.6.7. about the LTTPR + * resetting its internal state when the mode is changed from + * non-transparent to transparent. + */ + if (intel_dp->link_trained) { + if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp)) + goto out_reset_lttpr_count; + + return lttpr_count; + } + /* * See DP Standard v2.0 3.6.6.1. about the explicit disabling of * non-transparent mode and the disable->enable non-transparent mode @@ -151,11 +178,25 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n"); intel_dp_set_lttpr_transparent_mode(intel_dp, true); - intel_dp_reset_lttpr_count(intel_dp); - return 0; + goto out_reset_lttpr_count; } + return lttpr_count; + +out_reset_lttpr_count: + intel_dp_reset_lttpr_count(intel_dp); + + return 0; +} + +static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + int lttpr_count; + int i; + + lttpr_count = intel_dp_init_lttpr_phys(intel_dp, dpcd); + for (i = 0; i < lttpr_count; i++) intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i)); @@ -334,7 +375,7 @@ static bool has_per_lane_signal_levels(struct intel_dp *intel_dp, struct drm_i915_private *i915 = dp_to_i915(intel_dp); return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) || - DISPLAY_VER(i915) >= 11; + DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915); } /* 128b/132b */ @@ -1372,10 +1413,10 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp, { struct drm_i915_private *i915 = dp_to_i915(intel_dp); bool passed; - /* - * TODO: Reiniting LTTPRs here won't be needed once proper connector - * HW state readout is added. + * Reinit the LTTPRs here to ensure that they are switched to + * non-transparent mode. During an earlier LTTPR detection this + * could've been prevented by an active link. 
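intel_dp_init_lttpr_phys() above now refuses to flip the repeaters between transparent and non-transparent mode while a link is already trained, because (per the comment, citing DP Standard v2.0 3.6.7) an LTTPR resets its internal state on that transition. The branch structure boils down to a small function; the booleans below are simplified stand-ins for the DPCD-derived state.

#include <stdbool.h>
#include <stdio.h>

/* Return the LTTPR count to use, or 0 to fall back to transparent handling. */
static int lttpr_count_to_use(int detected_count, bool link_trained,
			      bool currently_transparent)
{
	if (detected_count <= 0)
		return 0;			/* none detected, or detection failed */

	if (link_trained)			/* never switch modes on a live link */
		return currently_transparent ? 0 : detected_count;

	return detected_count;			/* free to program non-transparent mode */
}

int main(void)
{
	printf("%d\n", lttpr_count_to_use(2, false, true));	/* 2: may reprogram */
	printf("%d\n", lttpr_count_to_use(2, true, true));	/* 0: stay transparent */
	printf("%d\n", lttpr_count_to_use(2, true, false));	/* 2: already non-transparent */
	return 0;
}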
*/ int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 8264ff7fb6..715d2f59f5 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -51,25 +51,39 @@ #include "intel_vdsc.h" #include "skl_scaler.h" -static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp, - const struct drm_display_mode *adjusted_mode, - struct intel_crtc_state *crtc_state, - bool dsc) +static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state, + bool dsc) { - if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) < 14 && dsc) { - int output_bpp = bpp; - /* DisplayPort 2 128b/132b, bits per lane is always 32 */ - int symbol_clock = crtc_state->port_clock / 32; - - if (output_bpp * adjusted_mode->crtc_clock >= - symbol_clock * 72) { - drm_dbg_kms(&i915->drm, "UHBR check failed(required bw %d available %d)\n", - output_bpp * adjusted_mode->crtc_clock, symbol_clock * 72); - return -EINVAL; - } - } + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; - return 0; + if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(i915) >= 20 || !dsc) + return INT_MAX; + + /* + * DSC->DPT interface width: + * ICL-MTL: 72 bits (each branch has 72 bits, only left branch is used) + * LNL+: 144 bits (not a bottleneck in any config) + * + * Bspec/49259 suggests that the FEC overhead needs to be + * applied here, though HW people claim that neither this FEC + * or any other overhead is applicable here (that is the actual + * available_bw is just symbol_clock * 72). However based on + * testing on MTL-P the + * - DELL U3224KBA display + * - Unigraf UCD-500 CTS test sink + * devices the + * - 5120x2880/995.59Mhz + * - 6016x3384/1357.23Mhz + * - 6144x3456/1413.39Mhz + * modes (all the ones having a DPT limit on the above devices), + * both the channel coding efficiency and an additional 3% + * overhead needs to be accounted for. 
+ */ + return div64_u64(mul_u32_u32(intel_dp_link_symbol_clock(crtc_state->port_clock) * 72, + drm_dp_bw_channel_coding_efficiency(true)), + mul_u32_u32(adjusted_mode->crtc_clock, 1030000)); } static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state, @@ -88,11 +102,10 @@ static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state, if (dsc) { flags |= DRM_DP_BW_OVERHEAD_DSC; - /* TODO: add support for bigjoiner */ dsc_slice_count = intel_dp_dsc_get_slice_count(connector, adjusted_mode->clock, adjusted_mode->hdisplay, - false); + crtc_state->bigjoiner_pipes); } overhead = drm_dp_bw_overhead(crtc_state->lane_count, @@ -158,6 +171,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int bpp, slots = -EINVAL; + int max_dpt_bpp; int ret = 0; mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr); @@ -178,6 +192,13 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, crtc_state->port_clock, crtc_state->lane_count); + max_dpt_bpp = intel_dp_mst_max_dpt_bpp(crtc_state, dsc); + if (max_bpp > max_dpt_bpp) { + drm_dbg_kms(&i915->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n", + max_bpp, max_dpt_bpp); + max_bpp = max_dpt_bpp; + } + drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n", min_bpp, max_bpp); @@ -189,10 +210,6 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp); - ret = intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc); - if (ret) - continue; - link_bpp_x16 = to_bpp_x16(dsc ? bpp : intel_dp_output_bpp(crtc_state->output_format, bpp)); @@ -404,15 +421,22 @@ static int mode_hblank_period_ns(const struct drm_display_mode *mode) static bool hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector, - const struct intel_crtc_state *crtc_state) + const struct intel_crtc_state *crtc_state, + const struct link_config_limits *limits) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + bool is_uhbr_sink = connector->mst_port && + drm_dp_128b132b_supported(connector->mst_port->dpcd); + int hblank_limit = is_uhbr_sink ? 
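The max-DPT-bpp helper above caps the compressed bpp by what a 72-bit DSC-to-DPT interface can carry: symbol_clock * 72 scaled by the channel coding efficiency in the numerator, and the pixel clock with an extra 3% margin (the 1030000 factor) in the denominator. A standalone arithmetic sketch with round numbers; the 967100 efficiency value is assumed here as the 128b/132b figure.

#include <stdint.h>
#include <stdio.h>

/* bpp ceiling of a 72-bit DSC->DPT interface (clocks in kHz, efficiency in 1e-6). */
static uint64_t max_dpt_bpp(uint64_t symbol_clock, uint64_t pixel_clock,
			    uint64_t coding_eff)
{
	return (symbol_clock * 72 * coding_eff) / (pixel_clock * 1030000ULL);
}

int main(void)
{
	/* UHBR10: 10 Gbps / 32-bit symbols = 312500 kHz; a 1357230 kHz dotclock mode */
	printf("%llu bpp\n", (unsigned long long)
	       max_dpt_bpp(312500, 1357230, 967100));	/* prints 15 here */
	return 0;
}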
500 : 300; if (!connector->dp.dsc_hblank_expansion_quirk) return false; - if (mode_hblank_period_ns(adjusted_mode) > 300) + if (is_uhbr_sink && !drm_dp_is_uhbr_rate(limits->max_rate)) + return false; + + if (mode_hblank_period_ns(adjusted_mode) > hblank_limit) return false; return true; @@ -428,7 +452,7 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); int min_bpp_x16 = limits->link.min_bpp_x16; - if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state)) + if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state, limits)) return true; if (!dsc) { @@ -525,14 +549,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; - const struct intel_connector *connector = + struct intel_connector *connector = to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct link_config_limits limits; - bool dsc_needed; + bool dsc_needed, joiner_needs_dsc; int ret = 0; if (pipe_config->fec_enable && @@ -542,11 +567,18 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; + if (intel_dp_need_bigjoiner(intel_dp, connector, + adjusted_mode->crtc_hdisplay, + adjusted_mode->crtc_clock)) + pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe); + pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_pch_encoder = false; - dsc_needed = intel_dp->force_dsc_en || + joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, pipe_config->bigjoiner_pipes); + + dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en || !intel_dp_mst_compute_config_limits(intel_dp, connector, pipe_config, @@ -566,8 +598,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, /* enable compression if the mode doesn't fit available BW */ if (dsc_needed) { - drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, force=%s)\n", - str_yes_no(ret), + drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n", + str_yes_no(ret), str_yes_no(joiner_needs_dsc), str_yes_no(intel_dp->force_dsc_en)); if (!intel_dp_mst_dsc_source_support(pipe_config)) @@ -613,7 +645,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) pipe_config->lane_lat_optim_mask = - bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); + bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); intel_dp_audio_compute_config(encoder, pipe_config, conn_state); @@ -954,6 +986,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, struct drm_dp_mst_atomic_payload *new_payload = drm_atomic_get_mst_payload_state(new_mst_state, connector->port); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_crtc *pipe_crtc; bool last_mst_stream; intel_dp->active_mst_links--; @@ -962,7 +995,13 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, DISPLAY_VER(dev_priv) >= 12 && last_mst_stream && 
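Both the SST and the MST compute paths above now record a joiner as a contiguous pipe bitmask, GENMASK(crtc->pipe + 1, crtc->pipe), and the enable/disable sequences later walk every pipe in that mask instead of touching only the primary one. A standalone sketch of building and walking such a mask; the MASK macro below is a simplified local stand-in for the kernel's GENMASK.

#include <stdio.h>

#define MASK(hi, lo)	(((1u << ((hi) - (lo) + 1)) - 1) << (lo))

int main(void)
{
	unsigned int first_pipe = 2;				/* e.g. pipe C */
	unsigned int joined = MASK(first_pipe + 1, first_pipe);	/* pipes C and D */
	unsigned int pipe;

	for (pipe = 0; pipe < 8; pipe++)
		if (joined & (1u << pipe))
			printf("program pipe %u\n", pipe);	/* prints 2 then 3 */
	return 0;
}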
!intel_dp_mst_is_master_trans(old_crtc_state)); - intel_crtc_vblank_off(old_crtc_state); + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(old_crtc_state)) { + const struct intel_crtc_state *old_pipe_crtc_state = + intel_atomic_get_old_crtc_state(state, pipe_crtc); + + intel_crtc_vblank_off(old_pipe_crtc_state); + } intel_disable_transcoder(old_crtc_state); @@ -980,12 +1019,18 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, intel_ddi_disable_transcoder_func(old_crtc_state); - intel_dsc_disable(old_crtc_state); + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(old_crtc_state)) { + const struct intel_crtc_state *old_pipe_crtc_state = + intel_atomic_get_old_crtc_state(state, pipe_crtc); - if (DISPLAY_VER(dev_priv) >= 9) - skl_scaler_disable(old_crtc_state); - else - ilk_pfit_disable(old_crtc_state); + intel_dsc_disable(old_pipe_crtc_state); + + if (DISPLAY_VER(dev_priv) >= 9) + skl_scaler_disable(old_pipe_crtc_state); + else + ilk_pfit_disable(old_pipe_crtc_state); + } /* * Power down mst path before disabling the port, otherwise we end @@ -1117,6 +1162,39 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_set_dp_msa(pipe_config, conn_state); } +static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + u32 clear = 0; + u32 set = 0; + + if (!IS_ALDERLAKE_P(i915)) + return; + + if (!IS_DISPLAY_STEP(i915, STEP_D0, STEP_FOREVER)) + return; + + /* Wa_14013163432:adlp */ + if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state)) + set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder); + + /* Wa_14014143976:adlp */ + if (IS_DISPLAY_STEP(i915, STEP_E0, STEP_FOREVER)) { + if (intel_dp_is_uhbr(crtc_state)) + set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder); + else if (crtc_state->fec_enable) + clear |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder); + + if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state)) + set |= DP_MST_DPT_DPTP_ALIGN_WA(crtc_state->cpu_transcoder); + } + + if (!clear && !set) + return; + + intel_de_rmw(i915, CHICKEN_MISC_3, clear, set); +} + static void intel_mst_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, @@ -1131,6 +1209,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr); enum transcoder trans = pipe_config->cpu_transcoder; bool first_mst_stream = intel_dp->active_mst_links == 1; + struct intel_crtc *pipe_crtc; drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder); @@ -1145,6 +1224,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff)); } + enable_bs_jitter_was(pipe_config); + intel_ddi_enable_transcoder_func(encoder, pipe_config); clear_act_sent(encoder, pipe_config); @@ -1172,7 +1253,13 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, intel_enable_transcoder(pipe_config); - intel_crtc_vblank_on(pipe_config); + for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc, + intel_crtc_joined_pipe_mask(pipe_config)) { + const struct intel_crtc_state *pipe_crtc_state = + intel_atomic_get_new_crtc_state(state, pipe_crtc); + + intel_crtc_vblank_on(pipe_crtc_state); + } intel_hdcp_enable(state, encoder, pipe_config, conn_state); } @@ -1285,7 +1372,7 @@ 
intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr; struct drm_dp_mst_port *port = intel_connector->port; const int min_bpp = 18; - int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq; int max_rate, mode_rate, max_lanes, max_link_clock; int ret; bool dsc = false, bigjoiner = false; @@ -1302,8 +1389,13 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, if (*status != MODE_OK) return 0; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) { - *status = MODE_NO_DBLESCAN; + if (mode->flags & DRM_MODE_FLAG_DBLCLK) { + *status = MODE_H_ILLEGAL; + return 0; + } + + if (mode->clock < 10000) { + *status = MODE_CLOCK_LOW; return 0; } @@ -1314,10 +1406,6 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, max_link_clock, max_lanes); mode_rate = intel_dp_link_required(mode->clock, min_bpp); - ret = drm_modeset_lock(&mgr->base.lock, ctx); - if (ret) - return ret; - /* * TODO: * - Also check if compression would allow for the mode @@ -1330,27 +1418,18 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, * corresponding link capabilities of the sink) in case the * stream is uncompressed for it by the last branch device. */ - if (mode_rate > max_rate || mode->clock > max_dotclk || - drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) { - *status = MODE_CLOCK_HIGH; - return 0; - } - - if (mode->clock < 10000) { - *status = MODE_CLOCK_LOW; - return 0; - } - - if (mode->flags & DRM_MODE_FLAG_DBLCLK) { - *status = MODE_H_ILLEGAL; - return 0; - } - - if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) { + if (intel_dp_need_bigjoiner(intel_dp, intel_connector, + mode->hdisplay, target_clock)) { bigjoiner = true; max_dotclk *= 2; + } + + ret = drm_modeset_lock(&mgr->base.lock, ctx); + if (ret) + return ret; - /* TODO: add support for bigjoiner */ + if (mode_rate > max_rate || mode->clock > max_dotclk || + drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) { *status = MODE_CLOCK_HIGH; return 0; } @@ -1383,11 +1462,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, dsc = dsc_max_compressed_bpp && dsc_slice_count; } - /* - * Big joiner configuration needs DSC for TGL which is not true for - * XE_LPD where uncompressed joiner is supported. - */ - if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) { + if (intel_dp_joiner_needs_dsc(dev_priv, bigjoiner) && !dsc) { *status = MODE_CLOCK_HIGH; return 0; } @@ -1397,7 +1472,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, return 0; } - *status = intel_mode_valid_max_plane_size(dev_priv, mode, false); + *status = intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner); return 0; } @@ -1509,24 +1584,41 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp, static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct drm_dp_aux *aux = connector->dp.dsc_decompression_aux; struct drm_dp_desc desc; u8 dpcd[DP_RECEIVER_CAP_SIZE]; - if (!connector->dp.dsc_decompression_aux) + if (!aux) return false; - if (drm_dp_read_desc(connector->dp.dsc_decompression_aux, - &desc, true) < 0) + /* + * A logical port's OUI (at least for affected sinks) is all 0, so + * instead of that the parent port's OUI is used for identification. 
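 *
 * (Illustrative aside, not from the upstream change: the code right after
 *  this comment picks which AUX channel to read the branch identification
 *  from. A minimal sketch of that selection logic, with invented names such
 *  as pick_ident_aux() and an opaque struct aux_ch standing in for the
 *  drm_dp_aux plumbing, assuming only that a parent AUX may or may not be
 *  available.)
 *
 *  struct aux_ch;  // opaque stand-in for the real AUX channel type
 *
 *  // Prefer the parent branch device's AUX for logical ports, whose own
 *  // OUI reads back as all zeroes; otherwise fall back to the topology
 *  // manager's AUX. A plain port just uses its own AUX.
 *  static struct aux_ch *pick_ident_aux(struct aux_ch *port_aux,
 *                                       int port_is_logical,
 *                                       struct aux_ch *parent_aux,
 *                                       struct aux_ch *mgr_aux)
 *  {
 *          if (!port_is_logical)
 *                  return port_aux;
 *          return parent_aux ? parent_aux : mgr_aux;
 *  }
 *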
+ */ + if (drm_dp_mst_port_is_logical(connector->port)) { + aux = drm_dp_mst_aux_for_parent(connector->port); + if (!aux) + aux = &connector->mst_port->aux; + } + + if (drm_dp_read_dpcd_caps(aux, dpcd) < 0) return false; - if (!drm_dp_has_quirk(&desc, - DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC)) + if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0) return false; - if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd) < 0) + if (!drm_dp_has_quirk(&desc, + DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC)) return false; - if (!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE)) + /* + * UHBR (MST sink) devices requiring this quirk don't advertise the + * HBLANK expansion support. Presuming that they perform HBLANK + * expansion internally, or are affected by this issue on modes with a + * short HBLANK for other reasons. + */ + if (!drm_dp_128b132b_supported(dpcd) && + !(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE)) return false; drm_dbg_kms(&i915->drm, diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c index 75d76f91ec..6503abdc2b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c +++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c @@ -348,7 +348,7 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp, out_err: drm_dbg_kms(&i915->drm, - "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and redect it (err %pe)\n", + "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n", drm_dp_tunnel_name(intel_dp->tunnel), connector->base.base.id, connector->base.name, encoder->base.base.id, encoder->base.name, diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 4ca910874a..d20e4e9cf7 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -21,6 +21,7 @@ * DEALINGS IN THE SOFTWARE. */ +#include "bxt_dpio_phy_regs.h" #include "i915_reg.h" #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" @@ -29,6 +30,7 @@ #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dpio_phy.h" +#include "vlv_dpio_phy_regs.h" #include "vlv_sideband.h" /** @@ -123,9 +125,9 @@ */ /** - * struct bxt_ddi_phy_info - Hold info for a broxton DDI phy + * struct bxt_dpio_phy_info - Hold info for a broxton DDI phy */ -struct bxt_ddi_phy_info { +struct bxt_dpio_phy_info { /** * @dual_channel: true if this phy has a second channel. 
*/ @@ -161,7 +163,7 @@ struct bxt_ddi_phy_info { } channel[2]; }; -static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = { +static const struct bxt_dpio_phy_info bxt_dpio_phy_info[] = { [DPIO_PHY0] = { .dual_channel = true, .rcomp_phy = DPIO_PHY1, @@ -183,7 +185,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = { }, }; -static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = { +static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = { [DPIO_PHY0] = { .dual_channel = false, .rcomp_phy = DPIO_PHY1, @@ -216,23 +218,23 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = { }, }; -static const struct bxt_ddi_phy_info * +static const struct bxt_dpio_phy_info * bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) { if (IS_GEMINILAKE(dev_priv)) { - *count = ARRAY_SIZE(glk_ddi_phy_info); - return glk_ddi_phy_info; + *count = ARRAY_SIZE(glk_dpio_phy_info); + return glk_dpio_phy_info; } else { - *count = ARRAY_SIZE(bxt_ddi_phy_info); - return bxt_ddi_phy_info; + *count = ARRAY_SIZE(bxt_dpio_phy_info); + return bxt_dpio_phy_info; } } -static const struct bxt_ddi_phy_info * +static const struct bxt_dpio_phy_info * bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy) { int count; - const struct bxt_ddi_phy_info *phy_list = + const struct bxt_dpio_phy_info *phy_list = bxt_get_phy_list(dev_priv, &count); return &phy_list[phy]; @@ -241,7 +243,7 @@ bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy) void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, enum dpio_phy *phy, enum dpio_channel *ch) { - const struct bxt_ddi_phy_info *phy_info, *phys; + const struct bxt_dpio_phy_info *phy_info, *phys; int i, count; phys = bxt_get_phy_list(dev_priv, &count); @@ -269,16 +271,32 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, *ch = DPIO_CH0; } -void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +/* + * Like intel_de_rmw() but reads from a single per-lane register and + * writes to the group register to write the same value to all the lanes. + */ +static u32 bxt_dpio_phy_rmw_grp(struct drm_i915_private *i915, + i915_reg_t reg_single, + i915_reg_t reg_group, + u32 clear, u32 set) +{ + u32 old, val; + + old = intel_de_read(i915, reg_single); + val = (old & ~clear) | set; + intel_de_write(i915, reg_group, val); + + return old; +} + +void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int level = intel_ddi_level(encoder, crtc_state, 0); const struct intel_ddi_buf_trans *trans; enum dpio_channel ch; enum dpio_phy phy; - int n_entries; - u32 val; + int lane, n_entries; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) @@ -290,41 +308,51 @@ void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder, * While we write to the group register to program all lanes at once we * can read only lane registers and we pick lanes 0/1 for that. 
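 *
 * (Illustrative aside, not from the upstream change: the new
 *  bxt_dpio_phy_rmw_grp() helper introduced above is an ordinary
 *  read-modify-write, except that the value is read from one lane's register
 *  and written to the group register so every lane ends up with the same
 *  value. A self-contained model of that pattern with a fake register array
 *  standing in for MMIO; fake_read32()/fake_write32() are invented here.)
 *
 *  #include <stdint.h>
 *
 *  static uint32_t fake_regs[64];   // stand-in for the MMIO space
 *
 *  static uint32_t fake_read32(unsigned int reg)
 *  {
 *          return fake_regs[reg];
 *  }
 *
 *  static void fake_write32(unsigned int reg, uint32_t val)
 *  {
 *          fake_regs[reg] = val;
 *  }
 *
 *  // Read one per-lane register, apply clear/set, broadcast the result
 *  // through the group register; return the old value like a normal rmw.
 *  static uint32_t rmw_grp(unsigned int reg_single, unsigned int reg_group,
 *                          uint32_t clear, uint32_t set)
 *  {
 *          uint32_t old = fake_read32(reg_single);
 *
 *          fake_write32(reg_group, (old & ~clear) | set);
 *          return old;
 *  }
 *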
*/ - val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch)); - val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT); - intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val); - - val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch)); - val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE); - val |= trans->entries[level].bxt.margin << MARGIN_000_SHIFT | - trans->entries[level].bxt.scale << UNIQ_TRANS_SCALE_SHIFT; - intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val); - - val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch)); - val &= ~SCALE_DCOMP_METHOD; - if (trans->entries[level].bxt.enable) - val |= SCALE_DCOMP_METHOD; - - if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD)) - drm_err(&dev_priv->drm, - "Disabled scaling while ouniqetrangenmethod was set"); - - intel_de_write(dev_priv, BXT_PORT_TX_DW3_GRP(phy, ch), val); - - val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch)); - val &= ~DE_EMPHASIS; - val |= trans->entries[level].bxt.deemphasis << DEEMPH_SHIFT; - intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val); - - val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch)); - val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT; - intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val); + bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch), + BXT_PORT_PCS_DW10_GRP(phy, ch), + TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT, 0); + + for (lane = 0; lane < crtc_state->lane_count; lane++) { + int level = intel_ddi_level(encoder, crtc_state, lane); + + intel_de_rmw(dev_priv, BXT_PORT_TX_DW2_LN(phy, ch, lane), + MARGIN_000_MASK | UNIQ_TRANS_SCALE_MASK, + MARGIN_000(trans->entries[level].bxt.margin) | + UNIQ_TRANS_SCALE(trans->entries[level].bxt.scale)); + } + + for (lane = 0; lane < crtc_state->lane_count; lane++) { + int level = intel_ddi_level(encoder, crtc_state, lane); + u32 val; + + intel_de_rmw(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane), + SCALE_DCOMP_METHOD, + trans->entries[level].bxt.enable ? 
+ SCALE_DCOMP_METHOD : 0); + + val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane)); + if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD)) + drm_err(&dev_priv->drm, + "Disabled scaling while ouniqetrangenmethod was set"); + } + + for (lane = 0; lane < crtc_state->lane_count; lane++) { + int level = intel_ddi_level(encoder, crtc_state, lane); + + intel_de_rmw(dev_priv, BXT_PORT_TX_DW4_LN(phy, ch, lane), + DE_EMPHASIS_MASK, + DE_EMPHASIS(trans->entries[level].bxt.deemphasis)); + } + + bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch), + BXT_PORT_PCS_DW10_GRP(phy, ch), + 0, TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT); } -bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv, + enum dpio_phy phy) { - const struct bxt_ddi_phy_info *phy_info; + const struct bxt_dpio_phy_info *phy_info; phy_info = bxt_get_phy_info(dev_priv, phy); @@ -353,7 +381,7 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) { u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy)); - return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT; + return REG_FIELD_GET(GRC_CODE_MASK, val); } static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, @@ -365,20 +393,20 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, phy); } -static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +static void _bxt_dpio_phy_init(struct drm_i915_private *dev_priv, + enum dpio_phy phy) { - const struct bxt_ddi_phy_info *phy_info; + const struct bxt_dpio_phy_info *phy_info; u32 val; phy_info = bxt_get_phy_info(dev_priv, phy); - if (bxt_ddi_phy_is_enabled(dev_priv, phy)) { + if (bxt_dpio_phy_is_enabled(dev_priv, phy)) { /* Still read out the GRC value for state verification */ if (phy_info->rcomp_phy != -1) dev_priv->display.state.bxt_phy_grc = bxt_get_grc(dev_priv, phy); - if (bxt_ddi_phy_verify_state(dev_priv, phy)) { + if (bxt_dpio_phy_verify_state(dev_priv, phy)) { drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, " "won't reprogram it\n", phy); return; @@ -399,20 +427,17 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, * The flag should get set in 100us according to the HW team, but * use 1ms due to occasional timeouts observed with that. 
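 *
 * (Illustrative aside, not from the upstream change: the conversion just
 *  below to an intel_de_wait_fw()-style call is the usual pattern of polling
 *  a register until a masked value appears or a timeout expires. A rough,
 *  self-contained sketch of that pattern; fake_read32(), the 10us back-off
 *  and the retry-count timeout are all invented for the sketch, the real
 *  helper polls MMIO against a proper timer.)
 *
 *  #include <stdbool.h>
 *  #include <stdint.h>
 *  #include <unistd.h>
 *
 *  static uint32_t fake_read32(unsigned int reg)
 *  {
 *          (void)reg;
 *          return 0;   // stub register read for the sketch
 *  }
 *
 *  // Poll until (reg & mask) == value, giving up after max_tries reads.
 *  static bool wait_for_bits(unsigned int reg, uint32_t mask, uint32_t value,
 *                            unsigned int max_tries)
 *  {
 *          while (max_tries--) {
 *                  if ((fake_read32(reg) & mask) == value)
 *                          return true;
 *                  usleep(10);   // brief delay between polls
 *          }
 *          return false;
 *  }
 *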
*/ - if (intel_wait_for_register_fw(&dev_priv->uncore, - BXT_PORT_CL1CM_DW0(phy), - PHY_RESERVED | PHY_POWER_GOOD, - PHY_POWER_GOOD, - 1)) + if (intel_de_wait_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy), + PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1)) drm_err(&dev_priv->drm, "timeout during PHY%d power on\n", phy); /* Program PLL Rcomp code offset */ - intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK, - 0xE4 << IREF0RC_OFFSET_SHIFT); + intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), + IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xE4)); - intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK, - 0xE4 << IREF1RC_OFFSET_SHIFT); + intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), + IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xE4)); /* Program power gating */ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0, @@ -435,9 +460,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, val = bxt_get_grc(dev_priv, phy_info->rcomp_phy); dev_priv->display.state.bxt_phy_grc = val; - grc_code = val << GRC_CODE_FAST_SHIFT | - val << GRC_CODE_SLOW_SHIFT | - val; + grc_code = GRC_CODE_FAST(val) | + GRC_CODE_SLOW(val) | + GRC_CODE_NOM(val); intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code); intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy), 0, GRC_DIS | GRC_RDY_OVRD); @@ -449,9 +474,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS); } -void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) +void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) { - const struct bxt_ddi_phy_info *phy_info; + const struct bxt_dpio_phy_info *phy_info; phy_info = bxt_get_phy_info(dev_priv, phy); @@ -460,9 +485,9 @@ void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0); } -void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) +void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) { - const struct bxt_ddi_phy_info *phy_info = + const struct bxt_dpio_phy_info *phy_info = bxt_get_phy_info(dev_priv, phy); enum dpio_phy rcomp_phy = phy_info->rcomp_phy; bool was_enabled; @@ -471,19 +496,19 @@ void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) was_enabled = true; if (rcomp_phy != -1) - was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy); + was_enabled = bxt_dpio_phy_is_enabled(dev_priv, rcomp_phy); /* * We need to copy the GRC calibration value from rcomp_phy, * so make sure it's powered up. 
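 *
 * (Illustrative aside, not from the upstream change: above, the GRC code is
 *  now packed as GRC_CODE_FAST(val) | GRC_CODE_SLOW(val) | GRC_CODE_NOM(val)
 *  and read back with REG_FIELD_GET(), replacing open-coded shifts. A
 *  self-contained sketch of that mask-based field-macro style; the FIELD_*_X
 *  names and the example masks are made up, the real masks live in the new
 *  bxt_dpio_phy_regs.h.)
 *
 *  #include <stdint.h>
 *
 *  // Mask-based field helpers: the shift is derived from the mask itself
 *  // (uses the GCC/Clang __builtin_ctz builtin).
 *  #define FIELD_SHIFT_X(mask)     __builtin_ctz(mask)
 *  #define FIELD_PREP_X(mask, v)   (((uint32_t)(v) << FIELD_SHIFT_X(mask)) & (mask))
 *  #define FIELD_GET_X(mask, r)    (((r) & (mask)) >> FIELD_SHIFT_X(mask))
 *
 *  #define CODE_FAST_MASK  0xff000000u   // example field layout only
 *  #define CODE_SLOW_MASK  0x00ff0000u
 *  #define CODE_NOM_MASK   0x0000ff00u
 *
 *  // Replicate one calibration value into all three fields of the word.
 *  static uint32_t pack_code_word(uint32_t val)
 *  {
 *          return FIELD_PREP_X(CODE_FAST_MASK, val) |
 *                 FIELD_PREP_X(CODE_SLOW_MASK, val) |
 *                 FIELD_PREP_X(CODE_NOM_MASK, val);
 *  }
 *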
*/ if (!was_enabled) - _bxt_ddi_phy_init(dev_priv, rcomp_phy); + _bxt_dpio_phy_init(dev_priv, rcomp_phy); - _bxt_ddi_phy_init(dev_priv, phy); + _bxt_dpio_phy_init(dev_priv, phy); if (!was_enabled) - bxt_ddi_phy_uninit(dev_priv, rcomp_phy); + bxt_dpio_phy_uninit(dev_priv, rcomp_phy); } static bool __printf(6, 7) @@ -513,10 +538,10 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, return false; } -bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv, + enum dpio_phy phy) { - const struct bxt_ddi_phy_info *phy_info; + const struct bxt_dpio_phy_info *phy_info; u32 mask; bool ok; @@ -526,23 +551,23 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ ## __VA_ARGS__) - if (!bxt_ddi_phy_is_enabled(dev_priv, phy)) + if (!bxt_dpio_phy_is_enabled(dev_priv, phy)) return false; ok = true; /* PLL Rcomp code offset */ ok &= _CHK(BXT_PORT_CL1CM_DW9(phy), - IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT, - "BXT_PORT_CL1CM_DW9(%d)", phy); + IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xe4), + "BXT_PORT_CL1CM_DW9(%d)", phy); ok &= _CHK(BXT_PORT_CL1CM_DW10(phy), - IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT, - "BXT_PORT_CL1CM_DW10(%d)", phy); + IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xe4), + "BXT_PORT_CL1CM_DW10(%d)", phy); /* Power gating */ mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG; ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask, - "BXT_PORT_CL1CM_DW28(%d)", phy); + "BXT_PORT_CL1CM_DW28(%d)", phy); if (phy_info->dual_channel) ok &= _CHK(BXT_PORT_CL2CM_DW6(phy), @@ -552,9 +577,9 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, if (phy_info->rcomp_phy != -1) { u32 grc_code = dev_priv->display.state.bxt_phy_grc; - grc_code = grc_code << GRC_CODE_FAST_SHIFT | - grc_code << GRC_CODE_SLOW_SHIFT | - grc_code; + grc_code = GRC_CODE_FAST(grc_code) | + GRC_CODE_SLOW(grc_code) | + GRC_CODE_NOM(grc_code); mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK | GRC_CODE_NOM_MASK; ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code, @@ -562,7 +587,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, mask = GRC_DIS | GRC_RDY_OVRD; ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask, - "BXT_PORT_REF_DW8(%d)", phy); + "BXT_PORT_REF_DW8(%d)", phy); } return ok; @@ -570,7 +595,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, } u8 -bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count) +bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count) { switch (lane_count) { case 1: @@ -586,8 +611,8 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count) } } -void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, - u8 lane_lat_optim_mask) +void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder, + u8 lane_lat_optim_mask) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; @@ -598,24 +623,18 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); for (lane = 0; lane < 4; lane++) { - u32 val = intel_de_read(dev_priv, - BXT_PORT_TX_DW14_LN(phy, ch, lane)); - /* * Note that on CHV this flag is called UPAR, but has * the same function. 
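 *
 * (Illustrative aside, not from the upstream change: the surrounding loop
 *  simply sets or clears one per-lane flag, driven by a 4-bit mask. A
 *  self-contained sketch of that shape; the flag's bit position and the
 *  lane_regs[] array are invented for illustration.)
 *
 *  #include <stdint.h>
 *
 *  #define BIT_X(n)       (1u << (n))
 *  #define LANE_FLAG      BIT_X(25)   // bit position invented for the sketch
 *
 *  static uint32_t lane_regs[4];      // one fake per-lane register
 *
 *  // Apply the per-lane optimisation mask lane by lane.
 *  static void apply_lane_mask(uint8_t lane_mask)
 *  {
 *          for (int lane = 0; lane < 4; lane++) {
 *                  lane_regs[lane] &= ~LANE_FLAG;
 *                  if (lane_mask & BIT_X(lane))
 *                          lane_regs[lane] |= LANE_FLAG;
 *          }
 *  }
 *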
*/ - val &= ~LATENCY_OPTIM; - if (lane_lat_optim_mask & BIT(lane)) - val |= LATENCY_OPTIM; - - intel_de_write(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane), - val); + intel_de_rmw(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane), + LATENCY_OPTIM, + lane_lat_optim_mask & BIT(lane) ? LATENCY_OPTIM : 0); } } u8 -bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) +bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; @@ -701,9 +720,8 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); - enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); + enum dpio_phy phy = vlv_dig_port_to_phy(dig_port); u32 val; int i; @@ -740,7 +758,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, for (i = 0; i < crtc_state->lane_count; i++) { val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i)); val &= ~DPIO_SWING_DEEMPH9P5_MASK; - val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; + val |= DPIO_SWING_DEEMPH9P5(deemph_reg_value); vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val); } @@ -749,15 +767,15 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i)); val &= ~DPIO_SWING_MARGIN000_MASK; - val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; + val |= DPIO_SWING_MARGIN000(margin_reg_value); /* * Supposedly this value shouldn't matter when unique transition * scale is disabled, but in fact it does matter. Let's just * always program the same value and hope it's OK. 
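 *
 * (Illustrative aside, not from the upstream change: the per-lane programming
 *  loops in this file, e.g. the reworked bxt_dpio_phy_set_signal_levels()
 *  earlier, all look up a swing/de-emphasis entry for a lane's level and
 *  merge it into that lane's register. A self-contained sketch of the shape;
 *  the table contents, field positions and names such as apply_swing() are
 *  invented, and levels are assumed to be 0..3.)
 *
 *  #include <stdint.h>
 *
 *  struct swing_entry { uint8_t margin; uint8_t deemph; };
 *
 *  // Made-up table: one entry per signal level.
 *  static const struct swing_entry swing_table[4] = {
 *          { 0x34, 0x00 }, { 0x40, 0x10 }, { 0x52, 0x20 }, { 0x60, 0x30 },
 *  };
 *
 *  static uint32_t lane_reg[4];   // fake per-lane registers
 *
 *  // Program each lane with the entry selected by its level.
 *  static void apply_swing(const int level_per_lane[4], int lane_count)
 *  {
 *          for (int i = 0; i < lane_count; i++) {
 *                  const struct swing_entry *e = &swing_table[level_per_lane[i]];
 *
 *                  lane_reg[i] = (uint32_t)e->margin | ((uint32_t)e->deemph << 8);
 *          }
 *  }
 *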
*/ - val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); - val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; + val &= ~DPIO_UNIQ_TRANS_SCALE_MASK; + val |= DPIO_UNIQ_TRANS_SCALE(0x9a); vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val); } @@ -796,9 +814,9 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, bool reset) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); - enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); + enum dpio_phy phy = vlv_dig_port_to_phy(dig_port); u32 val; val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch)); @@ -843,7 +861,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); - enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); + enum dpio_phy phy = vlv_dig_port_to_phy(dig_port); enum pipe pipe = crtc->pipe; unsigned int lane_mask = intel_dp_unused_lane_mask(crtc_state->lane_count); @@ -866,39 +884,39 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder, /* program left/right clock distribution */ if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0); + val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0); val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); if (ch == DPIO_CH0) val |= CHV_BUFLEFTENA1_FORCE; if (ch == DPIO_CH1) val |= CHV_BUFRIGHTENA1_FORCE; - vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val); + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val); } else { - val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1); + val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1); val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); if (ch == DPIO_CH0) val |= CHV_BUFLEFTENA2_FORCE; if (ch == DPIO_CH1) val |= CHV_BUFRIGHTENA2_FORCE; - vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val); + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val); } /* program clock channel usage */ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; + val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe == PIPE_B) + val |= DPIO_PCS_USEDCLKCHANNEL; else - val |= CHV_PCS_USEDCLKCHANNEL; + val &= ~DPIO_PCS_USEDCLKCHANNEL; vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val); if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; + val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe == PIPE_B) + val |= DPIO_PCS_USEDCLKCHANNEL; else - val |= CHV_PCS_USEDCLKCHANNEL; + val &= ~DPIO_PCS_USEDCLKCHANNEL; vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val); } @@ -908,10 +926,10 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder, * pick the CL based on the port. 
*/ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch)); - if (pipe != PIPE_B) - val &= ~CHV_CMN_USEDCLKCHANNEL; - else + if (pipe == PIPE_B) val |= CHV_CMN_USEDCLKCHANNEL; + else + val &= ~CHV_CMN_USEDCLKCHANNEL; vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val); vlv_dpio_put(dev_priv); @@ -923,9 +941,8 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); - enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); + enum dpio_phy phy = vlv_dig_port_to_phy(dig_port); int data, i, stagger; u32 val; @@ -946,11 +963,10 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, for (i = 0; i < crtc_state->lane_count; i++) { /* Set the upar bit */ if (crtc_state->lane_count == 1) - data = 0x0; + data = 0; else - data = (i == 1) ? 0x0 : 0x1; - vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i), - data << DPIO_UPAR_SHIFT); + data = (i == 1) ? 0 : DPIO_UPAR; + vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i), data); } /* Data lane stagger programming */ @@ -1012,21 +1028,21 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe; - enum dpio_phy phy = vlv_pipe_to_phy(pipe); u32 val; vlv_dpio_get(dev_priv); /* disable left/right clock distribution */ if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0); + val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0); val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val); + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val); } else { - val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1); + val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1); val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val); + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val); } vlv_dpio_put(dev_priv); @@ -1050,24 +1066,23 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum dpio_channel port = vlv_dig_port_to_channel(dig_port); - enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); + enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); + enum dpio_phy phy = vlv_dig_port_to_phy(dig_port); vlv_dpio_get(dev_priv); - vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), 0x00000000); - vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(port), demph_reg_value); - vlv_dpio_write(dev_priv, phy, VLV_TX_DW2(port), + vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), 0x00000000); + vlv_dpio_write(dev_priv, phy, VLV_TX_DW4_GRP(ch), demph_reg_value); + vlv_dpio_write(dev_priv, phy, VLV_TX_DW2_GRP(ch), uniqtranscale_reg_value); - vlv_dpio_write(dev_priv, phy, VLV_TX_DW3(port), 0x0C782040); + vlv_dpio_write(dev_priv, phy, VLV_TX_DW3_GRP(ch), 0x0C782040); if (tx3_demph) - vlv_dpio_write(dev_priv, phy, VLV_TX3_DW4(port), tx3_demph); + vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(ch, 3), tx3_demph); - 
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11(port), 0x00030000); - vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9(port), preemph_reg_value); - vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11_GRP(ch), 0x00030000); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9_GRP(ch), preemph_reg_value); + vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), DPIO_TX_OCALINIT_EN); vlv_dpio_put(dev_priv); } @@ -1077,26 +1092,25 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder, { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum dpio_channel port = vlv_dig_port_to_channel(dig_port); - enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); + enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); + enum dpio_phy phy = vlv_dig_port_to_phy(dig_port); /* Program Tx lane resets to default */ vlv_dpio_get(dev_priv); - vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port), - DPIO_PCS_TX_LANE2_RESET | - DPIO_PCS_TX_LANE1_RESET); - vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port), - DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | - DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | - (1<base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum dpio_channel port = vlv_dig_port_to_channel(dig_port); + enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); + enum dpio_phy phy = vlv_dig_port_to_phy(dig_port); enum pipe pipe = crtc->pipe; - enum dpio_phy phy = vlv_pipe_to_phy(pipe); u32 val; vlv_dpio_get(dev_priv); /* Enable clock channels for this port */ - val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(port)); - val = 0; - if (pipe) - val |= (1<<21); - else - val &= ~(1<<21); - val |= 0x001000c4; - vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8(port), val); + val = DPIO_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe == PIPE_B) + val |= DPIO_PCS_USEDCLKCHANNEL; + val |= 0xc4; + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8_GRP(ch), val); /* Program lane clock */ - vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14(port), 0x00760018); - vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23(port), 0x00400888); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14_GRP(ch), 0x00760018); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23_GRP(ch), 0x00400888); vlv_dpio_put(dev_priv); } @@ -1137,12 +1148,11 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder, { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); - enum dpio_channel port = vlv_dig_port_to_channel(dig_port); - enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); + enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); + enum dpio_phy phy = vlv_dig_port_to_phy(dig_port); vlv_dpio_get(dev_priv); - vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port), 0x00000000); - vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port), 0x00e00060); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch), 0x00000000); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060); vlv_dpio_put(dev_priv); } diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h index 9adc4e8c17..226994dcb8 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h @@ -29,18 +29,18 @@ enum dpio_phy { #ifdef I915 void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, enum dpio_phy *phy, enum 
dpio_channel *ch); -void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state); -void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy); -void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy); -bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, - enum dpio_phy phy); -bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, - enum dpio_phy phy); -u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count); -void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, - u8 lane_lat_optim_mask); -u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder); +void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy); +void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy); +bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv, + enum dpio_phy phy); +bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv, + enum dpio_phy phy); +u8 bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count); +void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder, + u8 lane_lat_optim_mask); +u8 bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder); enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port); enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port); @@ -77,35 +77,35 @@ static inline void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, en enum dpio_phy *phy, enum dpio_channel *ch) { } -static inline void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +static inline void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { } -static inline void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) +static inline void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) { } -static inline void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) +static inline void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) { } -static inline bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +static inline bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv, + enum dpio_phy phy) { return false; } -static inline bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +static inline bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv, + enum dpio_phy phy) { return true; } -static inline u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count) +static inline u8 bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count) { return 0; } -static inline void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, - u8 lane_lat_optim_mask) +static inline void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder, + u8 lane_lat_optim_mask) { } -static inline u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) +static inline u8 bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) { return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 3038655377..a981f45fac 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -20,6 +20,7 @@ 
#include "intel_panel.h" #include "intel_pps.h" #include "intel_snps_phy.h" +#include "vlv_dpio_phy_regs.h" #include "vlv_sideband.h" struct intel_dpll_funcs { @@ -369,38 +370,68 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock) return clock->dot; } -static int i9xx_pll_refclk(struct drm_device *dev, - const struct intel_crtc_state *pipe_config) +static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(dev); - u32 dpll = pipe_config->dpll_hw_state.dpll; + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; - if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) - return dev_priv->display.vbt.lvds_ssc_freq; - else if (HAS_PCH_SPLIT(dev_priv)) + if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) + return i915->display.vbt.lvds_ssc_freq; + else if (HAS_PCH_SPLIT(i915)) return 120000; - else if (DISPLAY_VER(dev_priv) != 2) + else if (DISPLAY_VER(i915) != 2) return 96000; else return 48000; } +void i9xx_dpll_get_hw_state(struct intel_crtc *crtc, + struct intel_dpll_hw_state *dpll_hw_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx; + + if (DISPLAY_VER(dev_priv) >= 4) { + u32 tmp; + + /* No way to read it out on pipes B and C */ + if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) + tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe]; + else + tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe)); + + hw_state->dpll_md = tmp; + } + + hw_state->dpll = intel_de_read(dev_priv, DPLL(crtc->pipe)); + + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { + hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe)); + hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe)); + } else { + /* Mask out read-only status bits. */ + hw_state->dpll &= ~(DPLL_LOCK_VLV | + DPLL_PORTC_READY_MASK | + DPLL_PORTB_READY_MASK); + } +} + /* Returns the clock of the currently programmed mode of the given pipe. */ -void i9xx_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - u32 dpll = pipe_config->dpll_hw_state.dpll; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; + u32 dpll = hw_state->dpll; u32 fp; struct dpll clock; int port_clock; - int refclk = i9xx_pll_refclk(dev, pipe_config); + int refclk = i9xx_pll_refclk(crtc_state); if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) - fp = pipe_config->dpll_hw_state.fp0; + fp = hw_state->fp0; else - fp = pipe_config->dpll_hw_state.fp1; + fp = hw_state->fp1; clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; if (IS_PINEVIEW(dev_priv)) { @@ -475,68 +506,69 @@ void i9xx_crtc_clock_get(struct intel_crtc *crtc, * port_clock to compute adjusted_mode.crtc_clock in the * encoder's get_config() function. 
*/ - pipe_config->port_clock = port_clock; + crtc_state->port_clock = port_clock; } -void vlv_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe); enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); - struct dpll clock; - u32 mdiv; + const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; int refclk = 100000; + struct dpll clock; + u32 tmp; /* In case of DSI, DPLL will not be used */ - if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) + if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0) return; vlv_dpio_get(dev_priv); - mdiv = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(crtc->pipe)); + tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(ch)); vlv_dpio_put(dev_priv); - clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; - clock.m2 = mdiv & DPIO_M2DIV_MASK; - clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; - clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; - clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; + clock.m1 = REG_FIELD_GET(DPIO_M1_DIV_MASK, tmp); + clock.m2 = REG_FIELD_GET(DPIO_M2_DIV_MASK, tmp); + clock.n = REG_FIELD_GET(DPIO_N_DIV_MASK, tmp); + clock.p1 = REG_FIELD_GET(DPIO_P1_DIV_MASK, tmp); + clock.p2 = REG_FIELD_GET(DPIO_P2_DIV_MASK, tmp); - pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); + crtc_state->port_clock = vlv_calc_dpll_params(refclk, &clock); } -void chv_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +void chv_crtc_clock_get(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum dpio_channel port = vlv_pipe_to_channel(crtc->pipe); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe); enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); + const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; struct dpll clock; u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; int refclk = 100000; /* In case of DSI, DPLL will not be used */ - if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) + if ((hw_state->dpll & DPLL_VCO_ENABLE) == 0) return; vlv_dpio_get(dev_priv); - cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(port)); - pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(port)); - pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(port)); - pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(port)); - pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port)); + cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(ch)); + pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(ch)); + pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(ch)); + pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(ch)); + pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch)); vlv_dpio_put(dev_priv); - clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; - clock.m2 = (pll_dw0 & 0xff) << 22; + clock.m1 = REG_FIELD_GET(DPIO_CHV_M1_DIV_MASK, pll_dw1) == DPIO_CHV_M1_DIV_BY_2 ? 
2 : 0; + clock.m2 = REG_FIELD_GET(DPIO_CHV_M2_DIV_MASK, pll_dw0) << 22; if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) - clock.m2 |= pll_dw2 & 0x3fffff; - clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; - clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; - clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; + clock.m2 |= REG_FIELD_GET(DPIO_CHV_M2_FRAC_DIV_MASK, pll_dw2); + clock.n = REG_FIELD_GET(DPIO_CHV_N_DIV_MASK, pll_dw1); + clock.p1 = REG_FIELD_GET(DPIO_CHV_P1_DIV_MASK, cmn_dw13); + clock.p2 = REG_FIELD_GET(DPIO_CHV_P2_DIV_MASK, cmn_dw13); - pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); + crtc_state->port_clock = chv_calc_dpll_params(refclk, &clock); } /* @@ -958,37 +990,20 @@ static u32 pnv_dpll_compute_fp(const struct dpll *dpll) return (1 << dpll->n) << 16 | dpll->m2; } -static void i9xx_update_pll_dividers(struct intel_crtc_state *crtc_state, - const struct dpll *clock, - const struct dpll *reduced_clock) +static u32 i965_dpll_md(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 fp, fp2; - - if (IS_PINEVIEW(dev_priv)) { - fp = pnv_dpll_compute_fp(clock); - fp2 = pnv_dpll_compute_fp(reduced_clock); - } else { - fp = i9xx_dpll_compute_fp(clock); - fp2 = i9xx_dpll_compute_fp(reduced_clock); - } - - crtc_state->dpll_hw_state.fp0 = fp; - crtc_state->dpll_hw_state.fp1 = fp2; + return (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; } -static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state, - const struct dpll *clock, - const struct dpll *reduced_clock) +static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state, + const struct dpll *clock, + const struct dpll *reduced_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; - i9xx_update_pll_dividers(crtc_state, clock, reduced_clock); - - dpll = DPLL_VGA_MODE_DIS; + dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) dpll |= DPLLB_MODE_LVDS; @@ -1047,27 +1062,40 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state, else dpll |= PLL_REF_INPUT_DREFCLK; - dpll |= DPLL_VCO_ENABLE; - crtc_state->dpll_hw_state.dpll = dpll; - - if (DISPLAY_VER(dev_priv) >= 4) { - u32 dpll_md = (crtc_state->pixel_multiplier - 1) - << DPLL_MD_UDI_MULTIPLIER_SHIFT; - crtc_state->dpll_hw_state.dpll_md = dpll_md; - } + return dpll; } -static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state, +static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state, const struct dpll *clock, const struct dpll *reduced_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 dpll; + struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; - i9xx_update_pll_dividers(crtc_state, clock, reduced_clock); + if (IS_PINEVIEW(dev_priv)) { + hw_state->fp0 = pnv_dpll_compute_fp(clock); + hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock); + } else { + hw_state->fp0 = i9xx_dpll_compute_fp(clock); + hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock); + } + + hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock); + + if (DISPLAY_VER(dev_priv) >= 4) + hw_state->dpll_md = i965_dpll_md(crtc_state); +} - dpll = DPLL_VGA_MODE_DIS; +static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state, + const struct dpll *clock, + const struct dpll 
*reduced_clock) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 dpll; + + dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; @@ -1104,8 +1132,19 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state, else dpll |= PLL_REF_INPUT_DREFCLK; - dpll |= DPLL_VCO_ENABLE; - crtc_state->dpll_hw_state.dpll = dpll; + return dpll; +} + +static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state, + const struct dpll *clock, + const struct dpll *reduced_clock) +{ + struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; + + hw_state->fp0 = i9xx_dpll_compute_fp(clock); + hw_state->fp1 = i9xx_dpll_compute_fp(reduced_clock); + + hw_state->dpll = i8xx_dpll(crtc_state, clock, reduced_clock); } static int hsw_crtc_compute_clock(struct intel_atomic_state *state, @@ -1185,62 +1224,54 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state, return ret; /* TODO: Do the readback via intel_compute_shared_dplls() */ - crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state); + crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll); crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); return 0; } +static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && + ((intel_panel_use_ssc(i915) && i915->display.vbt.lvds_ssc_freq == 100000) || + (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915)))) + return 25; + + if (crtc_state->sdvo_tv_clock) + return 20; + + return 21; +} + static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor) { return dpll->m < factor * dpll->n; } -static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state, - const struct dpll *clock, - const struct dpll *reduced_clock) +static u32 ilk_dpll_compute_fp(const struct dpll *clock, int factor) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 fp, fp2; - int factor; - - /* Enable autotuning of the PLL clock (if permissible) */ - factor = 21; - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { - if ((intel_panel_use_ssc(dev_priv) && - dev_priv->display.vbt.lvds_ssc_freq == 100000) || - (HAS_PCH_IBX(dev_priv) && - intel_is_dual_link_lvds(dev_priv))) - factor = 25; - } else if (crtc_state->sdvo_tv_clock) { - factor = 20; - } + u32 fp; fp = i9xx_dpll_compute_fp(clock); if (ilk_needs_fb_cb_tune(clock, factor)) fp |= FP_CB_TUNE; - fp2 = i9xx_dpll_compute_fp(reduced_clock); - if (ilk_needs_fb_cb_tune(reduced_clock, factor)) - fp2 |= FP_CB_TUNE; - - crtc_state->dpll_hw_state.fp0 = fp; - crtc_state->dpll_hw_state.fp1 = fp2; + return fp; } -static void ilk_compute_dpll(struct intel_crtc_state *crtc_state, - const struct dpll *clock, - const struct dpll *reduced_clock) +static u32 ilk_dpll(const struct intel_crtc_state *crtc_state, + const struct dpll *clock, + const struct dpll *reduced_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; - ilk_update_pll_dividers(crtc_state, clock, reduced_clock); - - dpll = 0; + 
dpll = DPLL_VCO_ENABLE; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) dpll |= DPLLB_MODE_LVDS; @@ -1302,9 +1333,20 @@ static void ilk_compute_dpll(struct intel_crtc_state *crtc_state, else dpll |= PLL_REF_INPUT_DREFCLK; - dpll |= DPLL_VCO_ENABLE; + return dpll; +} + +static void ilk_compute_dpll(struct intel_crtc_state *crtc_state, + const struct dpll *clock, + const struct dpll *reduced_clock) +{ + struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; + int factor = ilk_fb_cb_factor(crtc_state); + + hw_state->fp0 = ilk_dpll_compute_fp(clock, factor); + hw_state->fp1 = ilk_dpll_compute_fp(reduced_clock, factor); - crtc_state->dpll_hw_state.dpll = dpll; + hw_state->dpll = ilk_dpll(crtc_state, clock, reduced_clock); } static int ilk_crtc_compute_clock(struct intel_atomic_state *state, @@ -1377,39 +1419,56 @@ static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state, return intel_reserve_shared_dplls(state, crtc, NULL); } -void vlv_compute_dpll(struct intel_crtc_state *crtc_state) +static u32 vlv_dpll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + u32 dpll; - crtc_state->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | + dpll = DPLL_INTEGRATED_REF_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; + if (crtc->pipe != PIPE_A) - crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; + dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; /* DPLL not used with DSI, but still need the rest set up */ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) - crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | - DPLL_EXT_BUFFER_ENABLE_VLV; + dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV; - crtc_state->dpll_hw_state.dpll_md = - (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; + return dpll; } -void chv_compute_dpll(struct intel_crtc_state *crtc_state) +void vlv_compute_dpll(struct intel_crtc_state *crtc_state) +{ + struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; + + hw_state->dpll = vlv_dpll(crtc_state); + hw_state->dpll_md = i965_dpll_md(crtc_state); +} + +static u32 chv_dpll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + u32 dpll; - crtc_state->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | + dpll = DPLL_SSC_REF_CLK_CHV | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; + if (crtc->pipe != PIPE_A) - crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; + dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; /* DPLL not used with DSI, but still need the rest set up */ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) - crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; + dpll |= DPLL_VCO_ENABLE; - crtc_state->dpll_hw_state.dpll_md = - (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; + return dpll; +} + +void chv_compute_dpll(struct intel_crtc_state *crtc_state) +{ + struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; + + hw_state->dpll = chv_dpll(crtc_state); + hw_state->dpll_md = i965_dpll_md(crtc_state); } static int chv_crtc_compute_clock(struct intel_atomic_state *state, @@ -1765,7 +1824,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 dpll = crtc_state->dpll_hw_state.dpll; + const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; enum pipe pipe = crtc->pipe; int i; @@ -1775,157 +1834,152 
@@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state) if (i9xx_has_pps(dev_priv)) assert_pps_unlocked(dev_priv, pipe); - intel_de_write(dev_priv, FP0(pipe), crtc_state->dpll_hw_state.fp0); - intel_de_write(dev_priv, FP1(pipe), crtc_state->dpll_hw_state.fp1); + intel_de_write(dev_priv, FP0(pipe), hw_state->fp0); + intel_de_write(dev_priv, FP1(pipe), hw_state->fp1); /* * Apparently we need to have VGA mode enabled prior to changing * the P1/P2 dividers. Otherwise the DPLL will keep using the old * dividers, even though the register value does change. */ - intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); - intel_de_write(dev_priv, DPLL(pipe), dpll); + intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll & ~DPLL_VGA_MODE_DIS); + intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll); /* Wait for the clocks to stabilize. */ intel_de_posting_read(dev_priv, DPLL(pipe)); udelay(150); if (DISPLAY_VER(dev_priv) >= 4) { - intel_de_write(dev_priv, DPLL_MD(pipe), - crtc_state->dpll_hw_state.dpll_md); + intel_de_write(dev_priv, DPLL_MD(pipe), hw_state->dpll_md); } else { /* The pixel multiplier can only be updated once the * DPLL is enabled and the clocks are stable. * * So write it again. */ - intel_de_write(dev_priv, DPLL(pipe), dpll); + intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll); } /* We do this three times for luck */ for (i = 0; i < 3; i++) { - intel_de_write(dev_priv, DPLL(pipe), dpll); + intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll); intel_de_posting_read(dev_priv, DPLL(pipe)); udelay(150); /* wait for warmup */ } } static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, - enum dpio_phy phy) + enum dpio_phy phy, enum dpio_channel ch) { - u32 reg_val; + u32 tmp; /* * PLLB opamp always calibrates to max value of 0x3f, force enable it * and set it to a reasonable value instead. 
*/ - reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1)); - reg_val &= 0xffffff00; - reg_val |= 0x00000030; - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val); - - reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13); - reg_val &= 0x00ffffff; - reg_val |= 0x8c000000; - vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val); - - reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1)); - reg_val &= 0xffffff00; - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val); - - reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13); - reg_val &= 0x00ffffff; - reg_val |= 0xb0000000; - vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val); + tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch)); + tmp &= 0xffffff00; + tmp |= 0x00000030; + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp); + + tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11); + tmp &= 0x00ffffff; + tmp |= 0x8c000000; + vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp); + + tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW17(ch)); + tmp &= 0xffffff00; + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW17(ch), tmp); + + tmp = vlv_dpio_read(dev_priv, phy, VLV_REF_DW11); + tmp &= 0x00ffffff; + tmp |= 0xb0000000; + vlv_dpio_write(dev_priv, phy, VLV_REF_DW11, tmp); } static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct dpll *clock = &crtc_state->dpll; + enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe); enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); enum pipe pipe = crtc->pipe; - u32 mdiv; - u32 bestn, bestm1, bestm2, bestp1, bestp2; - u32 coreclk, reg_val; + u32 tmp, coreclk; vlv_dpio_get(dev_priv); - bestn = crtc_state->dpll.n; - bestm1 = crtc_state->dpll.m1; - bestm2 = crtc_state->dpll.m2; - bestp1 = crtc_state->dpll.p1; - bestp2 = crtc_state->dpll.p2; - /* See eDP HDMI DPIO driver vbios notes doc */ /* PLL B needs special handling */ if (pipe == PIPE_B) - vlv_pllb_recal_opamp(dev_priv, phy); + vlv_pllb_recal_opamp(dev_priv, phy, ch); /* Set up Tx target for periodic Rcomp update */ - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9_BCAST, 0x0100000f); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f); /* Disable target IRef on PLL */ - reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW8(pipe)); - reg_val &= 0x00ffffff; - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW8(pipe), reg_val); + tmp = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW16(ch)); + tmp &= 0x00ffffff; + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW16(ch), tmp); /* Disable fast lock */ vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610); /* Set idtafcrecal before PLL is enabled */ - mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); - mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); - mdiv |= ((bestn << DPIO_N_SHIFT)); - mdiv |= (1 << DPIO_K_SHIFT); + tmp = DPIO_M1_DIV(clock->m1) | + DPIO_M2_DIV(clock->m2) | + DPIO_P1_DIV(clock->p1) | + DPIO_P2_DIV(clock->p2) | + DPIO_N_DIV(clock->n) | + DPIO_K_DIV(1); /* * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, * but we don't support that). * Note: don't use the DAC post divider as it seems unstable. 
*/ - mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv); + tmp |= DPIO_S1_DIV(DPIO_S1_DIV_HDMIDP); + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp); - mdiv |= DPIO_ENABLE_CALIBRATION; - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv); + tmp |= DPIO_ENABLE_CALIBRATION; + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(ch), tmp); /* Set HBR and RBR LPF coefficients */ if (crtc_state->port_clock == 162000 || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe), + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch), 0x009f0003); else - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe), + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW18(ch), 0x00d0000f); if (intel_crtc_has_dp_encoder(crtc_state)) { /* Use SSC source */ if (pipe == PIPE_A) - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe), + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch), 0x0df40000); else - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe), + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch), 0x0df70000); } else { /* HDMI or VGA */ /* Use bend source */ if (pipe == PIPE_A) - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe), + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch), 0x0df70000); else - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe), + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(ch), 0x0df40000); } - coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(pipe)); + coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(ch)); coreclk = (coreclk & 0x0000ff00) | 0x01c00000; if (intel_crtc_has_dp_encoder(crtc_state)) coreclk |= 0x01000000; - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(pipe), coreclk); + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(ch), coreclk); - vlv_dpio_write(dev_priv, phy, VLV_PLL_DW11(pipe), 0x87871000); + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW19(ch), 0x87871000); vlv_dpio_put(dev_priv); } @@ -1934,9 +1988,10 @@ static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; enum pipe pipe = crtc->pipe; - intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll); + intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll); intel_de_posting_read(dev_priv, DPLL(pipe)); udelay(150); @@ -1948,6 +2003,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; enum pipe pipe = crtc->pipe; assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); @@ -1957,16 +2013,14 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state) /* Enable Refclk */ intel_de_write(dev_priv, DPLL(pipe), - crtc_state->dpll_hw_state.dpll & - ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); + hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); - if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) { + if (hw_state->dpll & DPLL_VCO_ENABLE) { vlv_prepare_pll(crtc_state); _vlv_enable_pll(crtc_state); } - intel_de_write(dev_priv, DPLL_MD(pipe), - crtc_state->dpll_hw_state.dpll_md); + intel_de_write(dev_priv, DPLL_MD(pipe), hw_state->dpll_md); intel_de_posting_read(dev_priv, DPLL_MD(pipe)); } @@ -1974,93 +2028,87 @@ static void 
chv_prepare_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - enum dpio_channel port = vlv_pipe_to_channel(pipe); + const struct dpll *clock = &crtc_state->dpll; + enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe); enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); - u32 loopfilter, tribuf_calcntr; - u32 bestm2, bestp1, bestp2, bestm2_frac; - u32 dpio_val; - int vco; - - bestm2_frac = crtc_state->dpll.m2 & 0x3fffff; - bestm2 = crtc_state->dpll.m2 >> 22; - bestp1 = crtc_state->dpll.p1; - bestp2 = crtc_state->dpll.p2; - vco = crtc_state->dpll.vco; - dpio_val = 0; - loopfilter = 0; + u32 tmp, loopfilter, tribuf_calcntr; + u32 m2_frac; + + m2_frac = clock->m2 & 0x3fffff; vlv_dpio_get(dev_priv); /* p1 and p2 divider */ - vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(port), - 5 << DPIO_CHV_S1_DIV_SHIFT | - bestp1 << DPIO_CHV_P1_DIV_SHIFT | - bestp2 << DPIO_CHV_P2_DIV_SHIFT | - 1 << DPIO_CHV_K_DIV_SHIFT); + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(ch), + DPIO_CHV_S1_DIV(5) | + DPIO_CHV_P1_DIV(clock->p1) | + DPIO_CHV_P2_DIV(clock->p2) | + DPIO_CHV_K_DIV(1)); /* Feedback post-divider - m2 */ - vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(port), bestm2); + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(ch), + DPIO_CHV_M2_DIV(clock->m2 >> 22)); /* Feedback refclk divider - n and m1 */ - vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(port), - DPIO_CHV_M1_DIV_BY_2 | - 1 << DPIO_CHV_N_DIV_SHIFT); + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(ch), + DPIO_CHV_M1_DIV(DPIO_CHV_M1_DIV_BY_2) | + DPIO_CHV_N_DIV(1)); /* M2 fraction division */ - vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(port), bestm2_frac); + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(ch), + DPIO_CHV_M2_FRAC_DIV(m2_frac)); /* M2 fraction division enable */ - dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port)); - dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); - dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); - if (bestm2_frac) - dpio_val |= DPIO_CHV_FRAC_DIV_EN; - vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(port), dpio_val); + tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(ch)); + tmp &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); + tmp |= DPIO_CHV_FEEDFWD_GAIN(2); + if (m2_frac) + tmp |= DPIO_CHV_FRAC_DIV_EN; + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(ch), tmp); /* Program digital lock detect threshold */ - dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(port)); - dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | - DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); - dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); - if (!bestm2_frac) - dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; - vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(port), dpio_val); + tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(ch)); + tmp &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | + DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); + tmp |= DPIO_CHV_INT_LOCK_THRESHOLD(0x5); + if (!m2_frac) + tmp |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(ch), tmp); /* Loop filter */ - if (vco == 5400000) { - loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); - loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); - loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); + if (clock->vco == 5400000) { + loopfilter = DPIO_CHV_PROP_COEFF(0x3) | + DPIO_CHV_INT_COEFF(0x8) | + DPIO_CHV_GAIN_CTRL(0x1); tribuf_calcntr = 0x9; - } else if (vco <= 6200000) { - loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 
- loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); - loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); + } else if (clock->vco <= 6200000) { + loopfilter = DPIO_CHV_PROP_COEFF(0x5) | + DPIO_CHV_INT_COEFF(0xB) | + DPIO_CHV_GAIN_CTRL(0x3); tribuf_calcntr = 0x9; - } else if (vco <= 6480000) { - loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); - loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); - loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); + } else if (clock->vco <= 6480000) { + loopfilter = DPIO_CHV_PROP_COEFF(0x4) | + DPIO_CHV_INT_COEFF(0x9) | + DPIO_CHV_GAIN_CTRL(0x3); tribuf_calcntr = 0x8; } else { /* Not supported. Apply the same limits as in the max case */ - loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); - loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); - loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); + loopfilter = DPIO_CHV_PROP_COEFF(0x4) | + DPIO_CHV_INT_COEFF(0x9) | + DPIO_CHV_GAIN_CTRL(0x3); tribuf_calcntr = 0; } - vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(port), loopfilter); + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(ch), loopfilter); - dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(port)); - dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; - dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); - vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(port), dpio_val); + tmp = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(ch)); + tmp &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; + tmp |= DPIO_CHV_TDC_TARGET_CNT(tribuf_calcntr); + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(ch), tmp); /* AFC Recal */ - vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), - vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)) | - DPIO_AFC_RECAL); + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), + vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)) | + DPIO_AFC_RECAL); vlv_dpio_put(dev_priv); } @@ -2069,17 +2117,18 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - enum dpio_channel port = vlv_pipe_to_channel(pipe); + const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; + enum dpio_channel ch = vlv_pipe_to_channel(crtc->pipe); enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); + enum pipe pipe = crtc->pipe; u32 tmp; vlv_dpio_get(dev_priv); /* Enable back the 10bit clock to display controller */ - tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)); + tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)); tmp |= DPIO_DCLKP_EN; - vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), tmp); + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), tmp); vlv_dpio_put(dev_priv); @@ -2089,7 +2138,7 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state) udelay(1); /* Enable PLL */ - intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll); + intel_de_write(dev_priv, DPLL(pipe), hw_state->dpll); /* Check PLL is locked */ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) @@ -2100,6 +2149,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; enum pipe pipe = crtc->pipe; assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); @@ -2109,9 +2159,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state) /* Enable Refclk and SSC */ intel_de_write(dev_priv, 
DPLL(pipe), - crtc_state->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); + hw_state->dpll & ~DPLL_VCO_ENABLE); - if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) { + if (hw_state->dpll & DPLL_VCO_ENABLE) { chv_prepare_pll(crtc_state); _chv_enable_pll(crtc_state); } @@ -2124,10 +2174,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state) * the value from DPLLBMD to either pipe B or C. */ intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe)); - intel_de_write(dev_priv, DPLL_MD(PIPE_B), - crtc_state->dpll_hw_state.dpll_md); + intel_de_write(dev_priv, DPLL_MD(PIPE_B), hw_state->dpll_md); intel_de_write(dev_priv, CBR4_VLV, 0); - dev_priv->display.state.chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md; + dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md; /* * DPLLB VGA mode also seems to cause problems. @@ -2137,8 +2186,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state) (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0); } else { - intel_de_write(dev_priv, DPLL_MD(pipe), - crtc_state->dpll_hw_state.dpll_md); + intel_de_write(dev_priv, DPLL_MD(pipe), hw_state->dpll_md); intel_de_posting_read(dev_priv, DPLL_MD(pipe)); } } @@ -2199,7 +2247,7 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) { - enum dpio_channel port = vlv_pipe_to_channel(pipe); + enum dpio_channel ch = vlv_pipe_to_channel(pipe); enum dpio_phy phy = vlv_pipe_to_phy(pipe); u32 val; @@ -2217,9 +2265,9 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) vlv_dpio_get(dev_priv); /* Disable 10bit clock to display controller */ - val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)); + val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(ch)); val &= ~DPIO_DCLKP_EN; - vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), val); + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(ch), val); vlv_dpio_put(dev_priv); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h index ac01bb19cc..a86a79408a 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.h +++ b/drivers/gpu/drm/i915/display/intel_dpll.h @@ -13,6 +13,7 @@ struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_dpll_hw_state; enum pipe; void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv); @@ -22,6 +23,8 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc); int i9xx_calc_dpll_params(int refclk, struct dpll *clock); u32 i9xx_dpll_compute_fp(const struct dpll *dpll); +void i9xx_dpll_get_hw_state(struct intel_crtc *crtc, + struct intel_dpll_hw_state *dpll_hw_state); void vlv_compute_dpll(struct intel_crtc_state *crtc_state); void chv_compute_dpll(struct intel_crtc_state *crtc_state); @@ -39,12 +42,9 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, struct dpll *best_clock); int chv_calc_dpll_params(int refclk, struct dpll *pll_clock); -void i9xx_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); -void vlv_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); -void chv_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); +void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state); +void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state); +void chv_crtc_clock_get(struct intel_crtc_state *crtc_state); void assert_pll_enabled(struct drm_i915_private *i915, enum 
pipe pipe); void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe); diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index b6d2441074..90998b0373 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -24,6 +24,7 @@ #include #include +#include "bxt_dpio_phy_regs.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" @@ -64,7 +65,8 @@ struct intel_shared_dpll_funcs { * the pll is not already enabled. */ void (*enable)(struct drm_i915_private *i915, - struct intel_shared_dpll *pll); + struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *dpll_hw_state); /* * Hook for disabling the pll, called from intel_disable_shared_dpll() @@ -81,7 +83,7 @@ struct intel_shared_dpll_funcs { */ bool (*get_hw_state)(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state); + struct intel_dpll_hw_state *dpll_hw_state); /* * Hook for calculating the pll's output frequency based on its passed @@ -89,7 +91,7 @@ struct intel_shared_dpll_funcs { */ int (*get_freq)(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state); + const struct intel_dpll_hw_state *dpll_hw_state); }; struct intel_dpll_mgr { @@ -107,8 +109,8 @@ struct intel_dpll_mgr { struct intel_crtc *crtc, struct intel_encoder *encoder); void (*update_ref_clks)(struct drm_i915_private *i915); - void (*dump_hw_state)(struct drm_i915_private *i915, - const struct intel_dpll_hw_state *hw_state); + void (*dump_hw_state)(struct drm_printer *p, + const struct intel_dpll_hw_state *dpll_hw_state); bool (*compare_hw_state)(const struct intel_dpll_hw_state *a, const struct intel_dpll_hw_state *b); }; @@ -227,7 +229,7 @@ static void _intel_enable_shared_dpll(struct drm_i915_private *i915, if (pll->info->power_domain) pll->wakeref = intel_display_power_get(i915, pll->info->power_domain); - pll->info->funcs->enable(i915, pll); + pll->info->funcs->enable(i915, pll, &pll->state.hw_state); pll->on = true; } @@ -352,7 +354,7 @@ intel_dpll_mask_all(struct drm_i915_private *i915) static struct intel_shared_dpll * intel_find_shared_dpll(struct intel_atomic_state *state, const struct intel_crtc *crtc, - const struct intel_dpll_hw_state *pll_state, + const struct intel_dpll_hw_state *dpll_hw_state, unsigned long dpll_mask) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); @@ -379,9 +381,9 @@ intel_find_shared_dpll(struct intel_atomic_state *state, continue; } - if (memcmp(pll_state, + if (memcmp(dpll_hw_state, &shared_dpll[pll->index].hw_state, - sizeof(*pll_state)) == 0) { + sizeof(*dpll_hw_state)) == 0) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n", crtc->base.base.id, crtc->base.name, @@ -430,14 +432,14 @@ static void intel_reference_shared_dpll(struct intel_atomic_state *state, const struct intel_crtc *crtc, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) + const struct intel_dpll_hw_state *dpll_hw_state) { struct intel_shared_dpll_state *shared_dpll; shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); if (shared_dpll[pll->index].pipe_mask == 0) - shared_dpll[pll->index].hw_state = *pll_state; + shared_dpll[pll->index].hw_state = *dpll_hw_state; intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]); } @@ -519,8 +521,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state 
*state) static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + struct intel_dpll_hw_state *dpll_hw_state) { + struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx; const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; u32 val; @@ -553,17 +556,19 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915) } static void ibx_pch_dpll_enable(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx; const enum intel_dpll_id id = pll->info->id; /* PCH refclock must be enabled first */ ibx_assert_pch_refclk_enabled(i915); - intel_de_write(i915, PCH_FP0(id), pll->state.hw_state.fp0); - intel_de_write(i915, PCH_FP1(id), pll->state.hw_state.fp1); + intel_de_write(i915, PCH_FP0(id), hw_state->fp0); + intel_de_write(i915, PCH_FP1(id), hw_state->fp1); - intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll); + intel_de_write(i915, PCH_DPLL(id), hw_state->dpll); /* Wait for the clocks to stabilize. */ intel_de_posting_read(i915, PCH_DPLL(id)); @@ -574,7 +579,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *i915, * * So write it again. */ - intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll); + intel_de_write(i915, PCH_DPLL(id), hw_state->dpll); intel_de_posting_read(i915, PCH_DPLL(id)); udelay(200); } @@ -634,21 +639,25 @@ static int ibx_get_dpll(struct intel_atomic_state *state, return 0; } -static void ibx_dump_hw_state(struct drm_i915_private *i915, - const struct intel_dpll_hw_state *hw_state) +static void ibx_dump_hw_state(struct drm_printer *p, + const struct intel_dpll_hw_state *dpll_hw_state) { - drm_dbg_kms(&i915->drm, - "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " - "fp0: 0x%x, fp1: 0x%x\n", - hw_state->dpll, - hw_state->dpll_md, - hw_state->fp0, - hw_state->fp1); + const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx; + + drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " + "fp0: 0x%x, fp1: 0x%x\n", + hw_state->dpll, + hw_state->dpll_md, + hw_state->fp0, + hw_state->fp1); } -static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *a, - const struct intel_dpll_hw_state *b) +static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a, + const struct intel_dpll_hw_state *_b) { + const struct i9xx_dpll_hw_state *a = &_a->i9xx; + const struct i9xx_dpll_hw_state *b = &_b->i9xx; + return a->dpll == b->dpll && a->dpll_md == b->dpll_md && a->fp0 == b->fp0 && @@ -677,19 +686,24 @@ static const struct intel_dpll_mgr pch_pll_mgr = { }; static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw; const enum intel_dpll_id id = pll->info->id; - intel_de_write(i915, WRPLL_CTL(id), pll->state.hw_state.wrpll); + intel_de_write(i915, WRPLL_CTL(id), hw_state->wrpll); intel_de_posting_read(i915, WRPLL_CTL(id)); udelay(20); } static void hsw_ddi_spll_enable(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *dpll_hw_state) { - intel_de_write(i915, SPLL_CTL, pll->state.hw_state.spll); + const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw; + + intel_de_write(i915, SPLL_CTL, hw_state->spll); 
intel_de_posting_read(i915, SPLL_CTL); udelay(20); } @@ -728,8 +742,9 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *i915, static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + struct intel_dpll_hw_state *dpll_hw_state) { + struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw; const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; u32 val; @@ -749,8 +764,9 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915, static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + struct intel_dpll_hw_state *dpll_hw_state) { + struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw; intel_wakeref_t wakeref; u32 val; @@ -975,11 +991,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */, static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw; int refclk; int n, p, r; - u32 wrpll = pll_state->wrpll; + u32 wrpll = hw_state->wrpll; switch (wrpll & WRPLL_REF_MASK) { case WRPLL_REF_SPECIAL_HSW: @@ -1020,11 +1037,12 @@ hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state, struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw; unsigned int p, n2, r2; hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p); - crtc_state->dpll_hw_state.wrpll = + hw_state->wrpll = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL | WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p); @@ -1099,7 +1117,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state) static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) + const struct intel_dpll_hw_state *dpll_hw_state) { int link_clock = 0; @@ -1127,11 +1145,12 @@ hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state, { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw; if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000)) return -EINVAL; - crtc_state->dpll_hw_state.spll = + hw_state->spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC; return 0; @@ -1150,11 +1169,12 @@ hsw_ddi_spll_get_dpll(struct intel_atomic_state *state, static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw; int link_clock = 0; - switch (pll_state->spll & SPLL_FREQ_MASK) { + switch (hw_state->spll & SPLL_FREQ_MASK) { case SPLL_FREQ_810MHz: link_clock = 81000; break; @@ -1225,16 +1245,21 @@ static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915) i915->display.dpll.ref_clks.nssc = 135000; } -static void hsw_dump_hw_state(struct drm_i915_private *i915, - const struct intel_dpll_hw_state *hw_state) +static void hsw_dump_hw_state(struct drm_printer *p, + const struct intel_dpll_hw_state *dpll_hw_state) { - drm_dbg_kms(&i915->drm, "dpll_hw_state: wrpll: 
0x%x spll: 0x%x\n", - hw_state->wrpll, hw_state->spll); + const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw; + + drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", + hw_state->wrpll, hw_state->spll); } -static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *a, - const struct intel_dpll_hw_state *b) +static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a, + const struct intel_dpll_hw_state *_b) { + const struct hsw_dpll_hw_state *a = &_a->hsw; + const struct hsw_dpll_hw_state *b = &_b->hsw; + return a->wrpll == b->wrpll && a->spll == b->spll; } @@ -1254,7 +1279,8 @@ static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = { }; static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *hw_state) { } @@ -1265,7 +1291,7 @@ static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915, static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + struct intel_dpll_hw_state *dpll_hw_state) { return true; } @@ -1332,26 +1358,31 @@ static const struct skl_dpll_regs skl_dpll_regs[4] = { }; static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct skl_dpll_hw_state *hw_state) { const enum intel_dpll_id id = pll->info->id; intel_de_rmw(i915, DPLL_CTRL1, - DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id), - pll->state.hw_state.ctrl1 << (id * 6)); + DPLL_CTRL1_HDMI_MODE(id) | + DPLL_CTRL1_SSC(id) | + DPLL_CTRL1_LINK_RATE_MASK(id), + hw_state->ctrl1 << (id * 6)); intel_de_posting_read(i915, DPLL_CTRL1); } static void skl_ddi_pll_enable(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl; const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; - skl_ddi_pll_write_ctrl1(i915, pll); + skl_ddi_pll_write_ctrl1(i915, pll, hw_state); - intel_de_write(i915, regs[id].cfgcr1, pll->state.hw_state.cfgcr1); - intel_de_write(i915, regs[id].cfgcr2, pll->state.hw_state.cfgcr2); + intel_de_write(i915, regs[id].cfgcr1, hw_state->cfgcr1); + intel_de_write(i915, regs[id].cfgcr2, hw_state->cfgcr2); intel_de_posting_read(i915, regs[id].cfgcr1); intel_de_posting_read(i915, regs[id].cfgcr2); @@ -1363,9 +1394,12 @@ static void skl_ddi_pll_enable(struct drm_i915_private *i915, } static void skl_ddi_dpll0_enable(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *dpll_hw_state) { - skl_ddi_pll_write_ctrl1(i915, pll); + const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl; + + skl_ddi_pll_write_ctrl1(i915, pll, hw_state); } static void skl_ddi_pll_disable(struct drm_i915_private *i915, @@ -1386,13 +1420,14 @@ static void skl_ddi_dpll0_disable(struct drm_i915_private *i915, static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + struct intel_dpll_hw_state *dpll_hw_state) { - u32 val; + struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl; const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; bool ret; + u32 val; wakeref = 
intel_display_power_get_if_enabled(i915, POWER_DOMAIN_DISPLAY_CORE); @@ -1423,8 +1458,9 @@ out: static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + struct intel_dpll_hw_state *dpll_hw_state) { + struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl; const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; @@ -1695,16 +1731,17 @@ skip_remaining_dividers: static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl; int ref_clock = i915->display.dpll.ref_clks.nssc; u32 p0, p1, p2, dco_freq; - p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK; - p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK; + p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK; + p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK; - if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1)) - p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8; + if (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1)) + p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8; else p1 = 1; @@ -1752,10 +1789,10 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, return 0; } - dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) * + dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) * ref_clock; - dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * + dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * ref_clock / 0x8000; if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0)) @@ -1767,37 +1804,35 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl; struct skl_wrpll_params wrpll_params = {}; - u32 ctrl1, cfgcr1, cfgcr2; int ret; - /* - * See comment in intel_dpll_hw_state to understand why we always use 0 - * as the DPLL id in this function. - */ - ctrl1 = DPLL_CTRL1_OVERRIDE(0); - - ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); - ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, i915->display.dpll.ref_clks.nssc, &wrpll_params); if (ret) return ret; - cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | + /* + * See comment in intel_dpll_hw_state to understand why we always use 0 + * as the DPLL id in this function. 
+ */ + hw_state->ctrl1 = + DPLL_CTRL1_OVERRIDE(0) | + DPLL_CTRL1_HDMI_MODE(0); + + hw_state->cfgcr1 = + DPLL_CFGCR1_FREQ_ENABLE | DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | wrpll_params.dco_integer; - cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) | + hw_state->cfgcr2 = + DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) | DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) | DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | wrpll_params.central_freq; - crtc_state->dpll_hw_state.ctrl1 = ctrl1; - crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; - crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; - crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL, &crtc_state->dpll_hw_state); @@ -1807,6 +1842,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) static int skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { + struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl; u32 ctrl1; /* @@ -1836,18 +1872,19 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) break; } - crtc_state->dpll_hw_state.ctrl1 = ctrl1; + hw_state->ctrl1 = ctrl1; return 0; } static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl; int link_clock = 0; - switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >> + switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >> DPLL_CTRL1_LINK_RATE_SHIFT(0)) { case DPLL_CTRL1_LINK_RATE_810: link_clock = 81000; @@ -1921,16 +1958,18 @@ static int skl_get_dpll(struct intel_atomic_state *state, static int skl_ddi_pll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl; + /* * ctrl1 register is already shifted for each pll, just use 0 to get * the internal shift for each field */ - if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) - return skl_ddi_wrpll_get_freq(i915, pll, pll_state); + if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) + return skl_ddi_wrpll_get_freq(i915, pll, dpll_hw_state); else - return skl_ddi_lcpll_get_freq(i915, pll, pll_state); + return skl_ddi_lcpll_get_freq(i915, pll, dpll_hw_state); } static void skl_update_dpll_ref_clks(struct drm_i915_private *i915) @@ -1939,19 +1978,21 @@ static void skl_update_dpll_ref_clks(struct drm_i915_private *i915) i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref; } -static void skl_dump_hw_state(struct drm_i915_private *i915, - const struct intel_dpll_hw_state *hw_state) +static void skl_dump_hw_state(struct drm_printer *p, + const struct intel_dpll_hw_state *dpll_hw_state) { - drm_dbg_kms(&i915->drm, "dpll_hw_state: " - "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", - hw_state->ctrl1, - hw_state->cfgcr1, - hw_state->cfgcr2); + const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl; + + drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", + hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2); } -static bool skl_compare_hw_state(const struct intel_dpll_hw_state *a, - const struct intel_dpll_hw_state *b) +static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a, + const struct intel_dpll_hw_state *_b) { + const struct skl_dpll_hw_state *a = &_a->skl; + const struct skl_dpll_hw_state *b = 
&_b->skl; + return a->ctrl1 == b->ctrl1 && a->cfgcr1 == b->cfgcr1 && a->cfgcr2 == b->cfgcr2; @@ -1991,12 +2032,14 @@ static const struct intel_dpll_mgr skl_pll_mgr = { }; static void bxt_ddi_pll_enable(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *dpll_hw_state) { - u32 temp; + const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt; enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ enum dpio_phy phy; enum dpio_channel ch; + u32 temp; bxt_port_to_phy_channel(i915, port, &phy, &ch); @@ -2019,43 +2062,43 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915, /* Write P1 & P2 */ intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch), - PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0); + PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0); /* Write M2 integer */ intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0), - PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0); + PORT_PLL_M2_INT_MASK, hw_state->pll0); /* Write N */ intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1), - PORT_PLL_N_MASK, pll->state.hw_state.pll1); + PORT_PLL_N_MASK, hw_state->pll1); /* Write M2 fraction */ intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2), - PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2); + PORT_PLL_M2_FRAC_MASK, hw_state->pll2); /* Write M2 fraction enable */ intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3), - PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3); + PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3); /* Write coeff */ temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6)); temp &= ~PORT_PLL_PROP_COEFF_MASK; temp &= ~PORT_PLL_INT_COEFF_MASK; temp &= ~PORT_PLL_GAIN_CTL_MASK; - temp |= pll->state.hw_state.pll6; + temp |= hw_state->pll6; intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp); /* Write calibration val */ intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8), - PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8); + PORT_PLL_TARGET_CNT_MASK, hw_state->pll8); intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9), - PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9); + PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9); temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10)); temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H; temp &= ~PORT_PLL_DCO_AMP_MASK; - temp |= pll->state.hw_state.pll10; + temp |= hw_state->pll10; intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp); /* Recalibrate with new settings */ @@ -2063,7 +2106,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915, temp |= PORT_PLL_RECALIBRATE; intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp); temp &= ~PORT_PLL_10BIT_CLK_ENABLE; - temp |= pll->state.hw_state.ebb4; + temp |= hw_state->ebb4; intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp); /* Enable PLL */ @@ -2075,7 +2118,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915, drm_err(&i915->drm, "PLL %d not locked\n", port); if (IS_GEMINILAKE(i915)) { - temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN0(phy, ch)); + temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN(phy, ch, 0)); temp |= DCC_DELAY_RANGE_2; intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp); } @@ -2087,7 +2130,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915, temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch)); temp &= ~LANE_STAGGER_MASK; temp &= ~LANESTAGGER_STRAP_OVRD; - temp |= pll->state.hw_state.pcsdw12; + temp |= hw_state->pcsdw12; intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp); } @@ -2112,8 +2155,9 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *i915, static 
bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + struct intel_dpll_hw_state *dpll_hw_state) { + struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt; enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ intel_wakeref_t wakeref; enum dpio_phy phy; @@ -2245,7 +2289,7 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, const struct dpll *clk_div) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state; + struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt; int clock = crtc_state->port_clock; int vco = clk_div->vco; u32 prop_coef, int_coef, gain_ctl, targ_cnt; @@ -2283,45 +2327,47 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, else lanestagger = 0x02; - dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2); - dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22); - dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n); - dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff); + hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2); + hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22); + hw_state->pll1 = PORT_PLL_N(clk_div->n); + hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff); if (clk_div->m2 & 0x3fffff) - dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE; + hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE; - dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) | + hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) | PORT_PLL_INT_COEFF(int_coef) | PORT_PLL_GAIN_CTL(gain_ctl); - dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt); + hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt); - dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5); + hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5); - dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) | + hw_state->pll10 = PORT_PLL_DCO_AMP(15) | PORT_PLL_DCO_AMP_OVR_EN_H; - dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE; + hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE; - dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger; + hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger; return 0; } static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt; struct dpll clock; clock.m1 = 2; - clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22; - if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) - clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2); - clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1); - clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0); - clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0); + clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22; + if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) + clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, + hw_state->pll2); + clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1); + clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0); + clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0); return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock); } @@ -2402,28 +2448,26 @@ static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915) /* DSI non-SSC ref 19.2MHz */ } -static void bxt_dump_hw_state(struct drm_i915_private 
*i915, - const struct intel_dpll_hw_state *hw_state) +static void bxt_dump_hw_state(struct drm_printer *p, + const struct intel_dpll_hw_state *dpll_hw_state) { - drm_dbg_kms(&i915->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," - "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " - "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n", - hw_state->ebb0, - hw_state->ebb4, - hw_state->pll0, - hw_state->pll1, - hw_state->pll2, - hw_state->pll3, - hw_state->pll6, - hw_state->pll8, - hw_state->pll9, - hw_state->pll10, - hw_state->pcsdw12); + const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt; + + drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," + "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " + "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n", + hw_state->ebb0, hw_state->ebb4, + hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3, + hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10, + hw_state->pcsdw12); } -static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *a, - const struct intel_dpll_hw_state *b) +static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a, + const struct intel_dpll_hw_state *_b) { + const struct bxt_dpll_hw_state *a = &_a->bxt; + const struct bxt_dpll_hw_state *b = &_b->bxt; + return a->ebb0 == b->ebb0 && a->ebb4 == b->ebb4 && a->pll0 == b->pll0 && @@ -2706,7 +2750,7 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) + const struct intel_dpll_hw_state *dpll_hw_state) { /* * The PLL outputs multiple frequencies at the same time, selection is @@ -2777,17 +2821,18 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state, static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl; int ref_clock = icl_wrpll_ref_clock(i915); u32 dco_fraction; u32 p0, p1, p2, dco_freq; - p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK; - p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK; + p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK; + p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK; - if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) - p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> + if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) + p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> DPLL_CFGCR1_QDIV_RATIO_SHIFT; else p1 = 1; @@ -2819,10 +2864,10 @@ static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915, break; } - dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * + dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock; - dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> + dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> DPLL_CFGCR0_DCO_FRACTION_SHIFT; if (ehl_combo_pll_div_frac_wa_needed(i915)) @@ -2838,33 +2883,34 @@ static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915, static void icl_calc_dpll_state(struct drm_i915_private *i915, const struct skl_wrpll_params *pll_params, - struct intel_dpll_hw_state *pll_state) + struct intel_dpll_hw_state *dpll_hw_state) { + struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl; u32 dco_fraction = pll_params->dco_fraction; if (ehl_combo_pll_div_frac_wa_needed(i915)) 
dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2); - pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) | + hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) | pll_params->dco_integer; - pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) | + hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) | DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) | DPLL_CFGCR1_KDIV(pll_params->kdiv) | DPLL_CFGCR1_PDIV(pll_params->pdiv); if (DISPLAY_VER(i915) >= 12) - pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL; + hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL; else - pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400; + hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400; if (i915->display.vbt.override_afc_startup) - pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val); + hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val); } static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, u32 *target_dco_khz, - struct intel_dpll_hw_state *state, + struct icl_dpll_hw_state *hw_state, bool is_dkl) { static const u8 div1_vals[] = { 7, 5, 3, 2 }; @@ -2920,12 +2966,12 @@ static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, *target_dco_khz = dco; - state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1); + hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1); - state->mg_clktop2_coreclkctl1 = + hw_state->mg_clktop2_coreclkctl1 = MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio); - state->mg_clktop2_hsclkctl = + hw_state->mg_clktop2_hsclkctl = MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) | hsdiv | @@ -2943,9 +2989,10 @@ static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, * adapted to integer-only calculation, that's why it looks so different. */ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, - struct intel_dpll_hw_state *pll_state) + struct intel_dpll_hw_state *dpll_hw_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl; int refclk_khz = i915->display.dpll.ref_clks.nssc; int clock = crtc_state->port_clock; u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; @@ -2960,7 +3007,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, int ret; ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz, - pll_state, is_dkl); + hw_state, is_dkl); if (ret) return ret; @@ -3050,61 +3097,61 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, /* write pll_state calculations */ if (is_dkl) { - pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) | + hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) | DKL_PLL_DIV0_PROP_COEFF(prop_coeff) | DKL_PLL_DIV0_FBPREDIV(m1div) | DKL_PLL_DIV0_FBDIV_INT(m2div_int); if (i915->display.vbt.override_afc_startup) { u8 val = i915->display.vbt.override_afc_startup_val; - pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val); + hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val); } - pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) | + hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) | DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt); - pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) | + hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) | DKL_PLL_SSC_STEP_LEN(ssc_steplen) | DKL_PLL_SSC_STEP_NUM(ssc_steplog) | (use_ssc ? 
DKL_PLL_SSC_EN : 0); - pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) | + hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) | DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac); - pll_state->mg_pll_tdc_coldst_bias = + hw_state->mg_pll_tdc_coldst_bias = DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) | DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain); } else { - pll_state->mg_pll_div0 = + hw_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) | MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) | MG_PLL_DIV0_FBDIV_INT(m2div_int); - pll_state->mg_pll_div1 = + hw_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) | MG_PLL_DIV1_DITHER_DIV_2 | MG_PLL_DIV1_NDIVRATIO(1) | MG_PLL_DIV1_FBPREDIV(m1div); - pll_state->mg_pll_lf = + hw_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) | MG_PLL_LF_AFCCNTSEL_512 | MG_PLL_LF_GAINCTRL(1) | MG_PLL_LF_INT_COEFF(int_coeff) | MG_PLL_LF_PROP_COEFF(prop_coeff); - pll_state->mg_pll_frac_lock = + hw_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 | MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 | MG_PLL_FRAC_LOCK_LOCKTHRESH(10) | MG_PLL_FRAC_LOCK_DCODITHEREN | MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain); if (use_ssc || m2div_rem > 0) - pll_state->mg_pll_frac_lock |= + hw_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN; - pll_state->mg_pll_ssc = + hw_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) | MG_PLL_SSC_TYPE(2) | MG_PLL_SSC_STEPLENGTH(ssc_steplen) | @@ -3112,14 +3159,14 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, MG_PLL_SSC_FLLEN | MG_PLL_SSC_STEPSIZE(ssc_stepsize); - pll_state->mg_pll_tdc_coldst_bias = + hw_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART | MG_PLL_TDC_COLDST_IREFINT_EN | MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) | MG_PLL_TDC_TDCOVCCORR_EN | MG_PLL_TDC_TDCSEL(3); - pll_state->mg_pll_bias = + hw_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) | MG_PLL_BIAS_INIT_DCOAMP(0x3F) | MG_PLL_BIAS_BIAS_BONUS(10) | @@ -3129,17 +3176,17 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, MG_PLL_BIAS_IREFTRIM(iref_trim); if (refclk_khz == 38400) { - pll_state->mg_pll_tdc_coldst_bias_mask = + hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; - pll_state->mg_pll_bias_mask = 0; + hw_state->mg_pll_bias_mask = 0; } else { - pll_state->mg_pll_tdc_coldst_bias_mask = -1U; - pll_state->mg_pll_bias_mask = -1U; + hw_state->mg_pll_tdc_coldst_bias_mask = -1U; + hw_state->mg_pll_bias_mask = -1U; } - pll_state->mg_pll_tdc_coldst_bias &= - pll_state->mg_pll_tdc_coldst_bias_mask; - pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask; + hw_state->mg_pll_tdc_coldst_bias &= + hw_state->mg_pll_tdc_coldst_bias_mask; + hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask; } return 0; @@ -3147,31 +3194,32 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl; u32 m1, m2_int, m2_frac, div1, div2, ref_clock; u64 tmp; ref_clock = i915->display.dpll.ref_clks.nssc; if (DISPLAY_VER(i915) >= 12) { - m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK; + m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK; m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT; - m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK; + m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK; - if 
(pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) { - m2_frac = pll_state->mg_pll_bias & + if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) { + m2_frac = hw_state->mg_pll_bias & DKL_PLL_BIAS_FBDIV_FRAC_MASK; m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT; } else { m2_frac = 0; } } else { - m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; - m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; + m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; + m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; - if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) { - m2_frac = pll_state->mg_pll_div0 & + if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) { + m2_frac = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK; m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT; } else { @@ -3179,7 +3227,7 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915, } } - switch (pll_state->mg_clktop2_hsclkctl & + switch (hw_state->mg_clktop2_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2: div1 = 2; @@ -3194,11 +3242,11 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915, div1 = 7; break; default: - MISSING_CASE(pll_state->mg_clktop2_hsclkctl); + MISSING_CASE(hw_state->mg_clktop2_hsclkctl); return 0; } - div2 = (pll_state->mg_clktop2_hsclkctl & + div2 = (hw_state->mg_clktop2_hsclkctl & MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >> MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT; @@ -3389,7 +3437,6 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct icl_port_dpll *port_dpll = @@ -3408,8 +3455,7 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state, port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY]; - dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(i915, - encoder->port)); + dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder)); port_dpll->pll = intel_find_shared_dpll(state, crtc, &port_dpll->hw_state, BIT(dpll_id)); @@ -3435,15 +3481,12 @@ static int icl_compute_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(state->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); - - if (intel_phy_is_combo(i915, phy)) + if (intel_encoder_is_combo(encoder)) return icl_compute_combo_phy_dpll(state, crtc); - else if (intel_phy_is_tc(i915, phy)) + else if (intel_encoder_is_tc(encoder)) return icl_compute_tc_phy_dplls(state, crtc); - MISSING_CASE(phy); + MISSING_CASE(encoder->port); return 0; } @@ -3452,15 +3495,12 @@ static int icl_get_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(state->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); - - if (intel_phy_is_combo(i915, phy)) + if (intel_encoder_is_combo(encoder)) return icl_get_combo_phy_dpll(state, crtc, encoder); - else if (intel_phy_is_tc(i915, phy)) + else if (intel_encoder_is_tc(encoder)) return icl_get_tc_phy_dplls(state, crtc, encoder); - MISSING_CASE(phy); + MISSING_CASE(encoder->port); return -EINVAL; } @@ -3493,8 +3533,9 @@ static void icl_put_dplls(struct intel_atomic_state *state, static bool mg_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + 
struct intel_dpll_hw_state *dpll_hw_state) { + struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl; const enum intel_dpll_id id = pll->info->id; enum tc_port tc_port = icl_pll_id_to_tc_port(id); intel_wakeref_t wakeref; @@ -3559,8 +3600,9 @@ out: static bool dkl_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + struct intel_dpll_hw_state *dpll_hw_state) { + struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl; const enum intel_dpll_id id = pll->info->id; enum tc_port tc_port = icl_pll_id_to_tc_port(id); intel_wakeref_t wakeref; @@ -3630,9 +3672,10 @@ out: static bool icl_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state, + struct intel_dpll_hw_state *dpll_hw_state, i915_reg_t enable_reg) { + struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl; const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; bool ret = false; @@ -3690,24 +3733,24 @@ out: static bool combo_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + struct intel_dpll_hw_state *dpll_hw_state) { i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll); - return icl_pll_get_hw_state(i915, pll, hw_state, enable_reg); + return icl_pll_get_hw_state(i915, pll, dpll_hw_state, enable_reg); } static bool tbt_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + struct intel_dpll_hw_state *dpll_hw_state) { - return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE); + return icl_pll_get_hw_state(i915, pll, dpll_hw_state, TBT_PLL_ENABLE); } static void icl_dpll_write(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct icl_dpll_hw_state *hw_state) { - struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; const enum intel_dpll_id id = pll->info->id; i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG; @@ -3747,9 +3790,9 @@ static void icl_dpll_write(struct drm_i915_private *i915, } static void icl_mg_pll_write(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct icl_dpll_hw_state *hw_state) { - struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id); /* @@ -3790,9 +3833,9 @@ static void icl_mg_pll_write(struct drm_i915_private *i915, } static void dkl_pll_write(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct icl_dpll_hw_state *hw_state) { - struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id); u32 val; @@ -3905,13 +3948,15 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte } static void combo_pll_enable(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl; i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll); icl_pll_power_enable(i915, pll, enable_reg); - icl_dpll_write(i915, pll); + icl_dpll_write(i915, pll, hw_state); /* * DVFS pre sequence would be here, but in our driver the cdclk code @@ -3927,11 +3972,14 @@ static void combo_pll_enable(struct drm_i915_private *i915, } static void 
tbt_pll_enable(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl; + icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE); - icl_dpll_write(i915, pll); + icl_dpll_write(i915, pll, hw_state); /* * DVFS pre sequence would be here, but in our driver the cdclk code @@ -3945,16 +3993,18 @@ static void tbt_pll_enable(struct drm_i915_private *i915, } static void mg_pll_enable(struct drm_i915_private *i915, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *dpll_hw_state) { + const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl; i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll); icl_pll_power_enable(i915, pll, enable_reg); if (DISPLAY_VER(i915) >= 12) - dkl_pll_write(i915, pll); + dkl_pll_write(i915, pll, hw_state); else - icl_mg_pll_write(i915, pll); + icl_mg_pll_write(i915, pll, hw_state); /* * DVFS pre sequence would be here, but in our driver the cdclk code @@ -4026,33 +4076,36 @@ static void icl_update_dpll_ref_clks(struct drm_i915_private *i915) i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref; } -static void icl_dump_hw_state(struct drm_i915_private *i915, - const struct intel_dpll_hw_state *hw_state) +static void icl_dump_hw_state(struct drm_printer *p, + const struct intel_dpll_hw_state *dpll_hw_state) { - drm_dbg_kms(&i915->drm, - "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, " - "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, " - "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, " - "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, " - "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, " - "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n", - hw_state->cfgcr0, hw_state->cfgcr1, - hw_state->div0, - hw_state->mg_refclkin_ctl, - hw_state->mg_clktop2_coreclkctl1, - hw_state->mg_clktop2_hsclkctl, - hw_state->mg_pll_div0, - hw_state->mg_pll_div1, - hw_state->mg_pll_lf, - hw_state->mg_pll_frac_lock, - hw_state->mg_pll_ssc, - hw_state->mg_pll_bias, - hw_state->mg_pll_tdc_coldst_bias); -} - -static bool icl_compare_hw_state(const struct intel_dpll_hw_state *a, - const struct intel_dpll_hw_state *b) + const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl; + + drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, " + "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, " + "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, " + "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, " + "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, " + "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n", + hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0, + hw_state->mg_refclkin_ctl, + hw_state->mg_clktop2_coreclkctl1, + hw_state->mg_clktop2_hsclkctl, + hw_state->mg_pll_div0, + hw_state->mg_pll_div1, + hw_state->mg_pll_lf, + hw_state->mg_pll_frac_lock, + hw_state->mg_pll_ssc, + hw_state->mg_pll_bias, + hw_state->mg_pll_tdc_coldst_bias); +} + +static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a, + const struct intel_dpll_hw_state *_b) { + const struct icl_dpll_hw_state *a = &_a->icl; + const struct icl_dpll_hw_state *b = &_b->icl; + /* FIXME split combo vs. 
mg more thoroughly */ return a->cfgcr0 == b->cfgcr0 && a->cfgcr1 == b->cfgcr1 && @@ -4417,33 +4470,33 @@ void intel_update_active_dpll(struct intel_atomic_state *state, * intel_dpll_get_freq - calculate the DPLL's output frequency * @i915: i915 device * @pll: DPLL for which to calculate the output frequency - * @pll_state: DPLL state from which to calculate the output frequency + * @dpll_hw_state: DPLL state from which to calculate the output frequency * - * Return the output frequency corresponding to @pll's passed in @pll_state. + * Return the output frequency corresponding to @pll's passed in @dpll_hw_state. */ int intel_dpll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) + const struct intel_dpll_hw_state *dpll_hw_state) { if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq)) return 0; - return pll->info->funcs->get_freq(i915, pll, pll_state); + return pll->info->funcs->get_freq(i915, pll, dpll_hw_state); } /** * intel_dpll_get_hw_state - readout the DPLL's hardware state * @i915: i915 device * @pll: DPLL for which to calculate the output frequency - * @hw_state: DPLL's hardware state + * @dpll_hw_state: DPLL's hardware state * - * Read out @pll's hardware state into @hw_state. + * Read out @pll's hardware state into @dpll_hw_state. */ bool intel_dpll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + struct intel_dpll_hw_state *dpll_hw_state) { - return pll->info->funcs->get_hw_state(i915, pll, hw_state); + return pll->info->funcs->get_hw_state(i915, pll, dpll_hw_state); } static void readout_dpll_hw_state(struct drm_i915_private *i915, @@ -4514,22 +4567,24 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915) } /** - * intel_dpll_dump_hw_state - write hw_state to dmesg + * intel_dpll_dump_hw_state - dump hw_state * @i915: i915 drm device - * @hw_state: hw state to be written to the log + * @p: where to print the state to + * @dpll_hw_state: hw state to be dumped * - * Write the relevant values in @hw_state to dmesg using drm_dbg_kms. + * Dump out the relevant values in @dpll_hw_state. */ void intel_dpll_dump_hw_state(struct drm_i915_private *i915, - const struct intel_dpll_hw_state *hw_state) + struct drm_printer *p, + const struct intel_dpll_hw_state *dpll_hw_state) { if (i915->display.dpll.mgr) { - i915->display.dpll.mgr->dump_hw_state(i915, hw_state); + i915->display.dpll.mgr->dump_hw_state(p, dpll_hw_state); } else { /* fallback for platforms that don't use the shared dpll * infrastructure */ - ibx_dump_hw_state(i915, hw_state); + ibx_dump_hw_state(p, dpll_hw_state); } } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index cc0e138630..f09e513ce0 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -36,6 +36,7 @@ enum tc_port; struct drm_i915_private; +struct drm_printer; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; @@ -180,18 +181,19 @@ enum icl_port_dpll_id { ICL_PORT_DPLL_COUNT, }; -struct intel_dpll_hw_state { - /* i9xx, pch plls */ +struct i9xx_dpll_hw_state { u32 dpll; u32 dpll_md; u32 fp0; u32 fp1; +}; - /* hsw, bdw */ +struct hsw_dpll_hw_state { u32 wrpll; u32 spll; +}; - /* skl */ +struct skl_dpll_hw_state { /* * DPLL_CTRL1 has 6 bits for each each this DPLL.
We store those in * lower part of ctrl1 and they get shifted into position when writing @@ -201,20 +203,18 @@ struct intel_dpll_hw_state { u32 ctrl1; /* HDMI only, 0 when used for DP */ u32 cfgcr1, cfgcr2; +}; - /* icl */ - u32 cfgcr0; +struct bxt_dpll_hw_state { + u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12; +}; + +struct icl_dpll_hw_state { + u32 cfgcr0, cfgcr1; /* tgl */ u32 div0; - /* bxt */ - u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12; - - /* - * ICL uses the following, already defined: - * u32 cfgcr0, cfgcr1; - */ u32 mg_refclkin_ctl; u32 mg_clktop2_coreclkctl1; u32 mg_clktop2_hsclkctl; @@ -229,6 +229,55 @@ struct intel_dpll_hw_state { u32 mg_pll_tdc_coldst_bias_mask; }; +struct intel_mpllb_state { + u32 clock; /* in KHz */ + u32 ref_control; + u32 mpllb_cp; + u32 mpllb_div; + u32 mpllb_div2; + u32 mpllb_fracn1; + u32 mpllb_fracn2; + u32 mpllb_sscen; + u32 mpllb_sscstep; +}; + +struct intel_c10pll_state { + u32 clock; /* in KHz */ + u8 tx; + u8 cmn; + u8 pll[20]; +}; + +struct intel_c20pll_state { + u32 clock; /* in kHz */ + u16 tx[3]; + u16 cmn[4]; + union { + u16 mplla[10]; + u16 mpllb[11]; + }; +}; + +struct intel_cx0pll_state { + union { + struct intel_c10pll_state c10; + struct intel_c20pll_state c20; + }; + bool ssc_enabled; +}; + +struct intel_dpll_hw_state { + union { + struct i9xx_dpll_hw_state i9xx; + struct hsw_dpll_hw_state hsw; + struct skl_dpll_hw_state skl; + struct bxt_dpll_hw_state bxt; + struct icl_dpll_hw_state icl; + struct intel_mpllb_state mpllb; + struct intel_cx0pll_state cx0pll; + }; +}; + /** * struct intel_shared_dpll_state - hold the DPLL atomic state * @@ -364,10 +413,10 @@ void intel_update_active_dpll(struct intel_atomic_state *state, struct intel_encoder *encoder); int intel_dpll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state); + const struct intel_dpll_hw_state *dpll_hw_state); bool intel_dpll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state); + struct intel_dpll_hw_state *dpll_hw_state); void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_shared_dpll_swap_state(struct intel_atomic_state *state); @@ -377,7 +426,8 @@ void intel_dpll_readout_hw_state(struct drm_i915_private *i915); void intel_dpll_sanitize_state(struct drm_i915_private *i915); void intel_dpll_dump_hw_state(struct drm_i915_private *i915, - const struct intel_dpll_hw_state *hw_state); + struct drm_printer *p, + const struct intel_dpll_hw_state *dpll_hw_state); bool intel_dpll_compare_hw_state(struct drm_i915_private *i915, const struct intel_dpll_hw_state *a, const struct intel_dpll_hw_state *b); diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index e4515bf920..4baaa92cea 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -343,12 +343,13 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state) static u32 dsb_chicken(struct intel_crtc *crtc) { if (crtc->mode_flags & I915_MODE_FLAG_VRR) - return DSB_CTRL_WAIT_SAFE_WINDOW | + return DSB_SKIP_WAITS_EN | + DSB_CTRL_WAIT_SAFE_WINDOW | DSB_CTRL_NO_WAIT_VBLANK | DSB_INST_WAIT_SAFE_WINDOW | DSB_INST_NO_WAIT_VBLANK; else - return 0; + return DSB_SKIP_WAITS_EN; } static void _intel_dsb_commit(struct intel_dsb *dsb, u32 
ctrl, diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c index d3cf6a6522..bd5888ce48 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.c +++ b/drivers/gpu/drm/i915/display/intel_dsi.c @@ -64,14 +64,11 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, struct intel_connector *intel_connector = to_intel_connector(connector); const struct drm_display_mode *fixed_mode = intel_panel_fixed_mode(intel_connector, mode); - int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq; enum drm_mode_status status; drm_dbg_kms(&dev_priv->drm, "\n"); - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) - return MODE_NO_DBLESCAN; - status = intel_panel_mode_valid(intel_connector, mode); if (status != MODE_OK) return status; diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index c076da75b0..1840f5b592 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -223,7 +223,7 @@ intel_dvo_mode_valid(struct drm_connector *_connector, struct intel_dvo *intel_dvo = intel_attached_dvo(connector); const struct drm_display_mode *fixed_mode = intel_panel_fixed_mode(connector, mode); - int max_dotclk = to_i915(connector->base.dev)->max_dotclk_freq; + int max_dotclk = to_i915(connector->base.dev)->display.cdclk.max_dotclk_freq; int target_clock = mode->clock; enum drm_mode_status status; @@ -231,9 +231,6 @@ intel_dvo_mode_valid(struct drm_connector *_connector, if (status != MODE_OK) return status; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) - return MODE_NO_DBLESCAN; - /* XXX: Validate clock range */ if (fixed_mode) { diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index 3ea6470d6d..86b443433e 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -1106,7 +1106,7 @@ static int intel_fb_offset_to_xy(int *x, int *y, { struct drm_i915_private *i915 = to_i915(fb->dev); unsigned int height; - u32 alignment; + u32 alignment, unused; if (DISPLAY_VER(i915) >= 12 && !intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) && @@ -1128,8 +1128,8 @@ static int intel_fb_offset_to_xy(int *x, int *y, height = ALIGN(height, intel_tile_height(fb, color_plane)); /* Catch potential overflows early */ - if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]), - fb->offsets[color_plane])) { + if (check_add_overflow(mul_u32_u32(height, fb->pitches[color_plane]), + fb->offsets[color_plane], &unused)) { drm_dbg_kms(&i915->drm, "Bad offset 0x%08x or pitch %d for color plane %d\n", fb->offsets[color_plane], fb->pitches[color_plane], diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index b453fcbd67..984f13d8c0 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -54,6 +54,7 @@ #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fbc.h" +#include "intel_fbc_regs.h" #include "intel_frontbuffer.h" #define for_each_fbc_id(__dev_priv, __fbc_id) \ @@ -826,10 +827,36 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc) static void intel_fbc_program_workarounds(struct intel_fbc *fbc) { + struct drm_i915_private *i915 = fbc->i915; + + if (IS_SKYLAKE(i915) || IS_BROXTON(i915)) { + /* + * WaFbcHighMemBwCorruptionAvoidance:skl,bxt + * Display WA #0883: skl,bxt + */ + intel_de_rmw(i915, 
ILK_DPFC_CHICKEN(fbc->id), + 0, DPFC_DISABLE_DUMMY0); + } + + if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || + IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) { + /* + * WaFbcNukeOnHostModify:skl,kbl,cfl + * Display WA #0873: skl,kbl,cfl + */ + intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id), + 0, DPFC_NUKE_ON_ANY_MODIFICATION); + } + + /* Wa_1409120013:icl,jsl,tgl,dg1 */ + if (IS_DISPLAY_VER(i915, 11, 12)) + intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id), + 0, DPFC_CHICKEN_COMP_DUMMY_PIXEL); + /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */ - if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915)) - intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0, - DPFC_CHICKEN_FORCE_SLB_INVALIDATION); + if (DISPLAY_VER(i915) >= 11 && !IS_DG2(i915)) + intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id), + 0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION); } static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) @@ -1224,7 +1251,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, * Recommendation is to keep this combination disabled * Bspec: 50422 HSD: 14010260002 */ - if (IS_DISPLAY_VER(i915, 12, 14) && crtc_state->has_psr2) { + if (IS_DISPLAY_VER(i915, 12, 14) && crtc_state->has_sel_update) { plane_state->no_fbc_reason = "PSR2 enabled"; return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_fbc_regs.h b/drivers/gpu/drm/i915/display/intel_fbc_regs.h new file mode 100644 index 0000000000..ae0699c3c2 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_fbc_regs.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2024 Intel Corporation */ + +#ifndef __INTEL_FBC_REGS__ +#define __INTEL_FBC_REGS__ + +#include "intel_display_reg_defs.h" + +#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */ +#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */ +#define FBC_CONTROL _MMIO(0x3208) +#define FBC_CTL_EN REG_BIT(31) +#define FBC_CTL_PERIODIC REG_BIT(30) +#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16) +#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x)) +#define FBC_CTL_STOP_ON_MOD REG_BIT(15) +#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */ +#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */ +#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5) +#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x)) +#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0) +#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x)) +#define FBC_COMMAND _MMIO(0x320c) +#define FBC_CMD_COMPRESS REG_BIT(0) +#define FBC_STATUS _MMIO(0x3210) +#define FBC_STAT_COMPRESSING REG_BIT(31) +#define FBC_STAT_COMPRESSED REG_BIT(30) +#define FBC_STAT_MODIFIED REG_BIT(29) +#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0) +#define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */ +#define FBC_CTL_FENCE_DBL REG_BIT(4) +#define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2) +#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0) +#define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1) +#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2) +#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3) +#define FBC_CTL_CPU_FENCE_EN REG_BIT(1) +#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0) +#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane)) +#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */ +#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */ +#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1) +#define FBC_MOD_NUM_VALID REG_BIT(0) +#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) /* 49 reisters */ +#define FBC_TAG_MASK 
REG_GENMASK(1, 0) /* 16 tags per register */ +#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0) +#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1) +#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2) +#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3) + +#define FBC_LL_SIZE (1536) + +#define DPFC_CB_BASE _MMIO(0x3200) +#define ILK_DPFC_CB_BASE(fbc_id) _MMIO_PIPE((fbc_id), 0x43200, 0x43240) +#define DPFC_CONTROL _MMIO(0x3208) +#define ILK_DPFC_CONTROL(fbc_id) _MMIO_PIPE((fbc_id), 0x43208, 0x43248) +#define DPFC_CTL_EN REG_BIT(31) +#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */ +#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane)) +#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */ +#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */ +#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane)) +#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */ +#define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */ +#define DPFC_CTL_PLANE_BINDING_MASK REG_GENMASK(12, 11) /* lnl+ */ +#define DPFC_CTL_PLANE_BINDING(plane_id) REG_FIELD_PREP(DPFC_CTL_PLANE_BINDING_MASK, (plane_id)) +#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */ +#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */ +#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */ +#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6) +#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0) +#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1) +#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2) +#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0) +#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence)) +#define DPFC_RECOMP_CTL _MMIO(0x320c) +#define ILK_DPFC_RECOMP_CTL(fbc_id) _MMIO_PIPE((fbc_id), 0x4320c, 0x4324c) +#define DPFC_RECOMP_STALL_EN REG_BIT(27) +#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16) +#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0) +#define DPFC_STATUS _MMIO(0x3210) +#define ILK_DPFC_STATUS(fbc_id) _MMIO_PIPE((fbc_id), 0x43210, 0x43250) +#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16) +#define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0) +#define DPFC_STATUS2 _MMIO(0x3214) +#define ILK_DPFC_STATUS2(fbc_id) _MMIO_PIPE((fbc_id), 0x43214, 0x43254) +#define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0) +#define DPFC_FENCE_YOFF _MMIO(0x3218) +#define ILK_DPFC_FENCE_YOFF(fbc_id) _MMIO_PIPE((fbc_id), 0x43218, 0x43258) +#define DPFC_CHICKEN _MMIO(0x3224) +#define ILK_DPFC_CHICKEN(fbc_id) _MMIO_PIPE((fbc_id), 0x43224, 0x43264) +#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */ +#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */ +#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */ +#define DPFC_CHICKEN_FORCE_SLB_INVALIDATION REG_BIT(13) /* icl+ */ +#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */ + +#define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268) +#define FBC_STRIDE_OVERRIDE REG_BIT(15) +#define FBC_STRIDE_MASK REG_GENMASK(14, 0) +#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x)) + +#define ILK_FBC_RT_BASE _MMIO(0x2128) +#define ILK_FBC_RT_VALID REG_BIT(0) +#define SNB_FBC_FRONT_BUFFER REG_BIT(1) + +#define SNB_DPFC_CTL_SA _MMIO(0x100100) +#define SNB_DPFC_FENCE_EN REG_BIT(29) +#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0) +#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence)) +#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104) + +#define IVB_FBC_RT_BASE _MMIO(0x7020) +#define 
IVB_FBC_RT_BASE_UPPER _MMIO(0x7024) + +#define MSG_FBC_REND_STATE(fbc_id) _MMIO_PIPE((fbc_id), 0x50380, 0x50384) +#define FBC_REND_NUKE REG_BIT(2) +#define FBC_REND_CACHE_CLEAN REG_BIT(1) + +#endif /* __INTEL_FBC_REGS__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 99894a855e..bda702c2ca 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -24,7 +24,6 @@ * David Airlie */ -#include #include #include #include @@ -39,6 +38,7 @@ #include #include +#include #include #include #include @@ -58,7 +58,6 @@ struct intel_fbdev { struct intel_framebuffer *fb; struct i915_vma *vma; unsigned long vma_flags; - async_cookie_t cookie; int preferred_bpp; /* Whether or not fbdev hpd processing is temporarily suspended */ @@ -135,6 +134,29 @@ static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) return i915_gem_fb_mmap(obj, vma); } +static void intel_fbdev_fb_destroy(struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct intel_fbdev *ifbdev = container_of(fb_helper, struct intel_fbdev, helper); + + drm_fb_helper_fini(&ifbdev->helper); + + /* + * We rely on the object-free to release the VMA pinning for + * the info->screen_base mmaping. Leaking the VMA is simpler than + * trying to rectify all the possible error paths leading here. + */ + intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags); + drm_framebuffer_remove(&ifbdev->fb->base); + + drm_client_release(&fb_helper->client); + drm_fb_helper_unprepare(&ifbdev->helper); + kfree(ifbdev); +} + +__diag_push(); +__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for fb ops"); + static const struct fb_ops intelfb_ops = { .owner = THIS_MODULE, __FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev), @@ -144,8 +166,11 @@ static const struct fb_ops intelfb_ops = { .fb_pan_display = intel_fbdev_pan_display, __FB_DEFAULT_DEFERRED_OPS_DRAW(intel_fbdev), .fb_mmap = intel_fbdev_mmap, + .fb_destroy = intel_fbdev_fb_destroy, }; +__diag_pop(); + static int intelfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { @@ -153,7 +178,6 @@ static int intelfb_create(struct drm_fb_helper *helper, struct intel_framebuffer *intel_fb = ifbdev->fb; struct drm_device *dev = helper->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); const struct i915_gtt_view view = { .type = I915_GTT_VIEW_NORMAL, }; @@ -245,7 +269,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ifbdev->vma_flags = flags; intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - vga_switcheroo_client_fb_set(pdev, info); + return 0; out_unpin: @@ -271,25 +295,6 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = { .fb_dirty = intelfb_dirty, }; -static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) -{ - /* We rely on the object-free to release the VMA pinning for - * the info->screen_base mmaping. Leaking the VMA is simpler than - * trying to rectify all the possible error paths leading here. - */ - - drm_fb_helper_fini(&ifbdev->helper); - - if (ifbdev->vma) - intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags); - - if (ifbdev->fb) - drm_framebuffer_remove(&ifbdev->fb->base); - - drm_fb_helper_unprepare(&ifbdev->helper); - kfree(ifbdev); -} - /* * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible. 
* The core display code will have read out the current plane configuration, @@ -453,93 +458,6 @@ static void intel_fbdev_suspend_worker(struct work_struct *work) true); } -int intel_fbdev_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_fbdev *ifbdev; - int ret; - - if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv))) - return -ENODEV; - - ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); - if (ifbdev == NULL) - return -ENOMEM; - - mutex_init(&ifbdev->hpd_lock); - drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs); - - if (intel_fbdev_init_bios(dev, ifbdev)) - ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp; - else - ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp; - - ret = drm_fb_helper_init(dev, &ifbdev->helper); - if (ret) { - kfree(ifbdev); - return ret; - } - - dev_priv->display.fbdev.fbdev = ifbdev; - INIT_WORK(&dev_priv->display.fbdev.suspend_work, intel_fbdev_suspend_worker); - - return 0; -} - -static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) -{ - struct intel_fbdev *ifbdev = data; - - /* Due to peculiar init order wrt to hpd handling this is separate. */ - if (drm_fb_helper_initial_config(&ifbdev->helper)) - intel_fbdev_unregister(to_i915(ifbdev->helper.dev)); -} - -void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv) -{ - struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; - - if (!ifbdev) - return; - - ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev); -} - -static void intel_fbdev_sync(struct intel_fbdev *ifbdev) -{ - if (!ifbdev->cookie) - return; - - /* Only serialises with all preceding async calls, hence +1 */ - async_synchronize_cookie(ifbdev->cookie + 1); - ifbdev->cookie = 0; -} - -void intel_fbdev_unregister(struct drm_i915_private *dev_priv) -{ - struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; - - if (!ifbdev) - return; - - intel_fbdev_set_suspend(&dev_priv->drm, FBINFO_STATE_SUSPENDED, true); - - if (!current_is_async()) - intel_fbdev_sync(ifbdev); - - drm_fb_helper_unregister_info(&ifbdev->helper); -} - -void intel_fbdev_fini(struct drm_i915_private *dev_priv) -{ - struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->display.fbdev.fbdev); - - if (!ifbdev) - return; - - intel_fbdev_destroy(ifbdev); -} - /* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD * processing, fbdev will perform a full connector reprobe if a hotplug event * was received while HPD was suspended. 
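For orientation: the intel_fbdev hunks above and below drop the driver's hand-rolled async bring-up (init/initial_config_async/unregister/fini plus the async cookie sync) in favour of the kernel's generic in-kernel DRM client interface, i.e. a struct drm_client_funcs with unregister/restore/hotplug hooks registered through drm_client_init() and drm_client_register(), as intel_fbdev_setup() does further down. What follows is only a minimal sketch of that registration pattern under the same API; the my_client_* names are illustrative and not part of this patch.

#include <linux/module.h>
#include <linux/slab.h>
#include <drm/drm_client.h>
#include <drm/drm_device.h>

static void my_client_unregister(struct drm_client_dev *client)
{
	/* Tear down whatever hotplug() created, then drop the client. */
	drm_client_release(client);
	kfree(client);
}

static int my_client_restore(struct drm_client_dev *client)
{
	/* Restore this client's display configuration on lastclose. */
	return 0;
}

static int my_client_hotplug(struct drm_client_dev *client)
{
	/* Probe outputs and (re)create the client's framebuffer here. */
	return 0;
}

static const struct drm_client_funcs my_client_funcs = {
	.owner = THIS_MODULE,
	.unregister = my_client_unregister,
	.restore = my_client_restore,
	.hotplug = my_client_hotplug,
};

static void my_client_setup(struct drm_device *dev)
{
	struct drm_client_dev *client = kzalloc(sizeof(*client), GFP_KERNEL);

	if (!client)
		return;

	if (drm_client_init(dev, client, "my-client", &my_client_funcs)) {
		kfree(client);
		return;
	}

	/* The DRM core calls hotplug() on output changes from here on. */
	drm_client_register(client);
}

Because the core drives hotplug() and restore(), a client built this way no longer needs driver-private synchronisation such as the removed intel_fbdev_sync()/async cookie.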
@@ -622,15 +540,13 @@ set_suspend: intel_fbdev_hpd_set_suspend(dev_priv, state); } -void intel_fbdev_output_poll_changed(struct drm_device *dev) +static int intel_fbdev_output_poll_changed(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev; bool send_hpd; if (!ifbdev) - return; - - intel_fbdev_sync(ifbdev); + return -EINVAL; mutex_lock(&ifbdev->hpd_lock); send_hpd = !ifbdev->hpd_suspended; @@ -639,21 +555,137 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup)) drm_fb_helper_hotplug_event(&ifbdev->helper); + + return 0; } -void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv) +static int intel_fbdev_restore_mode(struct drm_i915_private *dev_priv) { struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; + int ret; if (!ifbdev) - return; + return -EINVAL; - intel_fbdev_sync(ifbdev); if (!ifbdev->vma) + return -ENOMEM; + + ret = drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper); + if (ret) + return ret; + + intel_fbdev_invalidate(ifbdev); + + return 0; +} + +/* + * Fbdev client and struct drm_client_funcs + */ + +static void intel_fbdev_client_unregister(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + struct drm_device *dev = fb_helper->dev; + struct pci_dev *pdev = to_pci_dev(dev->dev); + + if (fb_helper->info) { + vga_switcheroo_client_fb_set(pdev, NULL); + drm_fb_helper_unregister_info(fb_helper); + } else { + drm_fb_helper_unprepare(fb_helper); + drm_client_release(&fb_helper->client); + kfree(fb_helper); + } +} + +static int intel_fbdev_client_restore(struct drm_client_dev *client) +{ + struct drm_i915_private *dev_priv = to_i915(client->dev); + int ret; + + ret = intel_fbdev_restore_mode(dev_priv); + if (ret) + return ret; + + vga_switcheroo_process_delayed_switch(); + + return 0; +} + +static int intel_fbdev_client_hotplug(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + struct drm_device *dev = client->dev; + struct pci_dev *pdev = to_pci_dev(dev->dev); + int ret; + + if (dev->fb_helper) + return intel_fbdev_output_poll_changed(dev); + + ret = drm_fb_helper_init(dev, fb_helper); + if (ret) + goto err_drm_err; + + ret = drm_fb_helper_initial_config(fb_helper); + if (ret) + goto err_drm_fb_helper_fini; + + vga_switcheroo_client_fb_set(pdev, fb_helper->info); + + return 0; + +err_drm_fb_helper_fini: + drm_fb_helper_fini(fb_helper); +err_drm_err: + drm_err(dev, "Failed to setup i915 fbdev emulation (ret=%d)\n", ret); + return ret; +} + +static const struct drm_client_funcs intel_fbdev_client_funcs = { + .owner = THIS_MODULE, + .unregister = intel_fbdev_client_unregister, + .restore = intel_fbdev_client_restore, + .hotplug = intel_fbdev_client_hotplug, +}; + +void intel_fbdev_setup(struct drm_i915_private *i915) +{ + struct drm_device *dev = &i915->drm; + struct intel_fbdev *ifbdev; + int ret; + + if (!HAS_DISPLAY(i915)) return; - if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0) - intel_fbdev_invalidate(ifbdev); + ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL); + if (!ifbdev) + return; + drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs); + + i915->display.fbdev.fbdev = ifbdev; + INIT_WORK(&i915->display.fbdev.suspend_work, intel_fbdev_suspend_worker); + mutex_init(&ifbdev->hpd_lock); + if (intel_fbdev_init_bios(dev, ifbdev)) + ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp; + else + 
ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp; + + ret = drm_client_init(dev, &ifbdev->helper.client, "intel-fbdev", + &intel_fbdev_client_funcs); + if (ret) { + drm_err(dev, "Failed to register client: %d\n", ret); + goto err_drm_fb_helper_unprepare; + } + + drm_client_register(&ifbdev->helper.client); + + return; + +err_drm_fb_helper_unprepare: + drm_fb_helper_unprepare(&ifbdev->helper); + mutex_destroy(&ifbdev->hpd_lock); + kfree(ifbdev); } struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev) diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h index 04fd523a50..08de2d5b34 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.h +++ b/drivers/gpu/drm/i915/display/intel_fbdev.h @@ -14,29 +14,11 @@ struct intel_fbdev; struct intel_framebuffer; #ifdef CONFIG_DRM_FBDEV_EMULATION -int intel_fbdev_init(struct drm_device *dev); -void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv); -void intel_fbdev_unregister(struct drm_i915_private *dev_priv); -void intel_fbdev_fini(struct drm_i915_private *dev_priv); +void intel_fbdev_setup(struct drm_i915_private *dev_priv); void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous); -void intel_fbdev_output_poll_changed(struct drm_device *dev); -void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv); struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev); #else -static inline int intel_fbdev_init(struct drm_device *dev) -{ - return 0; -} - -static inline void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv) -{ -} - -static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv) -{ -} - -static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv) +static inline void intel_fbdev_setup(struct drm_i915_private *dev_priv) { } @@ -44,13 +26,6 @@ static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bo { } -static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) -{ -} - -static inline void intel_fbdev_restore_mode(struct drm_i915_private *i915) -{ -} static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev) { return NULL; diff --git a/drivers/gpu/drm/i915/display/intel_fixed.h b/drivers/gpu/drm/i915/display/intel_fixed.h new file mode 100644 index 0000000000..a327094de2 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_fixed.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2018 Intel Corporation + */ + +#ifndef _I915_FIXED_H_ +#define _I915_FIXED_H_ + +#include +#include +#include +#include + +typedef struct { + u32 val; +} uint_fixed_16_16_t; + +#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX }) + +static inline bool is_fixed16_zero(uint_fixed_16_16_t val) +{ + return val.val == 0; +} + +static inline uint_fixed_16_16_t u32_to_fixed16(u32 val) +{ + uint_fixed_16_16_t fp = { .val = val << 16 }; + + WARN_ON(val > U16_MAX); + + return fp; +} + +static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp) +{ + return DIV_ROUND_UP(fp.val, 1 << 16); +} + +static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp) +{ + return fp.val >> 16; +} + +static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1, + uint_fixed_16_16_t min2) +{ + uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) }; + + return min; +} + +static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1, + uint_fixed_16_16_t max2) +{ + uint_fixed_16_16_t max = { 
.val = max(max1.val, max2.val) }; + + return max; +} + +static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val) +{ + uint_fixed_16_16_t fp = { .val = (u32)val }; + + WARN_ON(val > U32_MAX); + + return fp; +} + +static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val, + uint_fixed_16_16_t d) +{ + return DIV_ROUND_UP(val.val, d.val); +} + +static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul) +{ + u64 tmp; + + tmp = mul_u32_u32(val, mul.val); + tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16); + WARN_ON(tmp > U32_MAX); + + return (u32)tmp; +} + +static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val, + uint_fixed_16_16_t mul) +{ + u64 tmp; + + tmp = mul_u32_u32(val.val, mul.val); + tmp = tmp >> 16; + + return clamp_u64_to_fixed16(tmp); +} + +static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d) +{ + u64 tmp; + + tmp = (u64)val << 16; + tmp = DIV_ROUND_UP_ULL(tmp, d); + + return clamp_u64_to_fixed16(tmp); +} + +static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d) +{ + u64 tmp; + + tmp = (u64)val << 16; + tmp = DIV_ROUND_UP_ULL(tmp, d.val); + WARN_ON(tmp > U32_MAX); + + return (u32)tmp; +} + +static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul) +{ + u64 tmp; + + tmp = mul_u32_u32(val, mul.val); + + return clamp_u64_to_fixed16(tmp); +} + +static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1, + uint_fixed_16_16_t add2) +{ + u64 tmp; + + tmp = (u64)add1.val + add2.val; + + return clamp_u64_to_fixed16(tmp); +} + +static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1, + u32 add2) +{ + uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2); + u64 tmp; + + tmp = (u64)add1.val + tmp_add2.val; + + return clamp_u64_to_fixed16(tmp); +} + +#endif /* _I915_FIXED_H_ */ diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index d3e03ed5b7..9c8e1e91ff 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -411,7 +411,7 @@ gmbus_wait_idle(struct drm_i915_private *i915) add_wait_queue(&i915->display.gmbus.wait_queue, &wait); intel_de_write_fw(i915, GMBUS4(i915), irq_enable); - ret = intel_de_wait_for_register_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10); + ret = intel_de_wait_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10); intel_de_write_fw(i915, GMBUS4(i915), 0); remove_wait_queue(&i915->display.gmbus.wait_queue, &wait); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 9edac27bab..d5ed4c7dfb 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -369,9 +369,9 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915) } /* Wait for the keys to load (500us) */ - ret = __intel_wait_for_register(&i915->uncore, HDCP_KEY_STATUS, - HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, - 10, 1, &val); + ret = intel_de_wait_custom(i915, HDCP_KEY_STATUS, + HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, + 10, 1, &val); if (ret) return ret; else if (!(val & HDCP_KEY_LOAD_STATUS)) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c index 302bff75b0..35823e1f65 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c @@ -13,6 +13,12 @@ #include "intel_hdcp_gsc.h" #include "intel_hdcp_gsc_message.h" +struct intel_hdcp_gsc_message { + struct i915_vma *vma; + void *hdcp_cmd_in; + void *hdcp_cmd_out; +}; + bool 
intel_hdcp_gsc_cs_required(struct drm_i915_private *i915) { return DISPLAY_VER(i915) >= 14; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h index eba2057c5a..5f610df61c 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h @@ -10,12 +10,7 @@ #include struct drm_i915_private; - -struct intel_hdcp_gsc_message { - struct i915_vma *vma; - void *hdcp_cmd_in; - void *hdcp_cmd_out; -}; +struct intel_hdcp_gsc_message; bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915); ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 90d2236fed..5f6deceaf8 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -114,6 +114,8 @@ static u32 g4x_infoframe_enable(unsigned int type) return VIDEO_DIP_ENABLE_GAMUT; case DP_SDP_VSC: return 0; + case DP_SDP_ADAPTIVE_SYNC: + return 0; case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_ENABLE_AVI; case HDMI_INFOFRAME_TYPE_SPD: @@ -137,6 +139,8 @@ static u32 hsw_infoframe_enable(unsigned int type) return VIDEO_DIP_ENABLE_GMP_HSW; case DP_SDP_VSC: return VIDEO_DIP_ENABLE_VSC_HSW; + case DP_SDP_ADAPTIVE_SYNC: + return VIDEO_DIP_ENABLE_AS_ADL; case DP_SDP_PPS: return VDIP_ENABLE_PPS; case HDMI_INFOFRAME_TYPE_AVI: @@ -164,6 +168,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i); case DP_SDP_VSC: return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i); + case DP_SDP_ADAPTIVE_SYNC: + return ADL_TVIDEO_DIP_AS_SDP_DATA(cpu_transcoder, i); case DP_SDP_PPS: return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i); case HDMI_INFOFRAME_TYPE_AVI: @@ -186,6 +192,8 @@ static int hsw_dip_data_size(struct drm_i915_private *dev_priv, switch (type) { case DP_SDP_VSC: return VIDEO_DIP_VSC_DATA_SIZE; + case DP_SDP_ADAPTIVE_SYNC: + return VIDEO_DIP_ASYNC_DATA_SIZE; case DP_SDP_PPS: return VIDEO_DIP_PPS_DATA_SIZE; case HDMI_PACKET_TYPE_GAMUT_METADATA: @@ -563,6 +571,9 @@ static u32 hsw_infoframes_enabled(struct intel_encoder *encoder, if (DISPLAY_VER(dev_priv) >= 10) mask |= VIDEO_DIP_ENABLE_DRM_GLK; + if (HAS_AS_SDP(dev_priv)) + mask |= VIDEO_DIP_ENABLE_AS_ADL; + return val & mask; } @@ -570,6 +581,7 @@ static const u8 infoframe_type_to_idx[] = { HDMI_PACKET_TYPE_GENERAL_CONTROL, HDMI_PACKET_TYPE_GAMUT_METADATA, DP_SDP_VSC, + DP_SDP_ADAPTIVE_SYNC, HDMI_INFOFRAME_TYPE_AVI, HDMI_INFOFRAME_TYPE_SPD, HDMI_INFOFRAME_TYPE_VENDOR, @@ -1212,7 +1224,7 @@ static void hsw_set_infoframes(struct intel_encoder *encoder, val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW | - VIDEO_DIP_ENABLE_DRM_GLK); + VIDEO_DIP_ENABLE_DRM_GLK | VIDEO_DIP_ENABLE_AS_ADL); if (!enable) { intel_de_write(dev_priv, reg, val); @@ -1832,7 +1844,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, bool has_hdmi_sink) { struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); - enum phy phy = intel_port_to_phy(dev_priv, hdmi_to_dig_port(hdmi)->base.port); + struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base; if (clock < 25000) return MODE_CLOCK_LOW; @@ -1854,11 +1866,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, return MODE_CLOCK_RANGE; /* ICL+ combo PHY PLL can't generate 500-533.2 MHz */ - if (intel_phy_is_combo(dev_priv, phy) && clock > 500000 && clock < 
533200) + if (intel_encoder_is_combo(encoder) && clock > 500000 && clock < 533200) return MODE_CLOCK_RANGE; /* ICL+ TC PHY PLL can't generate 500-532.8 MHz */ - if (intel_phy_is_tc(dev_priv, phy) && clock > 500000 && clock < 532800) + if (intel_encoder_is_tc(encoder) && clock > 500000 && clock < 532800) return MODE_CLOCK_RANGE; /* @@ -1981,7 +1993,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); enum drm_mode_status status; int clock = mode->clock; - int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq; bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state); bool ycbcr_420_only; enum intel_output_format sink_format; @@ -2664,8 +2676,9 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, drm_scdc_set_scrambling(connector, scrambling); } -static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) +static u8 chv_encoder_to_ddc_pin(struct intel_encoder *encoder) { + enum port port = encoder->port; u8 ddc_pin; switch (port) { @@ -2686,8 +2699,9 @@ static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) return ddc_pin; } -static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) +static u8 bxt_encoder_to_ddc_pin(struct intel_encoder *encoder) { + enum port port = encoder->port; u8 ddc_pin; switch (port) { @@ -2705,9 +2719,9 @@ static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) return ddc_pin; } -static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv, - enum port port) +static u8 cnp_encoder_to_ddc_pin(struct intel_encoder *encoder) { + enum port port = encoder->port; u8 ddc_pin; switch (port) { @@ -2731,22 +2745,23 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv, return ddc_pin; } -static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) +static u8 icl_encoder_to_ddc_pin(struct intel_encoder *encoder) { - enum phy phy = intel_port_to_phy(dev_priv, port); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; - if (intel_phy_is_combo(dev_priv, phy)) + if (intel_encoder_is_combo(encoder)) return GMBUS_PIN_1_BXT + port; - else if (intel_phy_is_tc(dev_priv, phy)) - return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port); + else if (intel_encoder_is_tc(encoder)) + return GMBUS_PIN_9_TC1_ICP + intel_encoder_to_tc(encoder); drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port)); return GMBUS_PIN_2_BXT; } -static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) +static u8 mcc_encoder_to_ddc_pin(struct intel_encoder *encoder) { - enum phy phy = intel_port_to_phy(dev_priv, port); + enum phy phy = intel_encoder_to_phy(encoder); u8 ddc_pin; switch (phy) { @@ -2767,11 +2782,12 @@ static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) return ddc_pin; } -static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) +static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder) { - enum phy phy = intel_port_to_phy(dev_priv, port); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum phy phy = intel_encoder_to_phy(encoder); - WARN_ON(port == PORT_C); + WARN_ON(encoder->port == PORT_C); /* * Pin mapping for RKL depends on which PCH is present. 
With TGP, the @@ -2785,11 +2801,12 @@ static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) return GMBUS_PIN_1_BXT + phy; } -static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port port) +static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder) { - enum phy phy = intel_port_to_phy(i915, port); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_encoder_to_phy(encoder); - drm_WARN_ON(&i915->drm, port == PORT_A); + drm_WARN_ON(&i915->drm, encoder->port == PORT_A); /* * Pin mapping for GEN9 BC depends on which PCH is present. With TGP, @@ -2803,16 +2820,16 @@ static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port po return GMBUS_PIN_1_BXT + phy; } -static u8 dg1_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) +static u8 dg1_encoder_to_ddc_pin(struct intel_encoder *encoder) { - return intel_port_to_phy(dev_priv, port) + 1; + return intel_encoder_to_phy(encoder) + 1; } -static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) +static u8 adls_encoder_to_ddc_pin(struct intel_encoder *encoder) { - enum phy phy = intel_port_to_phy(dev_priv, port); + enum phy phy = intel_encoder_to_phy(encoder); - WARN_ON(port == PORT_B || port == PORT_C); + WARN_ON(encoder->port == PORT_B || encoder->port == PORT_C); /* * Pin mapping for ADL-S requires TC pins for all combo phy outputs @@ -2824,9 +2841,9 @@ static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port return GMBUS_PIN_9_TC1_ICP + phy - PHY_B; } -static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv, - enum port port) +static u8 g4x_encoder_to_ddc_pin(struct intel_encoder *encoder) { + enum port port = encoder->port; u8 ddc_pin; switch (port) { @@ -2850,30 +2867,29 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv, static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; u8 ddc_pin; if (IS_ALDERLAKE_S(dev_priv)) - ddc_pin = adls_port_to_ddc_pin(dev_priv, port); + ddc_pin = adls_encoder_to_ddc_pin(encoder); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) - ddc_pin = dg1_port_to_ddc_pin(dev_priv, port); + ddc_pin = dg1_encoder_to_ddc_pin(encoder); else if (IS_ROCKETLAKE(dev_priv)) - ddc_pin = rkl_port_to_ddc_pin(dev_priv, port); + ddc_pin = rkl_encoder_to_ddc_pin(encoder); else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv)) - ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port); + ddc_pin = gen9bc_tgp_encoder_to_ddc_pin(encoder); else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && HAS_PCH_TGP(dev_priv)) - ddc_pin = mcc_port_to_ddc_pin(dev_priv, port); + ddc_pin = mcc_encoder_to_ddc_pin(encoder); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - ddc_pin = icl_port_to_ddc_pin(dev_priv, port); + ddc_pin = icl_encoder_to_ddc_pin(encoder); else if (HAS_PCH_CNP(dev_priv)) - ddc_pin = cnp_port_to_ddc_pin(dev_priv, port); + ddc_pin = cnp_encoder_to_ddc_pin(encoder); else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - ddc_pin = bxt_port_to_ddc_pin(dev_priv, port); + ddc_pin = bxt_encoder_to_ddc_pin(encoder); else if (IS_CHERRYVIEW(dev_priv)) - ddc_pin = chv_port_to_ddc_pin(dev_priv, port); + ddc_pin = chv_encoder_to_ddc_pin(encoder); else - ddc_pin = g4x_port_to_ddc_pin(dev_priv, port); + ddc_pin = g4x_encoder_to_ddc_pin(encoder); return ddc_pin; } diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c 
b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index 76076509f7..d270bb7b94 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -1444,7 +1444,7 @@ void intel_hpd_enable_detection(struct intel_encoder *encoder) void intel_hpd_irq_setup(struct drm_i915_private *i915) { - if (i915->display_irqs_enabled && i915->display.funcs.hotplug) + if (i915->display.irq.display_irqs_enabled && i915->display.funcs.hotplug) i915->display.funcs.hotplug->hpd_irq_setup(i915); } diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index 5863763de5..93e6cac9a4 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -72,7 +72,7 @@ #include "i915_drv.h" #include "i915_irq.h" -#include "i915_reg.h" +#include "intel_audio_regs.h" #include "intel_de.h" #include "intel_lpe_audio.h" #include "intel_pci_config.h" diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 221f5c6c87..8b89590734 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -392,16 +392,13 @@ intel_lvds_mode_valid(struct drm_connector *_connector, struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *fixed_mode = intel_panel_fixed_mode(connector, mode); - int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq; + int max_pixclk = to_i915(connector->base.dev)->display.cdclk.max_dotclk_freq; enum drm_mode_status status; status = intel_cpu_transcoder_mode_valid(i915, mode); if (status != MODE_OK) return status; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) - return MODE_NO_DBLESCAN; - status = intel_panel_mode_valid(connector, mode); if (status != MODE_OK) return status; diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index fcbb083318..68bd5101ec 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -27,7 +27,6 @@ #include #include -#include #include #include @@ -263,7 +262,6 @@ struct intel_opregion { struct opregion_asle *asle; struct opregion_asle_ext *asle_ext; void *rvda; - void *vbt_firmware; const void *vbt; u32 vbt_size; struct work_struct asle_work; @@ -869,46 +867,6 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = { { } }; -static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) -{ - struct intel_opregion *opregion = dev_priv->display.opregion; - const struct firmware *fw = NULL; - const char *name = dev_priv->display.params.vbt_firmware; - int ret; - - if (!name || !*name) - return -ENOENT; - - ret = request_firmware(&fw, name, dev_priv->drm.dev); - if (ret) { - drm_err(&dev_priv->drm, - "Requesting VBT firmware \"%s\" failed (%d)\n", - name, ret); - return ret; - } - - if (intel_bios_is_valid_vbt(dev_priv, fw->data, fw->size)) { - opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL); - if (opregion->vbt_firmware) { - drm_dbg_kms(&dev_priv->drm, - "Found valid VBT firmware \"%s\"\n", name); - opregion->vbt = opregion->vbt_firmware; - opregion->vbt_size = fw->size; - ret = 0; - } else { - ret = -ENOMEM; - } - } else { - drm_dbg_kms(&dev_priv->drm, "Invalid VBT firmware \"%s\"\n", - name); - ret = -EINVAL; - } - - release_firmware(fw); - - return ret; -} - int intel_opregion_setup(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion; @@ -1006,9 
+964,6 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) drm_dbg(&dev_priv->drm, "Mailbox #2 for backlight present\n"); } - if (intel_load_vbt_firmware(dev_priv) == 0) - goto out; - if (dmi_check_system(intel_no_opregion_vbt)) goto out; @@ -1176,6 +1131,16 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con return drm_edid; } +bool intel_opregion_vbt_present(struct drm_i915_private *i915) +{ + struct intel_opregion *opregion = i915->display.opregion; + + if (!opregion || !opregion->vbt) + return false; + + return true; +} + const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size) { struct intel_opregion *opregion = i915->display.opregion; @@ -1186,7 +1151,7 @@ const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size) if (size) *size = opregion->vbt_size; - return opregion->vbt; + return kmemdup(opregion->vbt, opregion->vbt_size, GFP_KERNEL); } bool intel_opregion_headless_sku(struct drm_i915_private *i915) @@ -1312,7 +1277,6 @@ void intel_opregion_cleanup(struct drm_i915_private *i915) memunmap(opregion->header); if (opregion->rvda) memunmap(opregion->rvda); - kfree(opregion->vbt_firmware); kfree(opregion); i915->display.opregion = NULL; } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h index 0bec224f71..4b2b8e7526 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.h +++ b/drivers/gpu/drm/i915/display/intel_opregion.h @@ -53,6 +53,7 @@ int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv); const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector); +bool intel_opregion_vbt_present(struct drm_i915_private *i915); const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size); bool intel_opregion_headless_sku(struct drm_i915_private *i915); @@ -119,6 +120,11 @@ intel_opregion_get_edid(struct intel_connector *connector) return NULL; } +static inline bool intel_opregion_vbt_present(struct drm_i915_private *i915) +{ + return false; +} + static inline const void * intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size) { diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 2b1392d5a9..1c2099ed55 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -972,10 +972,11 @@ static int check_overlay_dst(struct intel_overlay *overlay, rec->dst_width, rec->dst_height); clipped = req; - drm_rect_intersect(&clipped, &crtc_state->pipe_src); - if (!drm_rect_visible(&clipped) || - !drm_rect_equals(&clipped, &req)) + if (!drm_rect_intersect(&clipped, &crtc_state->pipe_src)) + return -EINVAL; + + if (!drm_rect_equals(&clipped, &req)) return -EINVAL; return 0; diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 073ea3166c..6f4ff6a89c 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -47,10 +47,12 @@ bool intel_panel_use_ssc(struct drm_i915_private *i915) { - if (i915->display.params.panel_use_ssc >= 0) - return i915->display.params.panel_use_ssc != 0; - return i915->display.vbt.lvds_use_ssc && - !intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE); + struct intel_display *display = &i915->display; + + if (display->params.panel_use_ssc >= 0) + return display->params.panel_use_ssc != 0; + return 
display->vbt.lvds_use_ssc && + !intel_has_quirk(display, QUIRK_LVDS_SSC_DISABLE); } const struct drm_display_mode * diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index baf679759e..826e38a9e6 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -474,7 +474,7 @@ static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); /* read out port_clock from the DPLL */ - i9xx_crtc_clock_get(crtc, crtc_state); + i9xx_crtc_clock_get(crtc_state); /* * In case there is an active pipe without active ports, @@ -529,7 +529,7 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state) &crtc_state->dpll_hw_state); drm_WARN_ON(&dev_priv->drm, !pll_active); - tmp = crtc_state->dpll_hw_state.dpll; + tmp = crtc_state->dpll_hw_state.i9xx.dpll; crtc_state->pixel_multiplier = ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c index 744e332fa2..9ca981b7a1 100644 --- a/drivers/gpu/drm/i915/display/intel_pmdemand.c +++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c @@ -119,10 +119,11 @@ intel_pmdemand_update_phys_mask(struct drm_i915_private *i915, if (!encoder) return; - phy = intel_port_to_phy(i915, encoder->port); - if (intel_phy_is_tc(i915, phy)) + if (intel_encoder_is_tc(encoder)) return; + phy = intel_encoder_to_phy(encoder); + if (set_bit) pmdemand_state->active_combo_phys_mask |= BIT(phy); else @@ -222,14 +223,7 @@ static bool intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915, struct intel_encoder *encoder) { - enum phy phy; - - if (!encoder) - return false; - - phy = intel_port_to_phy(i915, encoder->port); - - return intel_phy_is_tc(i915, phy); + return encoder && intel_encoder_is_tc(encoder); } static bool diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.h b/drivers/gpu/drm/i915/display/intel_pmdemand.h index 2941a1a18b..128fd61f8f 100644 --- a/drivers/gpu/drm/i915/display/intel_pmdemand.h +++ b/drivers/gpu/drm/i915/display/intel_pmdemand.h @@ -43,9 +43,8 @@ struct intel_pmdemand_state { struct pmdemand_params params; }; -#define to_intel_pmdemand_state(x) container_of((x), \ - struct intel_pmdemand_state, \ - base) +#define to_intel_pmdemand_state(global_state) \ + container_of_const((global_state), struct intel_pmdemand_state, base) void intel_pmdemand_init_early(struct drm_i915_private *i915); int intel_pmdemand_init(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 2d65a538f8..0ccbf9a859 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -605,8 +605,7 @@ static void wait_panel_status(struct intel_dp *intel_dp, intel_de_read(dev_priv, pp_stat_reg), intel_de_read(dev_priv, pp_ctrl_reg)); - if (intel_de_wait_for_register(dev_priv, pp_stat_reg, - mask, value, 5000)) + if (intel_de_wait(dev_priv, pp_stat_reg, mask, value, 5000)) drm_err(&dev_priv->drm, "[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", dig_port->base.base.base.id, dig_port->base.base.name, @@ -1351,7 +1350,7 @@ static void pps_init_delays_bios(struct intel_dp *intel_dp, static void pps_init_delays_vbt(struct intel_dp *intel_dp, struct edp_power_seq *vbt) { - struct drm_i915_private *dev_priv = 
dp_to_i915(intel_dp); + struct intel_display *display = to_intel_display(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; *vbt = connector->panel.vbt.edp.pps; @@ -1364,9 +1363,9 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp, * just fails to power back on. Increasing the delay to 800ms * seems sufficient to avoid this problem. */ - if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) { + if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) { vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Increasing T12 panel delay as per the quirk to %d\n", vbt->t11_t12); } @@ -1671,6 +1670,37 @@ void intel_pps_setup(struct drm_i915_private *i915) i915->display.pps.mmio_base = PPS_BASE; } +static int intel_pps_show(struct seq_file *m, void *data) +{ + struct intel_connector *connector = m->private; + struct intel_dp *intel_dp = intel_attached_dp(connector); + + if (connector->base.status != connector_status_connected) + return -ENODEV; + + seq_printf(m, "Panel power up delay: %d\n", + intel_dp->pps.panel_power_up_delay); + seq_printf(m, "Panel power down delay: %d\n", + intel_dp->pps.panel_power_down_delay); + seq_printf(m, "Backlight on delay: %d\n", + intel_dp->pps.backlight_on_delay); + seq_printf(m, "Backlight off delay: %d\n", + intel_dp->pps.backlight_off_delay); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(intel_pps); + +void intel_pps_connector_debugfs_add(struct intel_connector *connector) +{ + struct dentry *root = connector->base.debugfs_entry; + int connector_type = connector->base.connector_type; + + if (connector_type == DRM_MODE_CONNECTOR_eDP) + debugfs_create_file("i915_panel_timings", 0444, root, + connector, &intel_pps_fops); +} + void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) { i915_reg_t pp_reg; diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h index a2c2467e3c..07ef96ca8d 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.h +++ b/drivers/gpu/drm/i915/display/intel_pps.h @@ -51,6 +51,8 @@ void vlv_pps_init(struct intel_encoder *encoder, void intel_pps_unlock_regs_wa(struct drm_i915_private *i915); void intel_pps_setup(struct drm_i915_private *i915); +void intel_pps_connector_debugfs_add(struct intel_connector *connector); + void assert_pps_unlocked(struct drm_i915_private *i915, enum pipe pipe); #endif /* __INTEL_PPS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index aabd018bd7..3c7da86222 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -171,14 +171,27 @@ * * The rest of the bits are more self-explanatory and/or * irrelevant for normal operation. + * + * Description of intel_crtc_state variables. has_psr, has_panel_replay and + * has_sel_update: + * + * has_psr (alone): PSR1 + * has_psr + has_sel_update: PSR2 + * has_psr + has_panel_replay: Panel Replay + * has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update + * + * Description of some intel_psr varibles. 
enabled, panel_replay_enabled, + * sel_update_enabled + * + * enabled (alone): PSR1 + * enabled + sel_update_enabled: PSR2 + * enabled + panel_replay_enabled: Panel Replay + * enabled + panel_replay_enabled + sel_update_enabled: Panel Replay SU */ #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \ (intel_dp)->psr.source_support) -#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \ - (intel_dp)->psr.source_panel_replay_support) - bool intel_encoder_can_psr(struct intel_encoder *encoder) { if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST) @@ -330,6 +343,9 @@ static void psr_irq_control(struct intel_dp *intel_dp) enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 mask; + if (intel_dp->psr.panel_replay_enabled) + return; + mask = psr_irq_psr_error_bit_get(intel_dp); if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ) mask |= psr_irq_post_exit_bit_get(intel_dp) | @@ -619,40 +635,59 @@ static bool psr2_su_region_et_valid(struct intel_dp *intel_dp) return false; } -static void intel_psr_enable_sink(struct intel_dp *intel_dp) +static unsigned int intel_psr_get_enable_sink_offset(struct intel_dp *intel_dp) +{ + return intel_dp->psr.panel_replay_enabled ? + PANEL_REPLAY_CONFIG : DP_PSR_EN_CFG; +} + +/* + * Note: Most of the bits are same in PANEL_REPLAY_CONFIG and DP_PSR_EN_CFG. We + * are relying on PSR definitions on these "common" bits. + */ +void intel_psr_enable_sink(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 dpcd_val = DP_PSR_ENABLE; - if (intel_dp->psr.panel_replay_enabled) - return; - - if (intel_dp->psr.psr2_enabled) { + if (crtc_state->has_sel_update) { /* Enable ALPM at sink for psr2 */ - drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, - DP_ALPM_ENABLE | - DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE); + if (!crtc_state->has_panel_replay) { + drm_dp_dpcd_writeb(&intel_dp->aux, + DP_RECEIVER_ALPM_CONFIG, + DP_ALPM_ENABLE | + DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE); + + if (crtc_state->enable_psr2_su_region_et) + dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET; + } dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; - if (psr2_su_region_et_valid(intel_dp)) - dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET; } else { if (intel_dp->psr.link_standby) dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; - if (DISPLAY_VER(dev_priv) >= 8) + if (!crtc_state->has_panel_replay && DISPLAY_VER(dev_priv) >= 8) dpcd_val |= DP_PSR_CRC_VERIFICATION; } - if (intel_dp->psr.req_psr2_sdp_prior_scanline) + if (crtc_state->has_panel_replay) + dpcd_val |= DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN | + DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN; + + if (crtc_state->req_psr2_sdp_prior_scanline) dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE; if (intel_dp->psr.entry_setup_frames > 0) dpcd_val |= DP_PSR_FRAME_CAPTURE; - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); + drm_dp_dpcd_writeb(&intel_dp->aux, + intel_psr_get_enable_sink_offset(intel_dp), + dpcd_val); - drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); + if (intel_dp_is_edp(intel_dp)) + drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); } static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) @@ -1126,6 +1161,141 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d return true; } +/* + * See Bspec: 71632 for the table + * + * Silence_period = tSilence,Min + ((tSilence,Max - tSilence,Min) / 2) + * + * Half cycle duration: + * + * Link rates 1.62 - 4.32 
and tLFPS_Cycle = 70 ns + * FLOOR( (Link Rate * tLFPS_Cycle) / (2 * 10) ) + * + * Link rates 5.4 - 8.1 + * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ] = 10 + * LFPS Period chosen is the mid-point of the min:max values from the table + * FLOOR( LFPS Period in Symbol clocks / + * (2 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ]) ) + */ +static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate, + int *silence_period, + int *lfps_half_cycle) +{ + switch (link_rate) { + case 162000: + *silence_period = 20; + *lfps_half_cycle = 5; + break; + case 216000: + *silence_period = 27; + *lfps_half_cycle = 7; + break; + case 243000: + *silence_period = 31; + *lfps_half_cycle = 8; + break; + case 270000: + *silence_period = 34; + *lfps_half_cycle = 9; + break; + case 324000: + *silence_period = 41; + *lfps_half_cycle = 11; + break; + case 432000: + *silence_period = 56; + *lfps_half_cycle = 15; + break; + case 540000: + *silence_period = 69; + *lfps_half_cycle = 12; + break; + case 648000: + *silence_period = 84; + *lfps_half_cycle = 15; + break; + case 675000: + *silence_period = 87; + *lfps_half_cycle = 15; + break; + case 810000: + *silence_period = 104; + *lfps_half_cycle = 19; + break; + default: + *silence_period = *lfps_half_cycle = -1; + return false; + } + return true; +} + +/* + * AUX-Less Wake Time = CEILING( ((PHY P2 to P0) + tLFPS_Period, Max+ + * tSilence, Max+ tPHY Establishment + tCDS) / tline) + * For the "PHY P2 to P0" latency see the PHY Power Control page + * (PHY P2 to P0) : https://gfxspecs.intel.com/Predator/Home/Index/68965 + * : 12 us + * The tLFPS_Period, Max term is 800ns + * The tSilence, Max term is 180ns + * The tPHY Establishment (a.k.a. t1) term is 50us + * The tCDS term is 1 or 2 times t2 + * t2 = Number ML_PHY_LOCK * tML_PHY_LOCK + * Number ML_PHY_LOCK = ( 7 + CEILING( 6.5us / tML_PHY_LOCK ) + 1) + * Rounding up the 6.5us padding to the next ML_PHY_LOCK boundary and + * adding the "+ 1" term ensures all ML_PHY_LOCK sequences that start + * within the CDS period complete within the CDS period regardless of + * entry into the period + * tML_PHY_LOCK = TPS4 Length * ( 10 / (Link Rate in MHz) ) + * TPS4 Length = 252 Symbols + */ +static int _lnl_compute_aux_less_wake_time(int port_clock) +{ + int tphy2_p2_to_p0 = 12 * 1000; + int tlfps_period_max = 800; + int tsilence_max = 180; + int t1 = 50 * 1000; + int tps4 = 252; + int tml_phy_lock = 1000 * 1000 * tps4 * 10 / port_clock; + int num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1; + int t2 = num_ml_phy_lock * tml_phy_lock; + int tcds = 1 * t2; + + return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max + + t1 + tcds, 1000); +} + +static int _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + int aux_less_wake_time, aux_less_wake_lines, silence_period, + lfps_half_cycle; + + aux_less_wake_time = + _lnl_compute_aux_less_wake_time(crtc_state->port_clock); + aux_less_wake_lines = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, + aux_less_wake_time); + + if (!_lnl_get_silence_period_and_lfps_half_cycle(crtc_state->port_clock, + &silence_period, + &lfps_half_cycle)) + return false; + + if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK || + silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK || + lfps_half_cycle > PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK) + return false; + + if (i915->display.params.psr_safest_params) + aux_less_wake_lines = 
ALPM_CTL_AUX_LESS_WAKE_TIME_MASK; + + intel_dp->psr.alpm_parameters.fast_wake_lines = aux_less_wake_lines; + intel_dp->psr.alpm_parameters.silence_period_sym_clocks = silence_period; + intel_dp->psr.alpm_parameters.lfps_half_cycle_num_of_syms = lfps_half_cycle; + + return true; +} + static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { @@ -1142,6 +1312,9 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp, if (check_entry_lines > 15) return false; + if (!_lnl_compute_aux_less_alpm_params(intel_dp, crtc_state)) + return false; + if (i915->display.params.psr_safest_params) check_entry_lines = 15; @@ -1150,28 +1323,52 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp, return true; } +/* + * IO wake time for DISPLAY_VER < 12 is not directly mentioned in Bspec. There + * are 50 us io wake time and 32 us fast wake time. Clearly preharge pulses are + * not (improperly) included in 32 us fast wake time. 50 us - 32 us = 18 us. + */ +static int skl_io_buffer_wake_time(void) +{ + return 18; +} + +static int tgl_io_buffer_wake_time(void) +{ + return 10; +} + +static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + if (DISPLAY_VER(i915) >= 12) + return tgl_io_buffer_wake_time(); + else + return skl_io_buffer_wake_time(); +} + static bool _compute_alpm_params(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time; + int tfw_exit_latency = 20; /* eDP spec */ + int phy_wake = 4; /* eDP spec */ + int preamble = 8; /* eDP spec */ + int precharge = intel_dp_aux_fw_sync_len() - preamble; u8 max_wake_lines; - if (DISPLAY_VER(i915) >= 12) { - io_wake_time = 42; - /* - * According to Bspec it's 42us, but based on testing - * it is not enough -> use 45 us. - */ - fast_wake_time = 45; + io_wake_time = max(precharge, io_buffer_wake_time(crtc_state)) + + preamble + phy_wake + tfw_exit_latency; + fast_wake_time = precharge + preamble + phy_wake + + tfw_exit_latency; + if (DISPLAY_VER(i915) >= 12) /* TODO: Check how we can use ALPM_CTL fast wake extended field */ max_wake_lines = 12; - } else { - io_wake_time = 50; - fast_wake_time = 32; + else max_wake_lines = 8; - } io_wake_lines = intel_usecs_to_scanlines( &crtc_state->hw.adjusted_mode, io_wake_time); @@ -1435,13 +1632,14 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, if (CAN_PANEL_REPLAY(intel_dp)) crtc_state->has_panel_replay = true; - else - crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state); - if (!(crtc_state->has_panel_replay || crtc_state->has_psr)) + crtc_state->has_psr = crtc_state->has_panel_replay ? 
true : + _psr_compute_config(intel_dp, crtc_state); + + if (!crtc_state->has_psr) return; - crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state); + crtc_state->has_sel_update = intel_psr2_config_valid(intel_dp, crtc_state); } void intel_psr_get_config(struct intel_encoder *encoder, @@ -1465,7 +1663,7 @@ void intel_psr_get_config(struct intel_encoder *encoder, goto unlock; if (intel_dp->psr.panel_replay_enabled) { - pipe_config->has_panel_replay = true; + pipe_config->has_psr = pipe_config->has_panel_replay = true; } else { /* * Not possible to read EDP_PSR/PSR2_CTL registers as it is @@ -1474,7 +1672,7 @@ void intel_psr_get_config(struct intel_encoder *encoder, pipe_config->has_psr = true; } - pipe_config->has_psr2 = intel_dp->psr.psr2_enabled; + pipe_config->has_sel_update = intel_dp->psr.psr2_enabled; pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); if (!intel_dp->psr.psr2_enabled) @@ -1570,14 +1768,44 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; struct intel_psr *psr = &intel_dp->psr; + u32 alpm_ctl; - if (DISPLAY_VER(dev_priv) < 20) + if (DISPLAY_VER(dev_priv) < 20 || (!intel_dp->psr.psr2_enabled && + !intel_dp_is_edp(intel_dp))) return; - intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder), - ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE | - ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines) | - ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines)); + /* + * Panel Replay on eDP is always using ALPM aux less. I.e. no need to + * check panel support at this point. + */ + if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) { + alpm_ctl = ALPM_CTL_ALPM_ENABLE | + ALPM_CTL_ALPM_AUX_LESS_ENABLE | + ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS; + + intel_de_write(dev_priv, PORT_ALPM_CTL(cpu_transcoder), + PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE | + PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) | + PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) | + PORT_ALPM_CTL_SILENCE_PERIOD( + psr->alpm_parameters.silence_period_sym_clocks)); + + intel_de_write(dev_priv, PORT_ALPM_LFPS_CTL(cpu_transcoder), + PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) | + PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION( + psr->alpm_parameters.lfps_half_cycle_num_of_syms) | + PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION( + psr->alpm_parameters.lfps_half_cycle_num_of_syms) | + PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION( + psr->alpm_parameters.lfps_half_cycle_num_of_syms)); + } else { + alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE | + ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines); + } + + alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines); + + intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder), alpm_ctl); } static void intel_psr_enable_source(struct intel_dp *intel_dp, @@ -1585,7 +1813,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; - u32 mask; + u32 mask = 0; /* * Only HSW and BDW have PSR AUX registers that need to be setup. @@ -1599,34 +1827,46 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, * mask LPSP to avoid dependency on other drivers that might block * runtime_pm besides preventing other hw tracking issues now we * can rely on frontbuffer tracking. 
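For reference, the LFPS half-cycle values in the _lnl_get_silence_period_and_lfps_half_cycle() table above (link rates 1.62 - 4.32) follow directly from the formula quoted in its comment, FLOOR((Link Rate * tLFPS_Cycle) / (2 * 10)) with tLFPS_Cycle = 70 ns. Below is a minimal standalone sketch of that arithmetic, assuming the driver's usual port_clock convention (kHz, e.g. 162000 for 1.62 Gbps, so the bit rate is port_clock * 10 kbit/s); the helper name is illustrative and is not part of the patch.

/* Illustrative only, not part of the patch: recompute the low-rate LFPS
 * half-cycle column of the table above. One LFPS cycle is 70 ns; expressed
 * in link symbols that is (bit_rate * 70 ns) / 10, and a half cycle is half
 * of that, truncated.
 */
#include <assert.h>

static int lfps_half_cycle_low_rate(int port_clock)
{
	/* bit_rate[Hz] * 70e-9 / (2 * 10) == port_clock * 7 / 200000 */
	return port_clock * 7 / 200000;
}

int main(void)
{
	/* Spot checks against the switch table above. */
	assert(lfps_half_cycle_low_rate(162000) == 5);
	assert(lfps_half_cycle_low_rate(270000) == 9);
	assert(lfps_half_cycle_low_rate(324000) == 11);
	assert(lfps_half_cycle_low_rate(432000) == 15);
	/* 5.4 and above instead use PORT_ALPM_LFPS_CTL[LFPS Cycle Count] = 10,
	 * as described in the comment, so this helper does not apply there.
	 */
	return 0;
}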
+ * + * From bspec prior LunarLake: + * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in + * panel replay mode. + * + * From bspec beyod LunarLake: + * Panel Replay on DP: No bits are applicable + * Panel Replay on eDP: All bits are applicable */ - mask = EDP_PSR_DEBUG_MASK_MEMUP | - EDP_PSR_DEBUG_MASK_HPD; + if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp)) + mask = EDP_PSR_DEBUG_MASK_HPD; - /* - * For some unknown reason on HSW non-ULT (or at least on - * Dell Latitude E6540) external displays start to flicker - * when PSR is enabled on the eDP. SR/PC6 residency is much - * higher than should be possible with an external display. - * As a workaround leave LPSP unmasked to prevent PSR entry - * when external displays are active. - */ - if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv)) - mask |= EDP_PSR_DEBUG_MASK_LPSP; + if (intel_dp_is_edp(intel_dp)) { + mask |= EDP_PSR_DEBUG_MASK_MEMUP; - if (DISPLAY_VER(dev_priv) < 20) - mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP; + /* + * For some unknown reason on HSW non-ULT (or at least on + * Dell Latitude E6540) external displays start to flicker + * when PSR is enabled on the eDP. SR/PC6 residency is much + * higher than should be possible with an external display. + * As a workaround leave LPSP unmasked to prevent PSR entry + * when external displays are active. + */ + if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv)) + mask |= EDP_PSR_DEBUG_MASK_LPSP; - /* - * No separate pipe reg write mask on hsw/bdw, so have to unmask all - * registers in order to keep the CURSURFLIVE tricks working :( - */ - if (IS_DISPLAY_VER(dev_priv, 9, 10)) - mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; + if (DISPLAY_VER(dev_priv) < 20) + mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP; - /* allow PSR with sprite enabled */ - if (IS_HASWELL(dev_priv)) - mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE; + /* + * No separate pipe reg write mask on hsw/bdw, so have to unmask all + * registers in order to keep the CURSURFLIVE tricks working :( + */ + if (IS_DISPLAY_VER(dev_priv, 9, 10)) + mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; + + /* allow PSR with sprite enabled */ + if (IS_HASWELL(dev_priv)) + mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE; + } intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask); @@ -1645,7 +1885,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, intel_dp->psr.psr2_sel_fetch_enabled ? 
IGNORE_PSR2_HW_TRACKING : 0); - lnl_alpm_configure(intel_dp); + if (intel_dp_is_edp(intel_dp)) + lnl_alpm_configure(intel_dp); /* * Wa_16013835468 @@ -1686,6 +1927,9 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp) enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 val; + if (intel_dp->psr.panel_replay_enabled) + goto no_err; + /* * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR * will still keep the error set even after the reset done in the @@ -1703,6 +1947,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp) return false; } +no_err: return true; } @@ -1711,12 +1956,11 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); u32 val; drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled); - intel_dp->psr.psr2_enabled = crtc_state->has_psr2; + intel_dp->psr.psr2_enabled = crtc_state->has_sel_update; intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay; intel_dp->psr.busy_frontbuffer_bits = 0; intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; @@ -1733,14 +1977,22 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, if (!psr_interrupt_error_check(intel_dp)) return; - if (intel_dp->psr.panel_replay_enabled) + if (intel_dp->psr.panel_replay_enabled) { drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n"); - else + } else { drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", intel_dp->psr.psr2_enabled ? "2" : "1"); - intel_snps_phy_update_psr_power_state(dev_priv, phy, true); - intel_psr_enable_sink(intel_dp); + /* + * Panel replay has to be enabled before link training: doing it + * only for PSR here. + */ + intel_psr_enable_sink(intel_dp, crtc_state); + } + + if (intel_dp_is_edp(intel_dp)) + intel_snps_phy_update_psr_power_state(&dig_port->base, true); + intel_psr_enable_source(intel_dp, crtc_state); intel_dp->psr.enabled = true; intel_dp->psr.paused = false; @@ -1810,8 +2062,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; - enum phy phy = intel_port_to_phy(dev_priv, - dp_to_dig_port(intel_dp)->base.port); lockdep_assert_held(&intel_dp->psr.lock); @@ -1846,12 +2096,25 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0); } - intel_snps_phy_update_psr_power_state(dev_priv, phy, false); + if (intel_dp_is_edp(intel_dp)) + intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false); + + /* Panel Replay on eDP is always using ALPM aux less. 
*/ + if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) { + intel_de_rmw(dev_priv, ALPM_CTL(cpu_transcoder), + ALPM_CTL_ALPM_ENABLE | + ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0); + + intel_de_rmw(dev_priv, PORT_ALPM_CTL(cpu_transcoder), + PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0); + } /* Disable PSR on Sink */ - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); + drm_dp_dpcd_writeb(&intel_dp->aux, + intel_psr_get_enable_sink_offset(intel_dp), 0); - if (intel_dp->psr.psr2_enabled) + if (!intel_dp->psr.panel_replay_enabled && + intel_dp->psr.psr2_enabled) drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0); intel_dp->psr.enabled = false; @@ -1899,7 +2162,7 @@ void intel_psr_pause(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_psr *psr = &intel_dp->psr; - if (!CAN_PSR(intel_dp)) + if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) return; mutex_lock(&psr->lock); @@ -1932,7 +2195,7 @@ void intel_psr_resume(struct intel_dp *intel_dp) { struct intel_psr *psr = &intel_dp->psr; - if (!CAN_PSR(intel_dp)) + if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) return; mutex_lock(&psr->lock); @@ -2069,14 +2332,19 @@ exit: crtc_state->psr2_man_track_ctl = val; } -static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state, - bool full_update) +static u32 +psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state, + bool full_update, bool cursor_in_su_area) { int width, height; if (!crtc_state->enable_psr2_su_region_et || full_update) return 0; + if (!cursor_in_su_area) + return PIPESRC_WIDTH(0) | + PIPESRC_HEIGHT(drm_rect_height(&crtc_state->pipe_src)); + width = drm_rect_width(&crtc_state->psr2_su_area); height = drm_rect_height(&crtc_state->psr2_su_area); @@ -2128,7 +2396,8 @@ static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_st */ static void intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state, - struct intel_crtc *crtc) + struct intel_crtc *crtc, + bool *cursor_in_su_area) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_plane_state *new_plane_state; @@ -2156,6 +2425,7 @@ intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state, clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst, &crtc_state->pipe_src); + *cursor_in_su_area = true; } } @@ -2201,7 +2471,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_plane_state *new_plane_state, *old_plane_state; struct intel_plane *plane; - bool full_update = false; + bool full_update = false, cursor_in_su_area = false; int i, ret; if (!crtc_state->enable_psr2_sel_fetch) @@ -2214,7 +2484,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, crtc_state->psr2_su_area.x1 = 0; crtc_state->psr2_su_area.y1 = -1; - crtc_state->psr2_su_area.x2 = INT_MAX; + crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src); crtc_state->psr2_su_area.y2 = -1; /* @@ -2318,7 +2588,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, * drm_atomic_add_affected_planes to ensure visible cursor is added into * affected planes even when cursor is not updated by itself. 
*/ - intel_psr2_sel_fetch_et_alignment(state, crtc); + intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area); intel_psr2_sel_fetch_pipe_alignment(crtc_state); @@ -2382,7 +2652,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, skip_sel_fetch_set_loop: psr2_man_trk_ctl_calc(crtc_state, full_update); crtc_state->pipe_srcsz_early_tpt = - psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update); + psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update, + cursor_in_su_area); return 0; } @@ -2417,7 +2688,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state); needs_to_disable |= !new_crtc_state->has_psr; needs_to_disable |= !new_crtc_state->active_planes; - needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled; + needs_to_disable |= new_crtc_state->has_sel_update != psr->psr2_enabled; needs_to_disable |= DISPLAY_VER(i915) < 11 && new_crtc_state->wm_level_disabled; @@ -2439,7 +2710,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder; - if (!(crtc_state->has_psr || crtc_state->has_panel_replay)) + if (!crtc_state->has_psr) return; for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, @@ -3039,6 +3310,13 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp) } } +/* + * On common bits: + * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR + * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR + * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR + * this function is relying on PSR definitions + */ void intel_psr_short_pulse(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -3048,7 +3326,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | DP_PSR_LINK_CRC_ERROR; - if (!CAN_PSR(intel_dp)) + if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) return; mutex_lock(&psr->lock); @@ -3062,12 +3340,14 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) goto exit; } - if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) { + if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) || + (error_status & errors)) { intel_psr_disable_locked(intel_dp); psr->sink_not_reliable = true; } - if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status) + if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR && + !error_status) drm_dbg_kms(&dev_priv->drm, "PSR sink internal error, disabling PSR\n"); if (error_status & DP_PSR_RFB_STORAGE_ERROR) @@ -3087,8 +3367,10 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) /* clear status register */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status); - psr_alpm_check(intel_dp); - psr_capability_changed_check(intel_dp); + if (!psr->panel_replay_enabled) { + psr_alpm_check(intel_dp); + psr_capability_changed_check(intel_dp); + } exit: mutex_unlock(&psr->lock); @@ -3412,16 +3694,9 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data) "reserved", "sink internal error", }; - static const char * const panel_replay_status[] = { - "Sink device frame is locked to the Source device", - "Sink device is coasting, using the VTotal target", - "Sink device is governing the frame rate (frame rate unlock is granted)", - "Sink device in the process of re-locking with the Source device", - }; const char *str; int ret; u8 status, 
error_status; - u32 idx; if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) { seq_puts(m, "PSR/Panel-Replay Unsupported\n"); @@ -3435,16 +3710,11 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data) if (ret) return ret; - str = "unknown"; - if (intel_dp->psr.panel_replay_enabled) { - idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT; - if (idx < ARRAY_SIZE(panel_replay_status)) - str = panel_replay_status[idx]; - } else if (intel_dp->psr.enabled) { - idx = status & DP_PSR_SINK_STATE_MASK; - if (idx < ARRAY_SIZE(sink_status)) - str = sink_status[idx]; - } + status &= DP_PSR_SINK_STATE_MASK; + if (status < ARRAY_SIZE(sink_status)) + str = sink_status[status]; + else + str = "unknown"; seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str); diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index cde781df84..d483c85870 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -21,8 +21,13 @@ struct intel_encoder; struct intel_plane; struct intel_plane_state; +#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \ + (intel_dp)->psr.source_panel_replay_support) + bool intel_encoder_can_psr(struct intel_encoder *encoder); void intel_psr_init_dpcd(struct intel_dp *intel_dp); +void intel_psr_enable_sink(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); void intel_psr_pre_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_psr_post_plane_update(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h index 8427a736f6..ebc2299957 100644 --- a/drivers/gpu/drm/i915/display/intel_psr_regs.h +++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h @@ -9,7 +9,7 @@ #include "intel_display_reg_defs.h" #include "intel_dp_aux_regs.h" -#define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A) +#define TRANS_EXITLINE(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_EXITLINE_A) #define EXITLINE_ENABLE REG_BIT(31) #define EXITLINE_MASK REG_GENMASK(12, 0) #define EXITLINE_SHIFT 0 @@ -23,7 +23,7 @@ #define HSW_SRD_CTL _MMIO(0x64800) #define _SRD_CTL_A 0x60800 #define _SRD_CTL_EDP 0x6f800 -#define EDP_PSR_CTL(tran) _MMIO_TRANS2(tran, _SRD_CTL_A) +#define EDP_PSR_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_CTL_A) #define EDP_PSR_ENABLE REG_BIT(31) #define BDW_PSR_SINGLE_FRAME REG_BIT(30) #define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK REG_BIT(29) /* SW can't modify */ @@ -66,8 +66,8 @@ #define EDP_PSR_IIR _MMIO(0x64838) #define _PSR_IMR_A 0x60814 #define _PSR_IIR_A 0x60818 -#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(tran, _PSR_IMR_A) -#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A) +#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(dev_priv, tran, _PSR_IMR_A) +#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(dev_priv, tran, _PSR_IIR_A) #define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? 
\ 0 : ((trans) - TRANSCODER_A + 1) * 8) #define TGL_PSR_MASK REG_GENMASK(2, 0) @@ -86,7 +86,7 @@ #define HSW_SRD_AUX_CTL _MMIO(0x64810) #define _SRD_AUX_CTL_A 0x60810 #define _SRD_AUX_CTL_EDP 0x6f810 -#define EDP_PSR_AUX_CTL(tran) _MMIO_TRANS2(tran, _SRD_AUX_CTL_A) +#define EDP_PSR_AUX_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_AUX_CTL_A) #define EDP_PSR_AUX_CTL_TIME_OUT_MASK DP_AUX_CH_CTL_TIME_OUT_MASK #define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK DP_AUX_CH_CTL_MESSAGE_SIZE_MASK #define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK DP_AUX_CH_CTL_PRECHARGE_2US_MASK @@ -96,12 +96,12 @@ #define HSW_SRD_AUX_DATA(i) _MMIO(0x64814 + (i) * 4) /* 5 registers */ #define _SRD_AUX_DATA_A 0x60814 #define _SRD_AUX_DATA_EDP 0x6f814 -#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) * 4) /* 5 registers */ +#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(dev_priv, tran, _SRD_AUX_DATA_A + (i) * 4) /* 5 registers */ #define HSW_SRD_STATUS _MMIO(0x64840) #define _SRD_STATUS_A 0x60840 #define _SRD_STATUS_EDP 0x6f840 -#define EDP_PSR_STATUS(tran) _MMIO_TRANS2(tran, _SRD_STATUS_A) +#define EDP_PSR_STATUS(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_STATUS_A) #define EDP_PSR_STATUS_STATE_MASK REG_GENMASK(31, 29) #define EDP_PSR_STATUS_STATE_IDLE REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 0) #define EDP_PSR_STATUS_STATE_SRDONACK REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 1) @@ -126,14 +126,14 @@ #define HSW_SRD_PERF_CNT _MMIO(0x64844) #define _SRD_PERF_CNT_A 0x60844 #define _SRD_PERF_CNT_EDP 0x6f844 -#define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(tran, _SRD_PERF_CNT_A) +#define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_PERF_CNT_A) #define EDP_PSR_PERF_CNT_MASK REG_GENMASK(23, 0) /* PSR_MASK on SKL+ */ #define HSW_SRD_DEBUG _MMIO(0x64860) #define _SRD_DEBUG_A 0x60860 #define _SRD_DEBUG_EDP 0x6f860 -#define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(tran, _SRD_DEBUG_A) +#define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(dev_priv, tran, _SRD_DEBUG_A) #define EDP_PSR_DEBUG_MASK_MAX_SLEEP REG_BIT(28) #define EDP_PSR_DEBUG_MASK_LPSP REG_BIT(27) #define EDP_PSR_DEBUG_MASK_MEMUP REG_BIT(26) @@ -153,7 +153,7 @@ #define _PSR2_CTL_A 0x60900 #define _PSR2_CTL_EDP 0x6f900 -#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A) +#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PSR2_CTL_A) #define EDP_PSR2_ENABLE REG_BIT(31) #define EDP_SU_TRACK_ENABLE REG_BIT(30) /* up to adl-p */ #define TGL_EDP_PSR2_BLOCK_COUNT_MASK REG_BIT(28) @@ -195,7 +195,7 @@ #define _PSR_EVENT_TRANS_C 0x62848 #define _PSR_EVENT_TRANS_D 0x63848 #define _PSR_EVENT_TRANS_EDP 0x6f848 -#define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A) +#define PSR_EVENT(tran) _MMIO_TRANS2(dev_priv, tran, _PSR_EVENT_TRANS_A) #define PSR_EVENT_PSR2_WD_TIMER_EXPIRE REG_BIT(17) #define PSR_EVENT_PSR2_DISABLED REG_BIT(16) #define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN REG_BIT(15) @@ -215,13 +215,13 @@ #define _PSR2_STATUS_A 0x60940 #define _PSR2_STATUS_EDP 0x6f940 -#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) +#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(dev_priv, tran, _PSR2_STATUS_A) #define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28) #define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8) #define _PSR2_SU_STATUS_A 0x60914 #define _PSR2_SU_STATUS_EDP 0x6f914 -#define _PSR2_SU_STATUS(tran, index) _MMIO_TRANS2(tran, _PSR2_SU_STATUS_A + (index) * 4) +#define _PSR2_SU_STATUS(tran, index) _MMIO_TRANS2(dev_priv, tran, _PSR2_SU_STATUS_A + (index) * 4) #define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, 
(frame) / 3)) #define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10) #define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame)) @@ -229,7 +229,7 @@ #define _PSR2_MAN_TRK_CTL_A 0x60910 #define _PSR2_MAN_TRK_CTL_EDP 0x6f910 -#define PSR2_MAN_TRK_CTL(tran) _MMIO_TRANS2(tran, _PSR2_MAN_TRK_CTL_A) +#define PSR2_MAN_TRK_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PSR2_MAN_TRK_CTL_A) #define PSR2_MAN_TRK_CTL_ENABLE REG_BIT(31) #define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK REG_GENMASK(30, 21) #define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val) @@ -249,7 +249,7 @@ /* PSR2 Early transport */ #define _PIPE_SRCSZ_ERLY_TPT_A 0x70074 -#define PIPE_SRCSZ_ERLY_TPT(trans) _MMIO_TRANS2(trans, _PIPE_SRCSZ_ERLY_TPT_A) +#define PIPE_SRCSZ_ERLY_TPT(trans) _MMIO_TRANS2(dev_priv, trans, _PIPE_SRCSZ_ERLY_TPT_A) #define _SEL_FETCH_PLANE_BASE_1_A 0x70890 #define _SEL_FETCH_PLANE_BASE_2_A 0x708B0 @@ -297,7 +297,7 @@ _SEL_FETCH_PLANE_BASE_1_A) #define _ALPM_CTL_A 0x60950 -#define ALPM_CTL(tran) _MMIO_TRANS2(tran, _ALPM_CTL_A) +#define ALPM_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _ALPM_CTL_A) #define ALPM_CTL_ALPM_ENABLE REG_BIT(31) #define ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(30) #define ALPM_CTL_LOBF_ENABLE REG_BIT(29) @@ -321,7 +321,7 @@ #define ALPM_CTL_AUX_LESS_WAKE_TIME(val) REG_FIELD_PREP(ALPM_CTL_AUX_LESS_WAKE_TIME_MASK, val) #define _ALPM_CTL2_A 0x60954 -#define ALPM_CTL2(tran) _MMIO_TRANS2(tran, _ALPM_CTL2_A) +#define ALPM_CTL2(tran) _MMIO_TRANS2(dev_priv, tran, _ALPM_CTL2_A) #define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK REG_GENMASK(28, 24) #define ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY(val) REG_FIELD_PREP(ALPM_CTL2_SWITCH_TO_ACTIVE_LATENCY_MASK, val) #define ALPM_CTL2_AUX_LESS_WAKE_TIME_EXTENSION_MASK REG_GENMASK(19, 16) @@ -335,7 +335,7 @@ #define ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES(val) REG_FIELD_PREP(ALPM_CTL2_NUMBER_AUX_LESS_ML_PHY_SLEEP_SEQUENCES_MASK, val) #define _PORT_ALPM_CTL_A 0x16fa2c -#define PORT_ALPM_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_CTL_A) +#define PORT_ALPM_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PORT_ALPM_CTL_A) #define PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(31) #define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(23, 20) #define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK, val) @@ -345,12 +345,16 @@ #define PORT_ALPM_CTL_SILENCE_PERIOD(val) REG_FIELD_PREP(PORT_ALPM_CTL_SILENCE_PERIOD_MASK, val) #define _PORT_ALPM_LFPS_CTL_A 0x16fa30 -#define PORT_ALPM_LFPS_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_LFPS_CTL_A) +#define PORT_ALPM_LFPS_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _PORT_ALPM_LFPS_CTL_A) #define PORT_ALPM_LFPS_CTL_LFPS_START_POLARITY REG_BIT(31) #define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK REG_GENMASK(27, 24) -#define ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES 5 -#define ALPM_CTL_EXTENDED_FAST_WAKE_TIME(lines) REG_FIELD_PREP(ALPM_CTL_EXTENDED_FAST_WAKE_TIME_MASK, (lines) - ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES) -#define ALPM_CTL_AUX_LESS_WAKE_TIME_MASK REG_GENMASK(5, 0) -#define ALPM_CTL_AUX_LESS_WAKE_TIME(val) REG_FIELD_PREP(ALPM_CTL_AUX_LESS_WAKE_TIME_MASK, val) +#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MIN 7 +#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK, (val) - PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MIN) +#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(20, 16) +#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(val) 
REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val) +#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(12, 8) +#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val) +#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(4, 0) +#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val) #endif /* __INTEL_PSR_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index a280448df7..14d5fefc9c 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -9,72 +9,72 @@ #include "intel_display_types.h" #include "intel_quirks.h" -static void intel_set_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk) +static void intel_set_quirk(struct intel_display *display, enum intel_quirk_id quirk) { - i915->display.quirks.mask |= BIT(quirk); + display->quirks.mask |= BIT(quirk); } /* * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason */ -static void quirk_ssc_force_disable(struct drm_i915_private *i915) +static void quirk_ssc_force_disable(struct intel_display *display) { - intel_set_quirk(i915, QUIRK_LVDS_SSC_DISABLE); - drm_info(&i915->drm, "applying lvds SSC disable quirk\n"); + intel_set_quirk(display, QUIRK_LVDS_SSC_DISABLE); + drm_info(display->drm, "applying lvds SSC disable quirk\n"); } /* * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight * brightness value */ -static void quirk_invert_brightness(struct drm_i915_private *i915) +static void quirk_invert_brightness(struct intel_display *display) { - intel_set_quirk(i915, QUIRK_INVERT_BRIGHTNESS); - drm_info(&i915->drm, "applying inverted panel brightness quirk\n"); + intel_set_quirk(display, QUIRK_INVERT_BRIGHTNESS); + drm_info(display->drm, "applying inverted panel brightness quirk\n"); } /* Some VBT's incorrectly indicate no backlight is present */ -static void quirk_backlight_present(struct drm_i915_private *i915) +static void quirk_backlight_present(struct intel_display *display) { - intel_set_quirk(i915, QUIRK_BACKLIGHT_PRESENT); - drm_info(&i915->drm, "applying backlight present quirk\n"); + intel_set_quirk(display, QUIRK_BACKLIGHT_PRESENT); + drm_info(display->drm, "applying backlight present quirk\n"); } /* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms * which is 300 ms greater than eDP spec T12 min. 
*/ -static void quirk_increase_t12_delay(struct drm_i915_private *i915) +static void quirk_increase_t12_delay(struct intel_display *display) { - intel_set_quirk(i915, QUIRK_INCREASE_T12_DELAY); - drm_info(&i915->drm, "Applying T12 delay quirk\n"); + intel_set_quirk(display, QUIRK_INCREASE_T12_DELAY); + drm_info(display->drm, "Applying T12 delay quirk\n"); } /* * GeminiLake NUC HDMI outputs require additional off time * this allows the onboard retimer to correctly sync to signal */ -static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915) +static void quirk_increase_ddi_disabled_time(struct intel_display *display) { - intel_set_quirk(i915, QUIRK_INCREASE_DDI_DISABLED_TIME); - drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n"); + intel_set_quirk(display, QUIRK_INCREASE_DDI_DISABLED_TIME); + drm_info(display->drm, "Applying Increase DDI Disabled quirk\n"); } -static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915) +static void quirk_no_pps_backlight_power_hook(struct intel_display *display) { - intel_set_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK); - drm_info(&i915->drm, "Applying no pps backlight power quirk\n"); + intel_set_quirk(display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK); + drm_info(display->drm, "Applying no pps backlight power quirk\n"); } struct intel_quirk { int device; int subsystem_vendor; int subsystem_device; - void (*hook)(struct drm_i915_private *i915); + void (*hook)(struct intel_display *display); }; /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ struct intel_dmi_quirk { - void (*hook)(struct drm_i915_private *i915); + void (*hook)(struct intel_display *display); const struct dmi_system_id (*dmi_id_list)[]; }; @@ -203,9 +203,9 @@ static struct intel_quirk intel_quirks[] = { { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness }, }; -void intel_init_quirks(struct drm_i915_private *i915) +void intel_init_quirks(struct intel_display *display) { - struct pci_dev *d = to_pci_dev(i915->drm.dev); + struct pci_dev *d = to_pci_dev(display->drm->dev); int i; for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { @@ -216,15 +216,15 @@ void intel_init_quirks(struct drm_i915_private *i915) q->subsystem_vendor == PCI_ANY_ID) && (d->subsystem_device == q->subsystem_device || q->subsystem_device == PCI_ANY_ID)) - q->hook(i915); + q->hook(display); } for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) - intel_dmi_quirks[i].hook(i915); + intel_dmi_quirks[i].hook(display); } } -bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk) +bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk) { - return i915->display.quirks.mask & BIT(quirk); + return display->quirks.mask & BIT(quirk); } diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h index 10a4d16314..151c8f4ae5 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.h +++ b/drivers/gpu/drm/i915/display/intel_quirks.h @@ -8,7 +8,7 @@ #include -struct drm_i915_private; +struct intel_display; enum intel_quirk_id { QUIRK_BACKLIGHT_PRESENT, @@ -19,7 +19,7 @@ enum intel_quirk_id { QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK, }; -void intel_init_quirks(struct drm_i915_private *i915); -bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk); +void intel_init_quirks(struct intel_display *display); +bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk); #endif /* __INTEL_QUIRKS_H__ */ diff 
--git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 0cd9c183f6..d0d7124051 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -193,7 +193,7 @@ to_intel_sdvo_connector(struct drm_connector *connector) } #define to_intel_sdvo_connector_state(conn_state) \ - container_of((conn_state), struct intel_sdvo_connector_state, base.base) + container_of_const((conn_state), struct intel_sdvo_connector_state, base.base) static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo); @@ -1944,7 +1944,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector, struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state); - int max_dotclk = i915->max_dotclk_freq; + int max_dotclk = i915->display.cdclk.max_dotclk_freq; enum drm_mode_status status; int clock = mode->clock; @@ -1952,9 +1952,6 @@ intel_sdvo_mode_valid(struct drm_connector *connector, if (status != MODE_OK) return status; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) - return MODE_NO_DBLESCAN; - if (clock > max_dotclk) return MODE_CLOCK_HIGH; @@ -2378,7 +2375,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector, u64 *val) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); - const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state); + const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state); if (property == intel_sdvo_connector->tv_format) { int i; diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index bc61e736f9..e6df1f92de 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -44,12 +44,14 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915) } } -void intel_snps_phy_update_psr_power_state(struct drm_i915_private *i915, - enum phy phy, bool enable) +void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder, + bool enable) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_encoder_to_phy(encoder); u32 val; - if (!intel_phy_is_snps(i915, phy)) + if (!intel_encoder_is_snps(encoder)) return; val = REG_FIELD_PREP(SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, @@ -63,7 +65,7 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); const struct intel_ddi_buf_trans *trans; - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); int n_entries, ln; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); @@ -1809,7 +1811,7 @@ int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state, for (i = 0; tables[i]; i++) { if (crtc_state->port_clock == tables[i]->clock) { - crtc_state->mpllb_state = *tables[i]; + crtc_state->dpll_hw_state.mpllb = *tables[i]; return 0; } } @@ -1821,8 +1823,8 @@ void intel_mpllb_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct intel_mpllb_state *pll_state = &crtc_state->mpllb_state; - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + const struct intel_mpllb_state *pll_state = &crtc_state->dpll_hw_state.mpllb; + enum phy phy = 
intel_encoder_to_phy(encoder); i915_reg_t enable_reg = (phy <= PHY_D ? DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0)); @@ -1879,7 +1881,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder, void intel_mpllb_disable(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); i915_reg_t enable_reg = (phy <= PHY_D ? DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0)); @@ -1951,7 +1953,7 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder, struct intel_mpllb_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + enum phy phy = intel_encoder_to_phy(encoder); pll_state->mpllb_cp = intel_de_read(dev_priv, SNPS_PHY_MPLLB_CP(phy)); pll_state->mpllb_div = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV(phy)); @@ -1999,7 +2001,7 @@ void intel_mpllb_state_verify(struct intel_atomic_state *state, const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_mpllb_state mpllb_hw_state = {}; - const struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state; + const struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->dpll_hw_state.mpllb; struct intel_encoder *encoder; if (!IS_DG2(i915)) diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h index 515abf7c59..bc08b92a7c 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.h +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h @@ -17,8 +17,8 @@ struct intel_mpllb_state; enum phy; void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv); -void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv, - enum phy phy, bool enable); +void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder, + bool enable); int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder); diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index d7b440c8ca..36a253a19c 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -47,6 +47,7 @@ #include "intel_fb.h" #include "intel_frontbuffer.h" #include "intel_sprite.h" +#include "intel_sprite_regs.h" static char sprite_name(struct drm_i915_private *i915, enum pipe pipe, int sprite) { diff --git a/drivers/gpu/drm/i915/display/intel_sprite_regs.h b/drivers/gpu/drm/i915/display/intel_sprite_regs.h new file mode 100644 index 0000000000..bb67705652 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_sprite_regs.h @@ -0,0 +1,348 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2024 Intel Corporation */ + +#ifndef __INTEL_SPRITE_REGS__ +#define __INTEL_SPRITE_REGS__ + +#include "intel_display_reg_defs.h" + +#define _DVSACNTR 0x72180 +#define DVS_ENABLE REG_BIT(31) +#define DVS_PIPE_GAMMA_ENABLE REG_BIT(30) +#define DVS_YUV_RANGE_CORRECTION_DISABLE REG_BIT(27) +#define DVS_FORMAT_MASK REG_GENMASK(26, 25) +#define DVS_FORMAT_YUV422 REG_FIELD_PREP(DVS_FORMAT_MASK, 0) +#define DVS_FORMAT_RGBX101010 REG_FIELD_PREP(DVS_FORMAT_MASK, 1) +#define DVS_FORMAT_RGBX888 REG_FIELD_PREP(DVS_FORMAT_MASK, 2) +#define DVS_FORMAT_RGBX161616 REG_FIELD_PREP(DVS_FORMAT_MASK, 3) +#define DVS_PIPE_CSC_ENABLE REG_BIT(24) +#define DVS_SOURCE_KEY REG_BIT(22) +#define DVS_RGB_ORDER_XBGR REG_BIT(20) +#define DVS_YUV_FORMAT_BT709 
REG_BIT(18) +#define DVS_YUV_ORDER_MASK REG_GENMASK(17, 16) +#define DVS_YUV_ORDER_YUYV REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 0) +#define DVS_YUV_ORDER_UYVY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 1) +#define DVS_YUV_ORDER_YVYU REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 2) +#define DVS_YUV_ORDER_VYUY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 3) +#define DVS_ROTATE_180 REG_BIT(15) +#define DVS_TRICKLE_FEED_DISABLE REG_BIT(14) +#define DVS_TILED REG_BIT(10) +#define DVS_DEST_KEY REG_BIT(2) +#define _DVSALINOFF 0x72184 +#define _DVSASTRIDE 0x72188 +#define _DVSAPOS 0x7218c +#define DVS_POS_Y_MASK REG_GENMASK(31, 16) +#define DVS_POS_Y(y) REG_FIELD_PREP(DVS_POS_Y_MASK, (y)) +#define DVS_POS_X_MASK REG_GENMASK(15, 0) +#define DVS_POS_X(x) REG_FIELD_PREP(DVS_POS_X_MASK, (x)) +#define _DVSASIZE 0x72190 +#define DVS_HEIGHT_MASK REG_GENMASK(31, 16) +#define DVS_HEIGHT(h) REG_FIELD_PREP(DVS_HEIGHT_MASK, (h)) +#define DVS_WIDTH_MASK REG_GENMASK(15, 0) +#define DVS_WIDTH(w) REG_FIELD_PREP(DVS_WIDTH_MASK, (w)) +#define _DVSAKEYVAL 0x72194 +#define _DVSAKEYMSK 0x72198 +#define _DVSASURF 0x7219c +#define DVS_ADDR_MASK REG_GENMASK(31, 12) +#define _DVSAKEYMAXVAL 0x721a0 +#define _DVSATILEOFF 0x721a4 +#define DVS_OFFSET_Y_MASK REG_GENMASK(31, 16) +#define DVS_OFFSET_Y(y) REG_FIELD_PREP(DVS_OFFSET_Y_MASK, (y)) +#define DVS_OFFSET_X_MASK REG_GENMASK(15, 0) +#define DVS_OFFSET_X(x) REG_FIELD_PREP(DVS_OFFSET_X_MASK, (x)) +#define _DVSASURFLIVE 0x721ac +#define _DVSAGAMC_G4X 0x721e0 /* g4x */ +#define _DVSASCALE 0x72204 +#define DVS_SCALE_ENABLE REG_BIT(31) +#define DVS_FILTER_MASK REG_GENMASK(30, 29) +#define DVS_FILTER_MEDIUM REG_FIELD_PREP(DVS_FILTER_MASK, 0) +#define DVS_FILTER_ENHANCING REG_FIELD_PREP(DVS_FILTER_MASK, 1) +#define DVS_FILTER_SOFTENING REG_FIELD_PREP(DVS_FILTER_MASK, 2) +#define DVS_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */ +#define DVS_VERTICAL_OFFSET_ENABLE REG_BIT(27) +#define DVS_SRC_WIDTH_MASK REG_GENMASK(26, 16) +#define DVS_SRC_WIDTH(w) REG_FIELD_PREP(DVS_SRC_WIDTH_MASK, (w)) +#define DVS_SRC_HEIGHT_MASK REG_GENMASK(10, 0) +#define DVS_SRC_HEIGHT(h) REG_FIELD_PREP(DVS_SRC_HEIGHT_MASK, (h)) +#define _DVSAGAMC_ILK 0x72300 /* ilk/snb */ +#define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */ + +#define _DVSBCNTR 0x73180 +#define _DVSBLINOFF 0x73184 +#define _DVSBSTRIDE 0x73188 +#define _DVSBPOS 0x7318c +#define _DVSBSIZE 0x73190 +#define _DVSBKEYVAL 0x73194 +#define _DVSBKEYMSK 0x73198 +#define _DVSBSURF 0x7319c +#define _DVSBKEYMAXVAL 0x731a0 +#define _DVSBTILEOFF 0x731a4 +#define _DVSBSURFLIVE 0x731ac +#define _DVSBGAMC_G4X 0x731e0 /* g4x */ +#define _DVSBSCALE 0x73204 +#define _DVSBGAMC_ILK 0x73300 /* ilk/snb */ +#define _DVSBGAMCMAX_ILK 0x73340 /* ilk/snb */ + +#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR) +#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF) +#define DVSSTRIDE(pipe) _MMIO_PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE) +#define DVSPOS(pipe) _MMIO_PIPE(pipe, _DVSAPOS, _DVSBPOS) +#define DVSSURF(pipe) _MMIO_PIPE(pipe, _DVSASURF, _DVSBSURF) +#define DVSKEYMAX(pipe) _MMIO_PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL) +#define DVSSIZE(pipe) _MMIO_PIPE(pipe, _DVSASIZE, _DVSBSIZE) +#define DVSSCALE(pipe) _MMIO_PIPE(pipe, _DVSASCALE, _DVSBSCALE) +#define DVSTILEOFF(pipe) _MMIO_PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) +#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) +#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) +#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) +#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, 
_DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 - (i)) * 4) /* 6 x u0.8 */ +#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */ +#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */ + +#define _SPRA_CTL 0x70280 +#define SPRITE_ENABLE REG_BIT(31) +#define SPRITE_PIPE_GAMMA_ENABLE REG_BIT(30) +#define SPRITE_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28) +#define SPRITE_FORMAT_MASK REG_GENMASK(27, 25) +#define SPRITE_FORMAT_YUV422 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 0) +#define SPRITE_FORMAT_RGBX101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 1) +#define SPRITE_FORMAT_RGBX888 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 2) +#define SPRITE_FORMAT_RGBX161616 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 3) +#define SPRITE_FORMAT_YUV444 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 4) +#define SPRITE_FORMAT_XR_BGR101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 5) /* Extended range */ +#define SPRITE_PIPE_CSC_ENABLE REG_BIT(24) +#define SPRITE_SOURCE_KEY REG_BIT(22) +#define SPRITE_RGB_ORDER_RGBX REG_BIT(20) /* only for 888 and 161616 */ +#define SPRITE_YUV_TO_RGB_CSC_DISABLE REG_BIT(19) +#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 REG_BIT(18) /* 0 is BT601 */ +#define SPRITE_YUV_ORDER_MASK REG_GENMASK(17, 16) +#define SPRITE_YUV_ORDER_YUYV REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 0) +#define SPRITE_YUV_ORDER_UYVY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 1) +#define SPRITE_YUV_ORDER_YVYU REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 2) +#define SPRITE_YUV_ORDER_VYUY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 3) +#define SPRITE_ROTATE_180 REG_BIT(15) +#define SPRITE_TRICKLE_FEED_DISABLE REG_BIT(14) +#define SPRITE_PLANE_GAMMA_DISABLE REG_BIT(13) +#define SPRITE_TILED REG_BIT(10) +#define SPRITE_DEST_KEY REG_BIT(2) +#define _SPRA_LINOFF 0x70284 +#define _SPRA_STRIDE 0x70288 +#define _SPRA_POS 0x7028c +#define SPRITE_POS_Y_MASK REG_GENMASK(31, 16) +#define SPRITE_POS_Y(y) REG_FIELD_PREP(SPRITE_POS_Y_MASK, (y)) +#define SPRITE_POS_X_MASK REG_GENMASK(15, 0) +#define SPRITE_POS_X(x) REG_FIELD_PREP(SPRITE_POS_X_MASK, (x)) +#define _SPRA_SIZE 0x70290 +#define SPRITE_HEIGHT_MASK REG_GENMASK(31, 16) +#define SPRITE_HEIGHT(h) REG_FIELD_PREP(SPRITE_HEIGHT_MASK, (h)) +#define SPRITE_WIDTH_MASK REG_GENMASK(15, 0) +#define SPRITE_WIDTH(w) REG_FIELD_PREP(SPRITE_WIDTH_MASK, (w)) +#define _SPRA_KEYVAL 0x70294 +#define _SPRA_KEYMSK 0x70298 +#define _SPRA_SURF 0x7029c +#define SPRITE_ADDR_MASK REG_GENMASK(31, 12) +#define _SPRA_KEYMAX 0x702a0 +#define _SPRA_TILEOFF 0x702a4 +#define SPRITE_OFFSET_Y_MASK REG_GENMASK(31, 16) +#define SPRITE_OFFSET_Y(y) REG_FIELD_PREP(SPRITE_OFFSET_Y_MASK, (y)) +#define SPRITE_OFFSET_X_MASK REG_GENMASK(15, 0) +#define SPRITE_OFFSET_X(x) REG_FIELD_PREP(SPRITE_OFFSET_X_MASK, (x)) +#define _SPRA_OFFSET 0x702a4 +#define _SPRA_SURFLIVE 0x702ac +#define _SPRA_SCALE 0x70304 +#define SPRITE_SCALE_ENABLE REG_BIT(31) +#define SPRITE_FILTER_MASK REG_GENMASK(30, 29) +#define SPRITE_FILTER_MEDIUM REG_FIELD_PREP(SPRITE_FILTER_MASK, 0) +#define SPRITE_FILTER_ENHANCING REG_FIELD_PREP(SPRITE_FILTER_MASK, 1) +#define SPRITE_FILTER_SOFTENING REG_FIELD_PREP(SPRITE_FILTER_MASK, 2) +#define SPRITE_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */ +#define SPRITE_VERTICAL_OFFSET_ENABLE REG_BIT(27) +#define SPRITE_SRC_WIDTH_MASK REG_GENMASK(26, 16) +#define SPRITE_SRC_WIDTH(w) REG_FIELD_PREP(SPRITE_SRC_WIDTH_MASK, (w)) +#define SPRITE_SRC_HEIGHT_MASK REG_GENMASK(10, 0) +#define SPRITE_SRC_HEIGHT(h) REG_FIELD_PREP(SPRITE_SRC_HEIGHT_MASK, (h)) +#define _SPRA_GAMC 0x70400 
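Aside: every control word in this new header is composed the same way, a REG_GENMASK() mask plus a REG_FIELD_PREP() packer per field (for example SPRITE_POS_X/SPRITE_POS_Y above). The stand-alone sketch below mirrors that pattern with simplified stand-in macros so it can be compiled outside the kernel; the SKETCH_* helpers and the example values are purely illustrative, not the driver's actual definitions.

/* Minimal user-space sketch of the REG_GENMASK/REG_FIELD_PREP packing style. */
#include <stdio.h>

#define SKETCH_GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define SKETCH_FIELD_PREP(mask, v)	(((v) << __builtin_ctz(mask)) & (mask))
#define SKETCH_FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define POS_Y_MASK	SKETCH_GENMASK(31, 16)	/* mirrors SPRITE_POS_Y_MASK */
#define POS_X_MASK	SKETCH_GENMASK(15, 0)	/* mirrors SPRITE_POS_X_MASK */

int main(void)
{
	/* Pack x=128, y=64 the way SPRITE_POS_X()/SPRITE_POS_Y() would. */
	unsigned int val = SKETCH_FIELD_PREP(POS_Y_MASK, 64) |
			   SKETCH_FIELD_PREP(POS_X_MASK, 128);

	printf("reg = 0x%08x, x = %u, y = %u\n", val,
	       SKETCH_FIELD_GET(POS_X_MASK, val),
	       SKETCH_FIELD_GET(POS_Y_MASK, val));
	return 0;
}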
+#define _SPRA_GAMC16 0x70440 +#define _SPRA_GAMC17 0x7044c + +#define _SPRB_CTL 0x71280 +#define _SPRB_LINOFF 0x71284 +#define _SPRB_STRIDE 0x71288 +#define _SPRB_POS 0x7128c +#define _SPRB_SIZE 0x71290 +#define _SPRB_KEYVAL 0x71294 +#define _SPRB_KEYMSK 0x71298 +#define _SPRB_SURF 0x7129c +#define _SPRB_KEYMAX 0x712a0 +#define _SPRB_TILEOFF 0x712a4 +#define _SPRB_OFFSET 0x712a4 +#define _SPRB_SURFLIVE 0x712ac +#define _SPRB_SCALE 0x71304 +#define _SPRB_GAMC 0x71400 +#define _SPRB_GAMC16 0x71440 +#define _SPRB_GAMC17 0x7144c + +#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL) +#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF) +#define SPRSTRIDE(pipe) _MMIO_PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE) +#define SPRPOS(pipe) _MMIO_PIPE(pipe, _SPRA_POS, _SPRB_POS) +#define SPRSIZE(pipe) _MMIO_PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE) +#define SPRKEYVAL(pipe) _MMIO_PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL) +#define SPRKEYMSK(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK) +#define SPRSURF(pipe) _MMIO_PIPE(pipe, _SPRA_SURF, _SPRB_SURF) +#define SPRKEYMAX(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) +#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) +#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) +#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) +#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */ +#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */ +#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */ +#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) + +#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) +#define SP_ENABLE REG_BIT(31) +#define SP_PIPE_GAMMA_ENABLE REG_BIT(30) +#define SP_FORMAT_MASK REG_GENMASK(29, 26) +#define SP_FORMAT_YUV422 REG_FIELD_PREP(SP_FORMAT_MASK, 0) +#define SP_FORMAT_8BPP REG_FIELD_PREP(SP_FORMAT_MASK, 2) +#define SP_FORMAT_BGR565 REG_FIELD_PREP(SP_FORMAT_MASK, 5) +#define SP_FORMAT_BGRX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 6) +#define SP_FORMAT_BGRA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 7) +#define SP_FORMAT_RGBX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 8) +#define SP_FORMAT_RGBA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 9) +#define SP_FORMAT_BGRX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 10) /* CHV pipe B */ +#define SP_FORMAT_BGRA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 11) /* CHV pipe B */ +#define SP_FORMAT_RGBX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 14) +#define SP_FORMAT_RGBA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 15) +#define SP_ALPHA_PREMULTIPLY REG_BIT(23) /* CHV pipe B */ +#define SP_SOURCE_KEY REG_BIT(22) +#define SP_YUV_FORMAT_BT709 REG_BIT(18) +#define SP_YUV_ORDER_MASK REG_GENMASK(17, 16) +#define SP_YUV_ORDER_YUYV REG_FIELD_PREP(SP_YUV_ORDER_MASK, 0) +#define SP_YUV_ORDER_UYVY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 1) +#define SP_YUV_ORDER_YVYU REG_FIELD_PREP(SP_YUV_ORDER_MASK, 2) +#define SP_YUV_ORDER_VYUY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 3) +#define SP_ROTATE_180 REG_BIT(15) +#define SP_TILED REG_BIT(10) +#define SP_MIRROR REG_BIT(8) /* CHV pipe B */ +#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184) +#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188) +#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c) +#define SP_POS_Y_MASK REG_GENMASK(31, 16) +#define SP_POS_Y(y) REG_FIELD_PREP(SP_POS_Y_MASK, (y)) +#define SP_POS_X_MASK REG_GENMASK(15, 0) +#define SP_POS_X(x) REG_FIELD_PREP(SP_POS_X_MASK, (x)) +#define _SPASIZE (VLV_DISPLAY_BASE + 0x72190) +#define 
SP_HEIGHT_MASK REG_GENMASK(31, 16) +#define SP_HEIGHT(h) REG_FIELD_PREP(SP_HEIGHT_MASK, (h)) +#define SP_WIDTH_MASK REG_GENMASK(15, 0) +#define SP_WIDTH(w) REG_FIELD_PREP(SP_WIDTH_MASK, (w)) +#define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194) +#define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198) +#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c) +#define SP_ADDR_MASK REG_GENMASK(31, 12) +#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0) +#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4) +#define SP_OFFSET_Y_MASK REG_GENMASK(31, 16) +#define SP_OFFSET_Y(y) REG_FIELD_PREP(SP_OFFSET_Y_MASK, (y)) +#define SP_OFFSET_X_MASK REG_GENMASK(15, 0) +#define SP_OFFSET_X(x) REG_FIELD_PREP(SP_OFFSET_X_MASK, (x)) +#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8) +#define SP_CONST_ALPHA_ENABLE REG_BIT(31) +#define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0) +#define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha)) +#define _SPASURFLIVE (VLV_DISPLAY_BASE + 0x721ac) +#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0) +#define SP_CONTRAST_MASK REG_GENMASK(26, 18) +#define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */ +#define SP_BRIGHTNESS_MASK REG_GENMASK(7, 0) +#define SP_BRIGHTNESS(x) REG_FIELD_PREP(SP_BRIGHTNESS_MASK, (x)) /* s8 */ +#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4) +#define SP_SH_SIN_MASK REG_GENMASK(26, 16) +#define SP_SH_SIN(x) REG_FIELD_PREP(SP_SH_SIN_MASK, (x)) /* s4.7 */ +#define SP_SH_COS_MASK REG_GENMASK(9, 0) +#define SP_SH_COS(x) REG_FIELD_PREP(SP_SH_COS_MASK, (x)) /* u3.7 */ +#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0) + +#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280) +#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284) +#define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288) +#define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c) +#define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290) +#define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294) +#define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298) +#define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c) +#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0) +#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4) +#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) +#define _SPBSURFLIVE (VLV_DISPLAY_BASE + 0x722ac) +#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0) +#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4) +#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0) + +#define _VLV_SPR(pipe, plane_id, reg_a, reg_b) \ + _PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b)) +#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \ + _MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b))) + +#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR) +#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF) +#define SPSTRIDE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASTRIDE, _SPBSTRIDE) +#define SPPOS(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAPOS, _SPBPOS) +#define SPSIZE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASIZE, _SPBSIZE) +#define SPKEYMINVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMINVAL, _SPBKEYMINVAL) +#define SPKEYMSK(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMSK, _SPBKEYMSK) +#define SPSURF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURF, _SPBSURF) +#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL) +#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF) +#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, 
_SPBCONSTALPHA) +#define SPSURFLIVE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURFLIVE, _SPBSURFLIVE) +#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0) +#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1) +#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */ + +/* + * CHV pipe B sprite CSC + * + * |cr| |c0 c1 c2| |cr + cr_ioff| |cr_ooff| + * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff| + * |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff| + */ +#define _MMIO_CHV_SPCSC(plane_id, reg) \ + _MMIO(VLV_DISPLAY_BASE + ((plane_id) - PLANE_SPRITE0) * 0x1000 + (reg)) + +#define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900) +#define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904) +#define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908) +#define SPCSC_OOFF_MASK REG_GENMASK(26, 16) +#define SPCSC_OOFF(x) REG_FIELD_PREP(SPCSC_OOFF_MASK, (x) & 0x7ff) /* s11 */ +#define SPCSC_IOFF_MASK REG_GENMASK(10, 0) +#define SPCSC_IOFF(x) REG_FIELD_PREP(SPCSC_IOFF_MASK, (x) & 0x7ff) /* s11 */ + +#define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c) +#define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910) +#define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914) +#define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918) +#define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c) +#define SPCSC_C1_MASK REG_GENMASK(30, 16) +#define SPCSC_C1(x) REG_FIELD_PREP(SPCSC_C1_MASK, (x) & 0x7fff) /* s3.12 */ +#define SPCSC_C0_MASK REG_GENMASK(14, 0) +#define SPCSC_C0(x) REG_FIELD_PREP(SPCSC_C0_MASK, (x) & 0x7fff) /* s3.12 */ + +#define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920) +#define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924) +#define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928) +#define SPCSC_IMAX_MASK REG_GENMASK(26, 16) +#define SPCSC_IMAX(x) REG_FIELD_PREP(SPCSC_IMAX_MASK, (x) & 0x7ff) /* s11 */ +#define SPCSC_IMIN_MASK REG_GENMASK(10, 0) +#define SPCSC_IMIN(x) REG_FIELD_PREP(SPCSC_IMIN_MASK, (x) & 0x7ff) /* s11 */ + +#define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c) +#define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930) +#define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934) +#define SPCSC_OMAX_MASK REG_GENMASK(25, 16) +#define SPCSC_OMAX(x) REG_FIELD_PREP(SPCSC_OMAX_MASK, (x)) /* u10 */ +#define SPCSC_OMIN_MASK REG_GENMASK(9, 0) +#define SPCSC_OMIN(x) REG_FIELD_PREP(SPCSC_OMIN_MASK, (x)) /* u10 */ + +#endif /* __INTEL_SPRITE_REGS__ */ diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 6b374d481c..9887967b2c 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -100,11 +100,9 @@ static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc) static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); struct intel_tc_port *tc = to_tc_port(dig_port); - return intel_phy_is_tc(i915, phy) && tc->mode == mode; + return intel_encoder_is_tc(&dig_port->base) && tc->mode == mode; } bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port) @@ -124,11 +122,9 @@ bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port) bool 
intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); struct intel_tc_port *tc = to_tc_port(dig_port); - return intel_phy_is_tc(i915, phy) && !tc->legacy_port; + return intel_encoder_is_tc(&dig_port->base) && !tc->legacy_port; } /* @@ -254,8 +250,7 @@ assert_tc_cold_blocked(struct intel_tc_port *tc) static enum intel_display_power_domain tc_port_power_domain(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = tc_to_i915(tc); - enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port); + enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base); return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1; } @@ -302,7 +297,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base); intel_wakeref_t wakeref; u32 val, pin_assignment; @@ -375,9 +370,8 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_tc_port *tc = to_tc_port(dig_port); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); - if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT) + if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT) return 4; assert_tc_cold_blocked(tc); @@ -458,9 +452,7 @@ static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc, static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia) { - struct drm_i915_private *i915 = tc_to_i915(tc); - enum port port = tc->dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(i915, port); + enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base); /* * Each Modular FIA instance houses 2 TC ports. 
In SOC that has more @@ -812,7 +804,7 @@ static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc) static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); - enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port); + enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base); u32 val; assert_display_core_power_enabled(tc); @@ -1635,10 +1627,7 @@ static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc) bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); - - if (!intel_phy_is_tc(i915, phy)) + if (!intel_encoder_is_tc(&dig_port->base)) return false; return __intel_tc_port_link_needs_reset(to_tc_port(dig_port)); @@ -1740,11 +1729,9 @@ bool intel_tc_port_link_reset(struct intel_digital_port *dig_port) void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); struct intel_tc_port *tc = to_tc_port(dig_port); - if (!intel_phy_is_tc(i915, phy)) + if (!intel_encoder_is_tc(&dig_port->base)) return; cancel_delayed_work(&tc->link_reset_work); @@ -1861,7 +1848,7 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_tc_port *tc; enum port port = dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(i915, port); + enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base); if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE)) return -EINVAL; diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 2b77d399f1..9df0f12639 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -885,7 +885,8 @@ struct intel_tv_connector_state { bool bypass_vfilter; }; -#define to_intel_tv_connector_state(x) container_of(x, struct intel_tv_connector_state, base) +#define to_intel_tv_connector_state(conn_state) \ + container_of_const((conn_state), struct intel_tv_connector_state, base) static struct drm_connector_state * intel_tv_connector_duplicate_state(struct drm_connector *connector) @@ -961,16 +962,13 @@ intel_tv_mode_valid(struct drm_connector *connector, { struct drm_i915_private *i915 = to_i915(connector->dev); const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); - int max_dotclk = i915->max_dotclk_freq; + int max_dotclk = i915->display.cdclk.max_dotclk_freq; enum drm_mode_status status; status = intel_cpu_transcoder_mode_valid(i915, mode); if (status != MODE_OK) return status; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) - return MODE_NO_DBLESCAN; - if (mode->clock > max_dotclk) return MODE_CLOCK_HIGH; diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index b50cd0dcab..228702c0e4 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -485,6 +485,7 @@ struct child_device_config { u8 hdmi_iboost_level:4; /* 196+ */ u8 dp_max_link_rate:3; /* 216+ */ u8 dp_max_link_rate_reserved:5; /* 216+ */ + u8 efp_index; /* 256+ */ } __packed; struct bdb_general_definitions { @@ -602,22 +603,22 @@ struct bdb_driver_features { u8 custom_vbt_version; /* 155+ */ /* Driver Feature Flags */ - u16 rmpm_enabled:1; /* 
165+ */ - u16 s2ddt_enabled:1; /* 165+ */ - u16 dpst_enabled:1; /* 165-227 */ - u16 bltclt_enabled:1; /* 165+ */ - u16 adb_enabled:1; /* 165-227 */ - u16 drrs_enabled:1; /* 165-227 */ - u16 grs_enabled:1; /* 165+ */ - u16 gpmt_enabled:1; /* 165+ */ - u16 tbt_enabled:1; /* 165+ */ + u16 rmpm_enabled:1; /* 159+ */ + u16 s2ddt_enabled:1; /* 159+ */ + u16 dpst_enabled:1; /* 159-227 */ + u16 bltclt_enabled:1; /* 159+ */ + u16 adb_enabled:1; /* 159-227 */ + u16 drrs_enabled:1; /* 159-227 */ + u16 grs_enabled:1; /* 159+ */ + u16 gpmt_enabled:1; /* 159+ */ + u16 tbt_enabled:1; /* 159+ */ u16 psr_enabled:1; /* 165-227 */ u16 ips_enabled:1; /* 165+ */ - u16 dpfs_enabled:1; /* 165+ */ + u16 dfps_enabled:1; /* 165+ */ u16 dmrrs_enabled:1; /* 174-227 */ u16 adt_enabled:1; /* ???-228 */ u16 hpd_wake:1; /* 201-240 */ - u16 pc_feature_valid:1; + u16 pc_feature_valid:1; /* 159+ */ } __packed; /* @@ -880,11 +881,12 @@ struct bdb_lvds_lfp_data_tail { struct lfp_backlight_data_entry { u8 type:2; u8 active_low_pwm:1; - u8 obsolete1:5; + u8 i2c_pin:3; /* obsolete since ? */ + u8 i2c_speed:2; /* obsolete since ? */ u16 pwm_freq_hz; u8 min_brightness; /* ???-233 */ - u8 obsolete2; - u8 obsolete3; + u8 i2c_address; /* obsolete since ? */ + u8 i2c_command; /* obsolete since ? */ } __packed; struct lfp_backlight_control_method { @@ -900,8 +902,8 @@ struct lfp_brightness_level { struct bdb_lfp_backlight_data { u8 entry_size; struct lfp_backlight_data_entry data[16]; - u8 level[16]; /* ???-233 */ - struct lfp_backlight_control_method backlight_control[16]; + u8 level[16]; /* 162-233 */ + struct lfp_backlight_control_method backlight_control[16]; /* 191+ */ struct lfp_brightness_level brightness_level[16]; /* 234+ */ struct lfp_brightness_level brightness_min_level[16]; /* 234+ */ u8 brightness_precision_bits[16]; /* 236+ */ @@ -912,7 +914,7 @@ struct bdb_lfp_backlight_data { * Block 44 - LFP Power Conservation Features Block */ struct lfp_power_features { - u8 reserved1:1; + u8 dpst_support:1; /* ???-159 */ u8 power_conservation_pref:3; u8 reserved2:1; u8 lace_enabled_status:1; /* 210+ */ diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index f542ee1db1..894ee97b3e 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -9,6 +9,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_vrr.h" +#include "intel_dp.h" bool intel_vrr_is_capable(struct intel_connector *connector) { @@ -113,6 +114,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; const struct drm_display_info *info = &connector->base.display_info; int vmin, vmax; @@ -172,6 +174,14 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, if (crtc_state->uapi.vrr_enabled) { crtc_state->vrr.enable = true; crtc_state->mode_flags |= I915_MODE_FLAG_VRR; + if (intel_dp_as_sdp_supported(intel_dp)) { + crtc_state->vrr.vsync_start = + (crtc_state->hw.adjusted_mode.crtc_vtotal - + crtc_state->hw.adjusted_mode.vsync_start); + crtc_state->vrr.vsync_end = + (crtc_state->hw.adjusted_mode.crtc_vtotal - + crtc_state->hw.adjusted_mode.vsync_end); + } } } @@ -247,6 +257,12 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state) return; intel_de_write(dev_priv, 
TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN); + + if (HAS_AS_SDP(dev_priv)) + intel_de_write(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder), + VRR_VSYNC_END(crtc_state->vrr.vsync_end) | + VRR_VSYNC_START(crtc_state->vrr.vsync_start)); + intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state)); } @@ -265,13 +281,16 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) intel_de_wait_for_clear(dev_priv, TRANS_VRR_STATUS(cpu_transcoder), VRR_STATUS_VRR_EN_LIVE, 1000); intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0); + + if (HAS_AS_SDP(dev_priv)) + intel_de_write(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder), 0); } void intel_vrr_get_config(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - u32 trans_vrr_ctl; + u32 trans_vrr_ctl, trans_vrr_vsync; trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder)); @@ -291,6 +310,16 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state) crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1; } - if (crtc_state->vrr.enable) + if (crtc_state->vrr.enable) { crtc_state->mode_flags |= I915_MODE_FLAG_VRR; + + if (HAS_AS_SDP(dev_priv)) { + trans_vrr_vsync = + intel_de_read(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder)); + crtc_state->vrr.vsync_start = + REG_FIELD_GET(VRR_VSYNC_START_MASK, trans_vrr_vsync); + crtc_state->vrr.vsync_end = + REG_FIELD_GET(VRR_VSYNC_END_MASK, trans_vrr_vsync); + } + } } diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c index 8a934bada6..baa601d278 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.c +++ b/drivers/gpu/drm/i915/display/skl_scaler.c @@ -213,10 +213,11 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, * The pipe scaler does not use all the bits of PIPESRC, at least * on the earlier platforms. So even when we're scaling a plane * the *pipe* source size must not be too large. For simplicity - * we assume the limits match the scaler source size limits. Might - * not be 100% accurate on all platforms, but good enough for now. + * we assume the limits match the scaler destination size limits. + * Might not be 100% accurate on all platforms, but good enough for + * now. 
*/ - if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) { + if (pipe_src_w > max_dst_w || pipe_src_h > max_dst_h) { drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: pipe src size %ux%u " "is out of scaler range\n", diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index c6b9be80d8..7c6187b447 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -6,18 +6,19 @@ #include #include "i915_drv.h" -#include "i915_fixed.h" #include "i915_reg.h" #include "i9xx_wm.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_bw.h" +#include "intel_cdclk.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display.h" #include "intel_display_power.h" #include "intel_display_types.h" #include "intel_fb.h" +#include "intel_fixed.h" #include "intel_pcode.h" #include "intel_wm.h" #include "skl_watermark.h" @@ -69,7 +70,7 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915) return DISPLAY_VER(i915) == 9; } -static bool +bool intel_has_sagv(struct drm_i915_private *i915) { return HAS_SAGV(i915) && @@ -2601,10 +2602,17 @@ skl_compute_ddb(struct intel_atomic_state *state) return ret; } - if (HAS_MBUS_JOINING(i915)) + if (HAS_MBUS_JOINING(i915)) { new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes); + if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { + ret = intel_cdclk_state_set_joined_mbus(state, new_dbuf_state->joined_mbus); + if (ret) + return ret; + } + } + for_each_intel_crtc(&i915->drm, crtc) { enum pipe pipe = crtc->pipe; @@ -2628,13 +2636,6 @@ skl_compute_ddb(struct intel_atomic_state *state) if (ret) return ret; - if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { - /* TODO: Implement vblank synchronized MBUS joining changes */ - ret = intel_modeset_all_pipes_late(state, "MBUS joining change"); - if (ret) - return ret; - } - drm_dbg_kms(&i915->drm, "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n", old_dbuf_state->enabled_slices, @@ -3057,6 +3058,8 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915) if (HAS_MBUS_JOINING(i915)) dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN; + dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(i915, &i915->display.cdclk.hw); + for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); @@ -3530,85 +3533,6 @@ int intel_dbuf_init(struct drm_i915_private *i915) return 0; } -/* - * Configure MBUS_CTL and all DBUF_CTL_S of each slice to join_mbus state before - * update the request state of all DBUS slices. - */ -static void update_mbus_pre_enable(struct intel_atomic_state *state) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - u32 mbus_ctl, dbuf_min_tracker_val; - enum dbuf_slice slice; - const struct intel_dbuf_state *dbuf_state = - intel_atomic_get_new_dbuf_state(state); - - if (!HAS_MBUS_JOINING(i915)) - return; - - /* - * TODO: Implement vblank synchronized MBUS joining changes. - * Must be properly coordinated with dbuf reprogramming. 
- */ - if (dbuf_state->joined_mbus) { - mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN | - MBUS_JOIN_PIPE_SELECT_NONE; - dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3); - } else { - mbus_ctl = MBUS_HASHING_MODE_2x2 | - MBUS_JOIN_PIPE_SELECT_NONE; - dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1); - } - - intel_de_rmw(i915, MBUS_CTL, - MBUS_HASHING_MODE_MASK | MBUS_JOIN | - MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl); - - for_each_dbuf_slice(i915, slice) - intel_de_rmw(i915, DBUF_CTL_S(slice), - DBUF_MIN_TRACKER_STATE_SERVICE_MASK, - dbuf_min_tracker_val); -} - -void intel_dbuf_pre_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - const struct intel_dbuf_state *new_dbuf_state = - intel_atomic_get_new_dbuf_state(state); - const struct intel_dbuf_state *old_dbuf_state = - intel_atomic_get_old_dbuf_state(state); - - if (!new_dbuf_state || - (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices && - new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)) - return; - - WARN_ON(!new_dbuf_state->base.changed); - - update_mbus_pre_enable(state); - gen9_dbuf_slices_update(i915, - old_dbuf_state->enabled_slices | - new_dbuf_state->enabled_slices); -} - -void intel_dbuf_post_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - const struct intel_dbuf_state *new_dbuf_state = - intel_atomic_get_new_dbuf_state(state); - const struct intel_dbuf_state *old_dbuf_state = - intel_atomic_get_old_dbuf_state(state); - - if (!new_dbuf_state || - (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices && - new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)) - return; - - WARN_ON(!new_dbuf_state->base.changed); - - gen9_dbuf_slices_update(i915, - new_dbuf_state->enabled_slices); -} - static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes) { switch (pipe) { @@ -3628,14 +3552,12 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes) return false; } -void intel_mbus_dbox_update(struct intel_atomic_state *state) +static void intel_mbus_dbox_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state; - const struct intel_crtc_state *new_crtc_state; const struct intel_crtc *crtc; u32 val = 0; - int i; if (DISPLAY_VER(i915) < 11) return; @@ -3679,12 +3601,9 @@ void intel_mbus_dbox_update(struct intel_atomic_state *state) val |= MBUS_DBOX_B_CREDIT(8); } - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, new_dbuf_state->active_pipes) { u32 pipe_val = val; - if (!new_crtc_state->hw.active) - continue; - if (DISPLAY_VER(i915) >= 14) { if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, new_dbuf_state->active_pipes)) @@ -3697,6 +3616,217 @@ void intel_mbus_dbox_update(struct intel_atomic_state *state) } } +int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state, + int ratio) +{ + struct intel_dbuf_state *dbuf_state; + + dbuf_state = intel_atomic_get_dbuf_state(state); + if (IS_ERR(dbuf_state)) + return PTR_ERR(dbuf_state); + + dbuf_state->mdclk_cdclk_ratio = ratio; + + return intel_atomic_lock_global_state(&dbuf_state->base); +} + +void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915, + int ratio, bool joined_mbus) +{ + enum dbuf_slice slice; + + if (!HAS_MBUS_JOINING(i915)) + return; + + if 
(DISPLAY_VER(i915) >= 20) + intel_de_rmw(i915, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK, + MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1)); + + if (joined_mbus) + ratio *= 2; + + drm_dbg_kms(&i915->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n", + ratio, str_yes_no(joined_mbus)); + + for_each_dbuf_slice(i915, slice) + intel_de_rmw(i915, DBUF_CTL_S(slice), + DBUF_MIN_TRACKER_STATE_SERVICE_MASK, + DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1)); +} + +static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dbuf_state *old_dbuf_state = + intel_atomic_get_old_dbuf_state(state); + const struct intel_dbuf_state *new_dbuf_state = + intel_atomic_get_new_dbuf_state(state); + int mdclk_cdclk_ratio; + + if (intel_cdclk_is_decreasing_later(state)) { + /* cdclk/mdclk will be changed later by intel_set_cdclk_post_plane_update() */ + mdclk_cdclk_ratio = old_dbuf_state->mdclk_cdclk_ratio; + } else { + /* cdclk/mdclk already changed by intel_set_cdclk_pre_plane_update() */ + mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio; + } + + intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio, + new_dbuf_state->joined_mbus); +} + +static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state, + const struct intel_dbuf_state *dbuf_state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + enum pipe pipe = ffs(dbuf_state->active_pipes) - 1; + const struct intel_crtc_state *new_crtc_state; + struct intel_crtc *crtc; + + drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus); + drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes)); + + crtc = intel_crtc_for_pipe(i915, pipe); + new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + + if (new_crtc_state && !intel_crtc_needs_modeset(new_crtc_state)) + return pipe; + else + return INVALID_PIPE; +} + +static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state, + enum pipe pipe) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dbuf_state *old_dbuf_state = + intel_atomic_get_old_dbuf_state(state); + const struct intel_dbuf_state *new_dbuf_state = + intel_atomic_get_new_dbuf_state(state); + u32 mbus_ctl; + + drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n", + str_yes_no(old_dbuf_state->joined_mbus), + str_yes_no(new_dbuf_state->joined_mbus), + pipe != INVALID_PIPE ? 
pipe_name(pipe) : '*'); + + if (new_dbuf_state->joined_mbus) + mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN; + else + mbus_ctl = MBUS_HASHING_MODE_2x2; + + if (pipe != INVALID_PIPE) + mbus_ctl |= MBUS_JOIN_PIPE_SELECT(pipe); + else + mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE; + + intel_de_rmw(i915, MBUS_CTL, + MBUS_HASHING_MODE_MASK | MBUS_JOIN | + MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl); +} + +void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state) +{ + const struct intel_dbuf_state *new_dbuf_state = + intel_atomic_get_new_dbuf_state(state); + const struct intel_dbuf_state *old_dbuf_state = + intel_atomic_get_old_dbuf_state(state); + + if (!new_dbuf_state) + return; + + if (!old_dbuf_state->joined_mbus && new_dbuf_state->joined_mbus) { + enum pipe pipe = intel_mbus_joined_pipe(state, new_dbuf_state); + + WARN_ON(!new_dbuf_state->base.changed); + + intel_dbuf_mbus_join_update(state, pipe); + intel_mbus_dbox_update(state); + intel_dbuf_mdclk_min_tracker_update(state); + } +} + +void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dbuf_state *new_dbuf_state = + intel_atomic_get_new_dbuf_state(state); + const struct intel_dbuf_state *old_dbuf_state = + intel_atomic_get_old_dbuf_state(state); + + if (!new_dbuf_state) + return; + + if (old_dbuf_state->joined_mbus && !new_dbuf_state->joined_mbus) { + enum pipe pipe = intel_mbus_joined_pipe(state, old_dbuf_state); + + WARN_ON(!new_dbuf_state->base.changed); + + intel_dbuf_mdclk_min_tracker_update(state); + intel_mbus_dbox_update(state); + intel_dbuf_mbus_join_update(state, pipe); + + if (pipe != INVALID_PIPE) { + struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); + + intel_crtc_wait_for_next_vblank(crtc); + } + } else if (old_dbuf_state->joined_mbus == new_dbuf_state->joined_mbus && + old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) { + WARN_ON(!new_dbuf_state->base.changed); + + intel_dbuf_mdclk_min_tracker_update(state); + intel_mbus_dbox_update(state); + } + +} + +void intel_dbuf_pre_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dbuf_state *new_dbuf_state = + intel_atomic_get_new_dbuf_state(state); + const struct intel_dbuf_state *old_dbuf_state = + intel_atomic_get_old_dbuf_state(state); + u8 old_slices, new_slices; + + if (!new_dbuf_state) + return; + + old_slices = old_dbuf_state->enabled_slices; + new_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices; + + if (old_slices == new_slices) + return; + + WARN_ON(!new_dbuf_state->base.changed); + + gen9_dbuf_slices_update(i915, new_slices); +} + +void intel_dbuf_post_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dbuf_state *new_dbuf_state = + intel_atomic_get_new_dbuf_state(state); + const struct intel_dbuf_state *old_dbuf_state = + intel_atomic_get_old_dbuf_state(state); + u8 old_slices, new_slices; + + if (!new_dbuf_state) + return; + + old_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices; + new_slices = new_dbuf_state->enabled_slices; + + if (old_slices == new_slices) + return; + + WARN_ON(!new_dbuf_state->base.changed); + + gen9_dbuf_slices_update(i915, new_slices); +} + static int skl_watermark_ipc_status_show(struct seq_file *m, void *data) { struct drm_i915_private *i915 = m->private; diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h 
b/drivers/gpu/drm/i915/display/skl_watermark.h index e3d1d74a7b..91f92c0e70 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.h +++ b/drivers/gpu/drm/i915/display/skl_watermark.h @@ -25,6 +25,7 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state); void intel_sagv_post_plane_update(struct intel_atomic_state *state); bool intel_can_enable_sagv(struct drm_i915_private *i915, const struct intel_bw_state *bw_state); +bool intel_has_sagv(struct drm_i915_private *i915); u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915, const struct skl_ddb_entry *entry); @@ -58,22 +59,31 @@ struct intel_dbuf_state { u8 slices[I915_MAX_PIPES]; u8 enabled_slices; u8 active_pipes; + u8 mdclk_cdclk_ratio; bool joined_mbus; }; struct intel_dbuf_state * intel_atomic_get_dbuf_state(struct intel_atomic_state *state); -#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base) +#define to_intel_dbuf_state(global_state) \ + container_of_const((global_state), struct intel_dbuf_state, base) + #define intel_atomic_get_old_dbuf_state(state) \ to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj)) #define intel_atomic_get_new_dbuf_state(state) \ to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj)) int intel_dbuf_init(struct drm_i915_private *i915); +int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state, + int ratio); + void intel_dbuf_pre_plane_update(struct intel_atomic_state *state); void intel_dbuf_post_plane_update(struct intel_atomic_state *state); -void intel_mbus_dbox_update(struct intel_atomic_state *state); +void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915, + int ratio, bool joined_mbus); +void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state); +void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state); #endif /* __SKL_WATERMARK_H__ */ diff --git a/drivers/gpu/drm/i915/display/skl_watermark_regs.h b/drivers/gpu/drm/i915/display/skl_watermark_regs.h index 20b30c9a66..269163fa33 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark_regs.h +++ b/drivers/gpu/drm/i915/display/skl_watermark_regs.h @@ -32,14 +32,16 @@ #define MBUS_BBOX_CTL_S1 _MMIO(0x45040) #define MBUS_BBOX_CTL_S2 _MMIO(0x45044) -#define MBUS_CTL _MMIO(0x4438C) -#define MBUS_JOIN REG_BIT(31) -#define MBUS_HASHING_MODE_MASK REG_BIT(30) -#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0) -#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1) -#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26) -#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe) -#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7) +#define MBUS_CTL _MMIO(0x4438C) +#define MBUS_JOIN REG_BIT(31) +#define MBUS_HASHING_MODE_MASK REG_BIT(30) +#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0) +#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1) +#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26) +#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe) +#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7) +#define MBUS_TRANSLATION_THROTTLE_MIN_MASK REG_GENMASK(15, 13) +#define MBUS_TRANSLATION_THROTTLE_MIN(val) REG_FIELD_PREP(MBUS_TRANSLATION_THROTTLE_MIN_MASK, val) /* Watermark register definitions for SKL */ #define _CUR_WM_A_0 0x70140 diff --git a/drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h 
b/drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h new file mode 100644 index 0000000000..2b83f334b1 --- /dev/null +++ b/drivers/gpu/drm/i915/display/vlv_dpio_phy_regs.h @@ -0,0 +1,309 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __VLV_DPIO_PHY_REGS_H__ +#define __VLV_DPIO_PHY_REGS_H__ + +#include "intel_display_reg_defs.h" + +#define _VLV_CMN(dw) (0x8100 + (dw) * 4) +#define _CHV_CMN(cl, dw) (0x8100 - (cl) * 0x80 + (dw) * 4) +#define _VLV_PLL(ch, dw) (0x8000 + (ch) * 0x20 + (dw) * 4) /* dw 0-7,16-23 */ +#define _CHV_PLL(ch, dw) (0x8000 + (ch) * 0x180 + (dw) * 4) +#define _VLV_REF(dw) (0x80a0 + ((dw) - 8) * 4) /* dw 8-15 */ +#define _VLV_PCS(ch, spline, dw) (0x200 + (ch) * 0x2400 + (spline) * 0x200 + (dw) * 4) +#define _VLV_PCS_GRP(ch, dw) (0x8200 + (ch) * 0x200 + (dw) * 4) +#define _VLV_PCS_BCAST(dw) (0xc000 + (dw) * 4) +#define _VLV_TX(ch, lane, dw) (0x80 + (ch) * 0x2400 + (lane) * 0x200 + (dw) * 4) +#define _VLV_TX_GRP(ch, dw) (0x8280 + (ch) * 0x200 + (dw) * 4) +#define _VLV_TX_BCAST(dw) (0xc080 + (dw) * 4) + +/* + * Per pipe/PLL DPIO regs + */ +#define VLV_PLL_DW3(ch) _VLV_PLL((ch), 3) +#define DPIO_S1_DIV_MASK REG_GENMASK(30, 28) +#define DPIO_S1_DIV(s1) REG_FIELD_PREP(DPIO_S1_DIV_MASK, (s1)) +#define DPIO_S1_DIV_DAC 0 /* 10, DAC 25-225M rate */ +#define DPIO_S1_DIV_HDMIDP 1 /* 5, DAC 225-400M rate */ +#define DPIO_S1_DIV_LVDS1 2 /* 14 */ +#define DPIO_S1_DIV_LVDS2 3 /* 7 */ +#define DPIO_K_DIV_MASK REG_GENMASK(27, 24) +#define DPIO_K_DIV(k) REG_FIELD_PREP(DPIO_K_DIV_MASK, (k)) +#define DPIO_P1_DIV_MASK REG_GENMASK(23, 21) +#define DPIO_P1_DIV(p1) REG_FIELD_PREP(DPIO_P1_DIV_MASK, (p1)) +#define DPIO_P2_DIV_MASK REG_GENMASK(20, 16) +#define DPIO_P2_DIV(p2) REG_FIELD_PREP(DPIO_P2_DIV_MASK, (p2)) +#define DPIO_N_DIV_MASK REG_GENMASK(15, 12) +#define DPIO_N_DIV(n) REG_FIELD_PREP(DPIO_N_DIV_MASK, (n)) +#define DPIO_ENABLE_CALIBRATION REG_BIT(11) +#define DPIO_M1_DIV_MASK REG_GENMASK(10, 8) +#define DPIO_M1_DIV(m1) REG_FIELD_PREP(DPIO_M1_DIV_MASK, (m1)) +#define DPIO_M2_DIV_MASK REG_GENMASK(7, 0) +#define DPIO_M2_DIV(m2) REG_FIELD_PREP(DPIO_M2_DIV_MASK, (m2)) + +#define VLV_PLL_DW5(ch) _VLV_PLL((ch), 5) +#define DPIO_REFSEL_OVERRIDE REG_BIT(27) +#define DPIO_PLL_MODESEL_MASK REG_GENMASK(26, 24) +#define DPIO_BIAS_CURRENT_CTL_MASK REG_GENMASK(22, 20) /* always 0x7 */ +#define DPIO_PLL_REFCLK_SEL_MASK REG_GENMASK(17, 16) +#define DPIO_DRIVER_CTL_MASK REG_GENMASK(15, 12) /* always set to 0x8 */ +#define DPIO_CLK_BIAS_CTL_MASK REG_GENMASK(11, 8) /* always set to 0x5 */ + +#define VLV_PLL_DW7(ch) _VLV_PLL((ch), 7) + +#define VLV_PLL_DW16(ch) _VLV_PLL((ch), 16) + +#define VLV_PLL_DW17(ch) _VLV_PLL((ch), 17) + +#define VLV_PLL_DW18(ch) _VLV_PLL((ch), 18) + +#define VLV_PLL_DW19(ch) _VLV_PLL((ch), 19) + +#define VLV_REF_DW11 _VLV_REF(11) + +#define VLV_CMN_DW0 _VLV_CMN(0) + +/* + * Per DDI channel DPIO regs + */ +#define VLV_PCS_DW0_GRP(ch) _VLV_PCS_GRP((ch), 0) +#define VLV_PCS01_DW0(ch) _VLV_PCS((ch), 0, 0) +#define VLV_PCS23_DW0(ch) _VLV_PCS((ch), 1, 0) +#define DPIO_PCS_TX_LANE2_RESET REG_BIT(16) +#define DPIO_PCS_TX_LANE1_RESET REG_BIT(7) +#define DPIO_LEFT_TXFIFO_RST_MASTER2 REG_BIT(4) +#define DPIO_RIGHT_TXFIFO_RST_MASTER2 REG_BIT(3) + +#define VLV_PCS_DW1_GRP(ch) _VLV_PCS_GRP((ch), 1) +#define VLV_PCS01_DW1(ch) _VLV_PCS((ch), 0, 1) +#define VLV_PCS23_DW1(ch) _VLV_PCS((ch), 1, 1) +#define CHV_PCS_REQ_SOFTRESET_EN REG_BIT(23) +#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN REG_BIT(22) +#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN REG_BIT(21) +#define 
DPIO_PCS_CLK_DATAWIDTH_MASK REG_GENMASK(7, 6) +#define DPIO_PCS_CLK_DATAWIDTH_8_10 REG_FIELD_PREP(DPIO_PCS_CLK_DATAWIDTH_MASK, 1) +#define DPIO_PCS_CLK_DATAWIDTH_16_20 REG_FIELD_PREP(DPIO_PCS_CLK_DATAWIDTH_MASK, 2) +#define DPIO_PCS_CLK_DATAWIDTH_32_40 REG_FIELD_PREP(DPIO_PCS_CLK_DATAWIDTH_MASK, 3) +#define DPIO_PCS_CLK_SOFT_RESET REG_BIT(5) + +#define VLV_PCS_DW8_GRP(ch) _VLV_PCS_GRP((ch), 8) +#define VLV_PCS01_DW8(ch) _VLV_PCS((ch), 0, 8) +#define VLV_PCS23_DW8(ch) _VLV_PCS((ch), 1, 8) +#define DPIO_PCS_USEDCLKCHANNEL REG_BIT(21) +#define DPIO_PCS_USEDCLKCHANNEL_OVRRIDE REG_BIT(20) + +#define VLV_PCS_DW9_GRP(ch) _VLV_PCS_GRP((ch), 9) +#define VLV_PCS01_DW9(ch) _VLV_PCS((ch), 0, 9) +#define VLV_PCS23_DW9(ch) _VLV_PCS((ch), 1, 9) +#define DPIO_PCS_TX2MARGIN_MASK REG_GENMASK(15, 13) +#define DPIO_PCS_TX2MARGIN_000 REG_FIELD_PREP(DPIO_PCS_TX2MARGIN_MASK, 0) +#define DPIO_PCS_TX2MARGIN_101 REG_FIELD_PREP(DPIO_PCS_TX2MARGIN_MASK, 1) +#define DPIO_PCS_TX1MARGIN_MASK REG_GENMASK(12, 10) +#define DPIO_PCS_TX1MARGIN_000 REG_FIELD_PREP(DPIO_PCS_TX1MARGIN_MASK, 0) +#define DPIO_PCS_TX1MARGIN_101 REG_FIELD_PREP(DPIO_PCS_TX1MARGIN_MASK, 1) + +#define VLV_PCS_DW10_GRP(ch) _VLV_PCS_GRP((ch), 10) +#define VLV_PCS01_DW10(ch) _VLV_PCS((ch), 0, 10) +#define VLV_PCS23_DW10(ch) _VLV_PCS((ch), 1, 10) +#define DPIO_PCS_SWING_CALC_TX1_TX3 REG_BIT(31) +#define DPIO_PCS_SWING_CALC_TX0_TX2 REG_BIT(30) +#define DPIO_PCS_TX2DEEMP_MASK REG_GENMASK(27, 24) +#define DPIO_PCS_TX2DEEMP_9P5 REG_FIELD_PREP(DPIO_PCS_TX2DEEMP_MASK, 0) +#define DPIO_PCS_TX2DEEMP_6P0 REG_FIELD_PREP(DPIO_PCS_TX2DEEMP_MASK, 2) +#define DPIO_PCS_TX1DEEMP_MASK REG_GENMASK(19, 16) +#define DPIO_PCS_TX1DEEMP_9P5 REG_FIELD_PREP(DPIO_PCS_TX1DEEMP_MASK, 0) +#define DPIO_PCS_TX1DEEMP_6P0 REG_FIELD_PREP(DPIO_PCS_TX1DEEMP_MASK, 2) + +#define VLV_PCS_DW11_GRP(ch) _VLV_PCS_GRP((ch), 11) +#define VLV_PCS01_DW11(ch) _VLV_PCS((ch), 0, 11) +#define VLV_PCS23_DW11(ch) _VLV_PCS((ch), 1, 11) +#define DPIO_TX2_STAGGER_MASK_MASK REG_GENMASK(28, 24) +#define DPIO_TX2_STAGGER_MASK(x) REG_FIELD_PREP(DPIO_TX2_STAGGER_MASK_MASK, (x)) +#define DPIO_LANEDESKEW_STRAP_OVRD REG_BIT(3) +#define DPIO_LEFT_TXFIFO_RST_MASTER REG_BIT(1) +#define DPIO_RIGHT_TXFIFO_RST_MASTER REG_BIT(0) + +#define VLV_PCS_DW12_GRP(ch) _VLV_PCS_GRP((ch), 12) +#define VLV_PCS01_DW12(ch) _VLV_PCS((ch), 0, 12) +#define VLV_PCS23_DW12(ch) _VLV_PCS((ch), 1, 12) +#define DPIO_TX2_STAGGER_MULT_MASK REG_GENMASK(22, 20) +#define DPIO_TX2_STAGGER_MULT(x) REG_FIELD_PREP(DPIO_TX2_STAGGER_MULT_MASK, (x)) +#define DPIO_TX1_STAGGER_MULT_MASK REG_GENMASK(20, 16) +#define DPIO_TX1_STAGGER_MULT(x) REG_FIELD_PREP(DPIO_TX1_STAGGER_MULT_MASK, (x)) +#define DPIO_TX1_STAGGER_MASK_MASK REG_GENMASK(12, 8) +#define DPIO_TX1_STAGGER_MASK(x) REG_FIELD_PREP(DPIO_TX1_STAGGER_MASK_MASK, (x)) +#define DPIO_LANESTAGGER_STRAP_OVRD REG_BIT(6) +#define DPIO_LANESTAGGER_STRAP_MASK REG_GENMASK(4, 0) +#define DPIO_LANESTAGGER_STRAP(x) REG_FIELD_PREP(DPIO_LANESTAGGER_STRAP_MASK, (x)) + +#define VLV_PCS_DW14_GRP(ch) _VLV_PCS_GRP((ch), 14) +#define VLV_PCS01_DW14(ch) _VLV_PCS((ch), 0, 14) +#define VLV_PCS23_DW14(ch) _VLV_PCS((ch), 1, 14) + +#define VLV_PCS_DW17_BCAST _VLV_PCS_BCAST(17) +#define VLV_PCS_DW17_GRP(ch) _VLV_PCS_GRP((ch), 17) +#define VLV_PCS01_DW17(ch) _VLV_PCS((ch), 0, 17) +#define VLV_PCS23_DW17(ch) _VLV_PCS((ch), 1, 17) + +#define VLV_PCS_DW23_GRP(ch) _VLV_PCS_GRP((ch), 23) +#define VLV_PCS01_DW23(ch) _VLV_PCS((ch), 0, 23) +#define VLV_PCS23_DW23(ch) _VLV_PCS((ch), 1, 23) + +#define VLV_TX_DW2_GRP(ch) _VLV_TX_GRP((ch), 2) 
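Aside: the DPIO offsets above are plain arithmetic, _VLV_TX(ch, lane, dw) = 0x80 + ch * 0x2400 + lane * 0x200 + dw * 4 and _VLV_TX_GRP(ch, dw) = 0x8280 + ch * 0x200 + dw * 4, so individual offsets can be checked by hand. The sketch below reproduces that arithmetic as ordinary functions for illustration only; it is not the driver's code and leaves out the _MMIO() wrapping.

#include <stdio.h>

/* Mirrors _VLV_TX(): per-channel, per-lane TX dword offset. */
static unsigned int vlv_tx(unsigned int ch, unsigned int lane, unsigned int dw)
{
	return 0x80 + ch * 0x2400 + lane * 0x200 + dw * 4;
}

/* Mirrors _VLV_TX_GRP(): group (all-lane) TX dword offset. */
static unsigned int vlv_tx_grp(unsigned int ch, unsigned int dw)
{
	return 0x8280 + ch * 0x200 + dw * 4;
}

int main(void)
{
	printf("VLV_TX_DW2(0, 1)  = 0x%x\n", vlv_tx(0, 1, 2));	/* 0x288 */
	printf("VLV_TX_DW2_GRP(0) = 0x%x\n", vlv_tx_grp(0, 2));	/* 0x8288 */
	return 0;
}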
+#define VLV_TX_DW2(ch, lane) _VLV_TX((ch), (lane), 2) +#define DPIO_SWING_MARGIN000_MASK REG_GENMASK(23, 16) +#define DPIO_SWING_MARGIN000(x) REG_FIELD_PREP(DPIO_SWING_MARGIN000_MASK, (x)) +#define DPIO_UNIQ_TRANS_SCALE_MASK REG_GENMASK(15, 8) +#define DPIO_UNIQ_TRANS_SCALE(x) REG_FIELD_PREP(DPIO_UNIQ_TRANS_SCALE_MASK, (x)) + +#define VLV_TX_DW3_GRP(ch) _VLV_TX_GRP((ch), 3) +#define VLV_TX_DW3(ch, lane) _VLV_TX((ch), (lane), 3) +/* The following bit for CHV phy */ +#define DPIO_TX_UNIQ_TRANS_SCALE_EN REG_BIT(27) +#define DPIO_SWING_MARGIN101_MASK REG_GENMASK(23, 16) +#define DPIO_SWING_MARGIN101(x) REG_FIELD_PREP(DPIO_SWING_MARGIN101_MASK, (x)) + +#define VLV_TX_DW4_GRP(ch) _VLV_TX_GRP((ch), 4) +#define VLV_TX_DW4(ch, lane) _VLV_TX((ch), (lane), 4) +#define DPIO_SWING_DEEMPH9P5_MASK REG_GENMASK(31, 24) +#define DPIO_SWING_DEEMPH9P5(x) REG_FIELD_PREP(DPIO_SWING_DEEMPH9P5_MASK, (x)) +#define DPIO_SWING_DEEMPH6P0_MASK REG_GENMASK(23, 16) +#define DPIO_SWING_DEEMPH6P0_SHIFT REG_FIELD_PREP(DPIO_SWING_DEEMPH6P0_MASK, (x)) + +#define VLV_TX_DW5_GRP(ch) _VLV_TX_GRP((ch), 5) +#define VLV_TX_DW5(ch, lane) _VLV_TX((ch), (lane), 5) +#define DPIO_TX_OCALINIT_EN REG_BIT(31) + +#define VLV_TX_DW11_GRP(ch) _VLV_TX_GRP((ch), 11) +#define VLV_TX_DW11(ch, lane) _VLV_TX((ch), (lane), 11) + +#define VLV_TX_DW14_GRP(ch) _VLV_TX_GRP((ch), 14) +#define VLV_TX_DW14(ch, lane) _VLV_TX((ch), (lane), 14) + +/* CHV dpPhy registers */ +#define CHV_PLL_DW0(ch) _CHV_PLL((ch), 0) +#define DPIO_CHV_M2_DIV_MASK REG_GENMASK(7, 0) +#define DPIO_CHV_M2_DIV(m2) REG_FIELD_PREP(DPIO_CHV_M2_DIV_MASK, (m2)) + +#define CHV_PLL_DW1(ch) _CHV_PLL((ch), 1) +#define DPIO_CHV_N_DIV_MASK REG_GENMASK(11, 8) +#define DPIO_CHV_N_DIV(n) REG_FIELD_PREP(DPIO_CHV_N_DIV_MASK, (n)) +#define DPIO_CHV_M1_DIV_MASK REG_GENMASK(2, 0) +#define DPIO_CHV_M1_DIV(m1) REG_FIELD_PREP(DPIO_CHV_M1_DIV_MASK, (m1)) +#define DPIO_CHV_M1_DIV_BY_2 0 + +#define CHV_PLL_DW2(ch) _CHV_PLL((ch), 2) +#define DPIO_CHV_M2_FRAC_DIV_MASK REG_GENMASK(21, 0) +#define DPIO_CHV_M2_FRAC_DIV(m2_frac) REG_FIELD_PREP(DPIO_CHV_M2_FRAC_DIV_MASK, (m2_frac)) + +#define CHV_PLL_DW3(ch) _CHV_PLL((ch), 3) +#define DPIO_CHV_FRAC_DIV_EN REG_BIT(16) +#define DPIO_CHV_SECOND_MOD REG_BIT(8) +#define DPIO_CHV_FEEDFWD_GAIN_MASK REG_GENMASK(3, 0) +#define DPIO_CHV_FEEDFWD_GAIN(x) REG_FIELD_PREP(DPIO_CHV_FEEDFWD_GAIN_MASK, (x)) + +#define CHV_PLL_DW6(ch) _CHV_PLL((ch), 6) +#define DPIO_CHV_GAIN_CTRL_MASK REG_GENMASK(18, 16) +#define DPIO_CHV_GAIN_CTRL(x) REG_FIELD_PREP(DPIO_CHV_GAIN_CTRL_MASK, (x)) +#define DPIO_CHV_INT_COEFF_MASK REG_GENMASK(12, 8) +#define DPIO_CHV_INT_COEFF(x) REG_FIELD_PREP(DPIO_CHV_INT_COEFF_MASK, (x)) +#define DPIO_CHV_PROP_COEFF_MASK REG_GENMASK(3, 0) +#define DPIO_CHV_PROP_COEFF(x) REG_FIELD_PREP(DPIO_CHV_PROP_COEFF_MASK, (x)) + +#define CHV_PLL_DW8(ch) _CHV_PLL((ch), 8) +#define DPIO_CHV_TDC_TARGET_CNT_MASK REG_GENMASK(9, 0) +#define DPIO_CHV_TDC_TARGET_CNT(x) REG_FIELD_PREP(DPIO_CHV_TDC_TARGET_CNT_MASK, (x)) + +#define CHV_PLL_DW9(ch) _CHV_PLL((ch), 9) +#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK REG_GENMASK(3, 1) +#define DPIO_CHV_INT_LOCK_THRESHOLD(x) REG_FIELD_PREP(DPIO_CHV_INT_LOCK_THRESHOLD_MASK, (x)) +#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE REG_BIT(0) /* 1: coarse & 0 : fine */ + +#define CHV_CMN_DW0_CH0 _CHV_CMN(0, 0) +#define DPIO_ALLDL_POWERDOWN_CH0 REG_BIT(19) +#define DPIO_ANYDL_POWERDOWN_CH0 REG_BIT(18) +#define DPIO_ALLDL_POWERDOWN BIT(1) +#define DPIO_ANYDL_POWERDOWN BIT(0) + +#define CHV_CMN_DW5_CH0 _CHV_CMN(0, 5) +#define CHV_BUFRIGHTENA1_MASK 
REG_GENMASK(21, 20) +#define CHV_BUFRIGHTENA1_DISABLE REG_FIELD_PREP(CHV_BUFRIGHTENA1_MASK, 0) +#define CHV_BUFRIGHTENA1_NORMAL REG_FIELD_PREP(CHV_BUFRIGHTENA1_MASK, 1) +#define CHV_BUFRIGHTENA1_FORCE REG_FIELD_PREP(CHV_BUFRIGHTENA1_MASK, 3) +#define CHV_BUFLEFTENA1_MASK REG_GENMASK(23, 22) +#define CHV_BUFLEFTENA1_DISABLE REG_FIELD_PREP(CHV_BUFLEFTENA1_MASK, 0) +#define CHV_BUFLEFTENA1_NORMAL REG_FIELD_PREP(CHV_BUFLEFTENA1_MASK, 1) +#define CHV_BUFLEFTENA1_FORCE REG_FIELD_PREP(CHV_BUFLEFTENA1_MASK, 3) + +#define CHV_CMN_DW13_CH0 _CHV_CMN(0, 13) +#define CHV_CMN_DW0_CH1 _CHV_CMN(1, 0) +#define DPIO_CHV_S1_DIV_MASK REG_GENMASK(23, 21) +#define DPIO_CHV_S1_DIV(s1) REG_FIELD_PREP(DPIO_CHV_S1_DIV_MASK, (s1)) +#define DPIO_CHV_P1_DIV_MASK REG_GENMASK(15, 13) +#define DPIO_CHV_P1_DIV(p1) REG_FIELD_PREP(DPIO_CHV_P1_DIV_MASK, (p1)) +#define DPIO_CHV_P2_DIV_MASK REG_GENMASK(12, 8) +#define DPIO_CHV_P2_DIV(p2) REG_FIELD_PREP(DPIO_CHV_P2_DIV_MASK, (p2)) +#define DPIO_CHV_K_DIV_MASK REG_GENMASK(7, 4) +#define DPIO_CHV_K_DIV(k) REG_FIELD_PREP(DPIO_CHV_K_DIV_MASK, (k)) +#define DPIO_PLL_FREQLOCK REG_BIT(1) +#define DPIO_PLL_LOCK REG_BIT(0) +#define CHV_CMN_DW13(ch) _PIPE(ch, CHV_CMN_DW13_CH0, CHV_CMN_DW0_CH1) + +#define CHV_CMN_DW14_CH0 _CHV_CMN(0, 14) +#define CHV_CMN_DW1_CH1 _CHV_CMN(1, 1) +#define DPIO_AFC_RECAL REG_BIT(14) +#define DPIO_DCLKP_EN REG_BIT(13) +#define CHV_BUFLEFTENA2_MASK REG_GENMASK(18, 17) /* CL2 DW1 only */ +#define CHV_BUFLEFTENA2_DISABLE REG_FIELD_PREP(CHV_BUFLEFTENA2_MASK, 0) +#define CHV_BUFLEFTENA2_NORMAL REG_FIELD_PREP(CHV_BUFLEFTENA2_MASK, 1) +#define CHV_BUFLEFTENA2_FORCE REG_FIELD_PREP(CHV_BUFLEFTENA2_MASK, 3) +#define CHV_BUFRIGHTENA2_MASK REG_GENMASK(20, 19) /* CL2 DW1 only */ +#define CHV_BUFRIGHTENA2_DISABLE REG_FIELD_PREP(CHV_BUFRIGHTENA2_MASK, 0) +#define CHV_BUFRIGHTENA2_NORMAL REG_FIELD_PREP(CHV_BUFRIGHTENA2_MASK, 1) +#define CHV_BUFRIGHTENA2_FORCE REG_FIELD_PREP(CHV_BUFRIGHTENA2_MASK, 3) +#define CHV_CMN_DW14(ch) _PIPE(ch, CHV_CMN_DW14_CH0, CHV_CMN_DW1_CH1) + +#define CHV_CMN_DW19_CH0 _CHV_CMN(0, 19) +#define CHV_CMN_DW6_CH1 _CHV_CMN(1, 6) +#define DPIO_ALLDL_POWERDOWN_CH1 REG_BIT(30) /* CL2 DW6 only */ +#define DPIO_ANYDL_POWERDOWN_CH1 REG_BIT(29) /* CL2 DW6 only */ +#define DPIO_DYNPWRDOWNEN_CH1 REG_BIT(28) /* CL2 DW6 only */ +#define CHV_CMN_USEDCLKCHANNEL REG_BIT(13) +#define CHV_CMN_DW19(ch) _PIPE(ch, CHV_CMN_DW19_CH0, CHV_CMN_DW6_CH1) + +#define CHV_CMN_DW28 _CHV_CMN(0, 28) +#define DPIO_CL1POWERDOWNEN REG_BIT(23) +#define DPIO_DYNPWRDOWNEN_CH0 REG_BIT(22) +#define DPIO_SUS_CLK_CONFIG_MASK REG_GENMASK(1, 0) +#define DPIO_SUS_CLK_CONFIG_ON REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 0) +#define DPIO_SUS_CLK_CONFIG_CLKREQ REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 1) +#define DPIO_SUS_CLK_CONFIG_GATE REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 2) +#define DPIO_SUS_CLK_CONFIG_GATE_CLKREQ REG_FIELD_PREP(DPIO_SUS_CLK_CONFIG_MASK, 3) + +#define CHV_CMN_DW30 _CHV_CMN(0, 30) +#define DPIO_CL2_LDOFUSE_PWRENB REG_BIT(6) +#define DPIO_LRC_BYPASS REG_BIT(3) + +#define CHV_TX_DW0(ch, lane) _VLV_TX((ch), (lane), 0) +#define CHV_TX_DW1(ch, lane) _VLV_TX((ch), (lane), 1) +#define CHV_TX_DW2(ch, lane) _VLV_TX((ch), (lane), 2) +#define CHV_TX_DW3(ch, lane) _VLV_TX((ch), (lane), 3) +#define CHV_TX_DW4(ch, lane) _VLV_TX((ch), (lane), 4) +#define CHV_TX_DW5(ch, lane) _VLV_TX((ch), (lane), 5) +#define CHV_TX_DW6(ch, lane) _VLV_TX((ch), (lane), 6) +#define CHV_TX_DW7(ch, lane) _VLV_TX((ch), (lane), 7) +#define CHV_TX_DW8(ch, lane) _VLV_TX((ch), (lane), 8) +#define CHV_TX_DW9(ch, lane) 
_VLV_TX((ch), (lane), 9) +#define CHV_TX_DW10(ch, lane) _VLV_TX((ch), (lane), 10) + +#define CHV_TX_DW11(ch, lane) _VLV_TX((ch), (lane), 11) +#define DPIO_FRC_LATENCY_MASK REG_GENMASK(10, 8) +#define DPIO_FRC_LATENCY(x) REG_FIELD_PREP(DPIO_FRC_LATENCY_MASK, (x)) + +#define CHV_TX_DW14(ch, lane) _VLV_TX((ch), (lane), 14) +#define DPIO_UPAR REG_BIT(30) + +#endif /* __VLV_DPIO_PHY_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 9b33b8a74d..ee9923c7b1 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -85,20 +85,18 @@ enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt) void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port) { - struct drm_encoder *encoder = &intel_dsi->base.base; - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); u32 mask; mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; - if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port), + if (intel_de_wait_for_set(display, MIPI_GEN_FIFO_STAT(display, port), mask, 100)) - drm_err(&dev_priv->drm, "DPI FIFOs are not empty\n"); + drm_err(display->drm, "DPI FIFOs are not empty\n"); } -static void write_data(struct drm_i915_private *dev_priv, +static void write_data(struct intel_display *display, i915_reg_t reg, const u8 *data, u32 len) { @@ -110,18 +108,18 @@ static void write_data(struct drm_i915_private *dev_priv, for (j = 0; j < min_t(u32, len - i, 4); j++) val |= *data++ << 8 * j; - intel_de_write(dev_priv, reg, val); + intel_de_write(display, reg, val); } } -static void read_data(struct drm_i915_private *dev_priv, +static void read_data(struct intel_display *display, i915_reg_t reg, u8 *data, u32 len) { u32 i, j; for (i = 0; i < len; i += 4) { - u32 val = intel_de_read(dev_priv, reg); + u32 val = intel_de_read(display, reg); for (j = 0; j < min_t(u32, len - i, 4); j++) *data++ = val >> 8 * j; @@ -132,8 +130,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host); - struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_dsi *intel_dsi = intel_dsi_host->intel_dsi; + struct intel_display *display = to_intel_display(&intel_dsi->base); enum port port = intel_dsi_host->port; struct mipi_dsi_packet packet; ssize_t ret; @@ -148,51 +146,51 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, header = packet.header; if (msg->flags & MIPI_DSI_MSG_USE_LPM) { - data_reg = MIPI_LP_GEN_DATA(port); + data_reg = MIPI_LP_GEN_DATA(display, port); data_mask = LP_DATA_FIFO_FULL; - ctrl_reg = MIPI_LP_GEN_CTRL(port); + ctrl_reg = MIPI_LP_GEN_CTRL(display, port); ctrl_mask = LP_CTRL_FIFO_FULL; } else { - data_reg = MIPI_HS_GEN_DATA(port); + data_reg = MIPI_HS_GEN_DATA(display, port); data_mask = HS_DATA_FIFO_FULL; - ctrl_reg = MIPI_HS_GEN_CTRL(port); + ctrl_reg = MIPI_HS_GEN_CTRL(display, port); ctrl_mask = HS_CTRL_FIFO_FULL; } /* note: this is never true for reads */ if (packet.payload_length) { - if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port), + if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port), data_mask, 50)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Timeout waiting for HS/LP DATA FIFO !full\n"); - 
write_data(dev_priv, data_reg, packet.payload, + write_data(display, data_reg, packet.payload, packet.payload_length); } if (msg->rx_len) { - intel_de_write(dev_priv, MIPI_INTR_STAT(port), + intel_de_write(display, MIPI_INTR_STAT(display, port), GEN_READ_DATA_AVAIL); } - if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port), + if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port), ctrl_mask, 50)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Timeout waiting for HS/LP CTRL FIFO !full\n"); } - intel_de_write(dev_priv, ctrl_reg, + intel_de_write(display, ctrl_reg, header[2] << 16 | header[1] << 8 | header[0]); /* ->rx_len is set only for reads */ if (msg->rx_len) { data_mask = GEN_READ_DATA_AVAIL; - if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), + if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port), data_mask, 50)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Timeout waiting for read data.\n"); - read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len); + read_data(display, data_reg, msg->rx_buf, msg->rx_len); } /* XXX: fix for reads and writes */ @@ -225,9 +223,7 @@ static const struct mipi_dsi_host_ops intel_dsi_host_ops = { static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, enum port port) { - struct drm_encoder *encoder = &intel_dsi->base.base; - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); u32 mask; /* XXX: pipe, hs */ @@ -237,18 +233,18 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, cmd |= DPI_LP_MODE; /* clear bit */ - intel_de_write(dev_priv, MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT); + intel_de_write(display, MIPI_INTR_STAT(display, port), SPL_PKT_SENT_INTERRUPT); /* XXX: old code skips write if control unchanged */ - if (cmd == intel_de_read(dev_priv, MIPI_DPI_CONTROL(port))) - drm_dbg_kms(&dev_priv->drm, + if (cmd == intel_de_read(display, MIPI_DPI_CONTROL(display, port))) + drm_dbg_kms(display->drm, "Same special packet %02x twice in a row.\n", cmd); - intel_de_write(dev_priv, MIPI_DPI_CONTROL(port), cmd); + intel_de_write(display, MIPI_DPI_CONTROL(display, port), cmd); mask = SPL_PKT_SENT_INTERRUPT; - if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100)) - drm_err(&dev_priv->drm, + if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port), mask, 100)) + drm_err(display->drm, "Video mode command 0x%08x send failed.\n", cmd); return 0; @@ -273,8 +269,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, - base); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_connector *intel_connector = intel_dsi->attached_connector; struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int ret; @@ -329,7 +324,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, static bool glk_dsi_enable_io(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; bool cold_boot = false; @@ -339,29 +334,30 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder) * Power ON MIPI IO first and then write into IO reset and LP wake bits */ 
for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE); + intel_de_rmw(display, MIPI_CTRL(display, port), 0, GLK_MIPIIO_ENABLE); /* Put the IO into reset */ - intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0); + intel_de_rmw(display, MIPI_CTRL(display, PORT_A), GLK_MIPIIO_RESET_RELEASED, 0); /* Program LP Wake */ for_each_dsi_port(port, intel_dsi->ports) { - u32 tmp = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); - intel_de_rmw(dev_priv, MIPI_CTRL(port), + u32 tmp = intel_de_read(display, MIPI_DEVICE_READY(display, port)); + + intel_de_rmw(display, MIPI_CTRL(display, port), GLK_LP_WAKE, (tmp & DEVICE_READY) ? GLK_LP_WAKE : 0); } /* Wait for Pwr ACK */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port), + if (intel_de_wait_for_set(display, MIPI_CTRL(display, port), GLK_MIPIIO_PORT_POWERED, 20)) - drm_err(&dev_priv->drm, "MIPIO port is powergated\n"); + drm_err(display->drm, "MIPIO port is powergated\n"); } /* Check for cold boot scenario */ for_each_dsi_port(port, intel_dsi->ports) { cold_boot |= - !(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY); + !(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY); } return cold_boot; @@ -369,99 +365,100 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder) static void glk_dsi_device_ready(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; /* Wait for MIPI PHY status bit to set */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port), + if (intel_de_wait_for_set(display, MIPI_CTRL(display, port), GLK_PHY_STATUS_PORT_READY, 20)) - drm_err(&dev_priv->drm, "PHY is not ON\n"); + drm_err(display->drm, "PHY is not ON\n"); } /* Get IO out of reset */ - intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), 0, GLK_MIPIIO_RESET_RELEASED); + intel_de_rmw(display, MIPI_CTRL(display, PORT_A), 0, GLK_MIPIIO_RESET_RELEASED); /* Get IO out of Low power state*/ for_each_dsi_port(port, intel_dsi->ports) { - if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) { - intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY)) { + intel_de_rmw(display, MIPI_DEVICE_READY(display, port), ULPS_STATE_MASK, DEVICE_READY); usleep_range(10, 15); } else { /* Enter ULPS */ - intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + intel_de_rmw(display, MIPI_DEVICE_READY(display, port), ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY); /* Wait for ULPS active */ - if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), + if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port), GLK_ULPS_NOT_ACTIVE, 20)) - drm_err(&dev_priv->drm, "ULPS not active\n"); + drm_err(display->drm, "ULPS not active\n"); /* Exit ULPS */ - intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + intel_de_rmw(display, MIPI_DEVICE_READY(display, port), ULPS_STATE_MASK, ULPS_STATE_EXIT | DEVICE_READY); /* Enter Normal Mode */ - intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + intel_de_rmw(display, MIPI_DEVICE_READY(display, port), ULPS_STATE_MASK, ULPS_STATE_NORMAL_OPERATION | DEVICE_READY); - intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_LP_WAKE, 0); + intel_de_rmw(display, MIPI_CTRL(display, port), GLK_LP_WAKE, 0); } } /* Wait for Stop state */ 
for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port), + if (intel_de_wait_for_set(display, MIPI_CTRL(display, port), GLK_DATA_LANE_STOP_STATE, 20)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Date lane not in STOP state\n"); } /* Wait for AFE LATCH */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port), + if (intel_de_wait_for_set(display, BXT_MIPI_PORT_CTRL(port), AFE_LATCHOUT, 20)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "D-PHY not entering LP-11 state\n"); } } static void bxt_dsi_device_ready(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); /* Enable MIPI PHY transparent latch */ for_each_dsi_port(port, intel_dsi->ports) { - intel_de_rmw(dev_priv, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD); + intel_de_rmw(display, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD); usleep_range(2000, 2500); } /* Clear ULPS and set device ready */ for_each_dsi_port(port, intel_dsi->ports) { - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); + val = intel_de_read(display, MIPI_DEVICE_READY(display, port)); val &= ~ULPS_STATE_MASK; - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); + intel_de_write(display, MIPI_DEVICE_READY(display, port), val); usleep_range(2000, 2500); val |= DEVICE_READY; - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); + intel_de_write(display, MIPI_DEVICE_READY(display, port), val); } } static void vlv_dsi_device_ready(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); vlv_flisdsi_get(dev_priv); /* program rcomp for compliance, reduce from 50 ohms to 45 ohms @@ -474,7 +471,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder) for_each_dsi_port(port, intel_dsi->ports) { - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), + intel_de_write(display, MIPI_DEVICE_READY(display, port), ULPS_STATE_ENTER); usleep_range(2500, 3000); @@ -482,14 +479,14 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder) * Common bit for both MIPI Port A & MIPI Port C * No similar bit in MIPI Port C reg */ - intel_de_rmw(dev_priv, MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD); + intel_de_rmw(display, VLV_MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD); usleep_range(1000, 1500); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), + intel_de_write(display, MIPI_DEVICE_READY(display, port), ULPS_STATE_EXIT); usleep_range(2500, 3000); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), + intel_de_write(display, MIPI_DEVICE_READY(display, port), DEVICE_READY); usleep_range(2500, 3000); } @@ -509,50 +506,50 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; /* Enter ULPS */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + 
intel_de_rmw(display, MIPI_DEVICE_READY(display, port), ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY); /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), + if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port), GLK_PHY_STATUS_PORT_READY, 20)) - drm_err(&dev_priv->drm, "PHY is not turning OFF\n"); + drm_err(display->drm, "PHY is not turning OFF\n"); } /* Wait for Pwr ACK bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), + if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port), GLK_MIPIIO_PORT_POWERED, 20)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "MIPI IO Port is not powergated\n"); } } static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; /* Put the IO into reset */ - intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0); + intel_de_rmw(display, MIPI_CTRL(display, PORT_A), GLK_MIPIIO_RESET_RELEASED, 0); /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), + if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port), GLK_PHY_STATUS_PORT_READY, 20)) - drm_err(&dev_priv->drm, "PHY is not turning OFF\n"); + drm_err(display->drm, "PHY is not turning OFF\n"); } /* Clear MIPI mode */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_ENABLE, 0); + intel_de_rmw(display, MIPI_CTRL(display, port), GLK_MIPIIO_ENABLE, 0); } static void glk_dsi_clear_device_ready(struct intel_encoder *encoder) @@ -564,30 +561,31 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder) static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port) { return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ? - BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); + BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(port); } static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); for_each_dsi_port(port, intel_dsi->ports) { /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ? - BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A); + BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(PORT_A); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), + intel_de_write(display, MIPI_DEVICE_READY(display, port), DEVICE_READY | ULPS_STATE_ENTER); usleep_range(2000, 2500); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), + intel_de_write(display, MIPI_DEVICE_READY(display, port), DEVICE_READY | ULPS_STATE_EXIT); usleep_range(2000, 2500); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), + intel_de_write(display, MIPI_DEVICE_READY(display, port), DEVICE_READY | ULPS_STATE_ENTER); usleep_range(2000, 2500); @@ -596,15 +594,15 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) * Port A only. MIPI Port C has no similar bit for checking. 
*/ if ((IS_BROXTON(dev_priv) || port == PORT_A) && - intel_de_wait_for_clear(dev_priv, port_ctrl, + intel_de_wait_for_clear(display, port_ctrl, AFE_LATCHOUT, 30)) - drm_err(&dev_priv->drm, "DSI LP not going Low\n"); + drm_err(display->drm, "DSI LP not going Low\n"); /* Disable MIPI PHY transparent latch */ - intel_de_rmw(dev_priv, port_ctrl, LP_OUTPUT_HOLD, 0); + intel_de_rmw(display, port_ctrl, LP_OUTPUT_HOLD, 0); usleep_range(1000, 1500); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00); + intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x00); usleep_range(2000, 2500); } } @@ -612,6 +610,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) static void intel_dsi_port_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); @@ -622,11 +621,11 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, MIPI_CTRL(port), + intel_de_rmw(display, MIPI_CTRL(display, port), BXT_PIXEL_OVERLAP_CNT_MASK, temp << BXT_PIXEL_OVERLAP_CNT_SHIFT); } else { - intel_de_rmw(dev_priv, VLV_CHICKEN_3, + intel_de_rmw(display, VLV_CHICKEN_3, PIXEL_OVERLAP_CNT_MASK, temp << PIXEL_OVERLAP_CNT_SHIFT); } @@ -636,7 +635,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); u32 temp; - temp = intel_de_read(dev_priv, port_ctrl); + temp = intel_de_read(display, port_ctrl); temp &= ~LANE_CONFIGURATION_MASK; temp &= ~DUAL_LINK_MODE_MASK; @@ -656,15 +655,15 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, temp |= DITHERING_ENABLE; /* assert ip_tg_enable signal */ - intel_de_write(dev_priv, port_ctrl, temp | DPI_ENABLE); - intel_de_posting_read(dev_priv, port_ctrl); + intel_de_write(display, port_ctrl, temp | DPI_ENABLE); + intel_de_posting_read(display, port_ctrl); } } static void intel_dsi_port_disable(struct intel_encoder *encoder) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(encoder); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; @@ -672,11 +671,12 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); /* de-assert ip_tg_enable signal */ - intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0); - intel_de_posting_read(dev_priv, port_ctrl); + intel_de_rmw(display, port_ctrl, DPI_ENABLE, 0); + intel_de_posting_read(display, port_ctrl); } } -static void intel_dsi_prepare(struct intel_encoder *intel_encoder, + +static void intel_dsi_prepare(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config); static void intel_dsi_unprepare(struct intel_encoder *encoder); @@ -726,6 +726,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct 
drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -733,7 +734,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, enum port port; bool glk_cold_boot = false; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); intel_dsi_wait_panel_power_cycle(intel_dsi); @@ -753,16 +754,16 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, if (IS_BROXTON(dev_priv)) { /* Add MIPI IO reset programming for modeset */ - intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL); + intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL); /* Power up DSI regulator */ - intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); - intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, 0); + intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); + intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, 0); } if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { /* Disable DPOunit clock gating, can stall pipe */ - intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), + intel_de_rmw(display, DSPCLK_GATE_D(dev_priv), 0, DPOUNIT_CLOCK_GATE_DISABLE); } @@ -798,8 +799,8 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, */ if (is_cmd_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) - intel_de_write(dev_priv, - MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4); + intel_de_write(display, + MIPI_MAX_RETURN_PKT_SIZE(display, port), 8 * 4); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_ON); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON); } else { @@ -871,11 +872,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { intel_crtc_vblank_off(old_crtc_state); @@ -906,12 +908,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, if (IS_BROXTON(dev_priv)) { /* Power down DSI regulator to save power */ - intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); - intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, + intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); + intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, HS_IO_CTRL_SELECT); /* Add MIPI IO reset programming for modeset */ - intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0); + intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0); } if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { @@ -919,7 +921,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, } else { vlv_dsi_pll_disable(encoder); - intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), + intel_de_rmw(display, DSPCLK_GATE_D(dev_priv), DPOUNIT_CLOCK_GATE_DISABLE, 0); } @@ -935,13 +937,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_wakeref_t wakeref; enum port port; bool active = false; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); wakeref = 
intel_display_power_get_if_enabled(dev_priv, encoder->power_domain); @@ -960,7 +963,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, /* XXX: this only works for one DSI output */ for_each_dsi_port(port, intel_dsi->ports) { i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); - bool enabled = intel_de_read(dev_priv, port_ctrl) & DPI_ENABLE; + bool enabled = intel_de_read(display, port_ctrl) & DPI_ENABLE; /* * Due to some hardware limitations on VLV/CHV, the DPI enable @@ -969,27 +972,27 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, */ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && port == PORT_C) - enabled = intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE; + enabled = intel_de_read(display, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE; /* Try command mode if video mode not enabled */ if (!enabled) { - u32 tmp = intel_de_read(dev_priv, - MIPI_DSI_FUNC_PRG(port)); + u32 tmp = intel_de_read(display, + MIPI_DSI_FUNC_PRG(display, port)); enabled = tmp & CMD_MODE_DATA_WIDTH_MASK; } if (!enabled) continue; - if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) + if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY)) continue; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - u32 tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); + u32 tmp = intel_de_read(display, MIPI_CTRL(display, port)); tmp &= BXT_PIPE_SELECT_MASK; tmp >>= BXT_PIPE_SELECT_SHIFT; - if (drm_WARN_ON(&dev_priv->drm, tmp > PIPE_C)) + if (drm_WARN_ON(display->drm, tmp > PIPE_C)) continue; *pipe = tmp; @@ -1010,8 +1013,7 @@ out_put_power: static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(encoder); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_display_mode *adjusted_mode_sw; @@ -1033,11 +1035,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, * encoder->get_hw_state() returns true. 
*/ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE) + if (intel_de_read(display, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE) break; } - fmt = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK; + fmt = intel_de_read(display, MIPI_DSI_FUNC_PRG(display, port)) & VID_MODE_FORMAT_MASK; bpp = mipi_dsi_pixel_format_to_bpp( pixel_format_from_register_bits(fmt)); @@ -1049,24 +1051,24 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, /* In terms of pixels */ adjusted_mode->crtc_hdisplay = - intel_de_read(dev_priv, + intel_de_read(display, BXT_MIPI_TRANS_HACTIVE(port)); adjusted_mode->crtc_vdisplay = - intel_de_read(dev_priv, + intel_de_read(display, BXT_MIPI_TRANS_VACTIVE(port)); adjusted_mode->crtc_vtotal = - intel_de_read(dev_priv, + intel_de_read(display, BXT_MIPI_TRANS_VTOTAL(port)); hactive = adjusted_mode->crtc_hdisplay; - hfp = intel_de_read(dev_priv, MIPI_HFP_COUNT(port)); + hfp = intel_de_read(display, MIPI_HFP_COUNT(display, port)); /* * Meaningful for video mode non-burst sync pulse mode only, * can be zero for non-burst sync events and burst modes */ - hsync = intel_de_read(dev_priv, MIPI_HSYNC_PADDING_COUNT(port)); - hbp = intel_de_read(dev_priv, MIPI_HBP_COUNT(port)); + hsync = intel_de_read(display, MIPI_HSYNC_PADDING_COUNT(display, port)); + hbp = intel_de_read(display, MIPI_HBP_COUNT(display, port)); /* harizontal values are in terms of high speed byte clock */ hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count, @@ -1083,8 +1085,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, } /* vertical values are in terms of lines */ - vfp = intel_de_read(dev_priv, MIPI_VFP_COUNT(port)); - vsync = intel_de_read(dev_priv, MIPI_VSYNC_PADDING_COUNT(port)); + vfp = intel_de_read(display, MIPI_VFP_COUNT(display, port)); + vsync = intel_de_read(display, MIPI_VSYNC_PADDING_COUNT(display, port)); adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp; adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay; @@ -1210,12 +1212,12 @@ static u16 txclkesc(u32 divider, unsigned int us) } } -static void set_dsi_timings(struct drm_encoder *encoder, +static void set_dsi_timings(struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode) { - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder)); + struct intel_display *display = to_intel_display(encoder); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); unsigned int lane_count = intel_dsi->lane_count; @@ -1256,29 +1258,29 @@ static void set_dsi_timings(struct drm_encoder *encoder, * vactive, as they are calculated per channel basis, * whereas these values should be based on resolution. 
*/ - intel_de_write(dev_priv, BXT_MIPI_TRANS_HACTIVE(port), + intel_de_write(display, BXT_MIPI_TRANS_HACTIVE(port), adjusted_mode->crtc_hdisplay); - intel_de_write(dev_priv, BXT_MIPI_TRANS_VACTIVE(port), + intel_de_write(display, BXT_MIPI_TRANS_VACTIVE(port), adjusted_mode->crtc_vdisplay); - intel_de_write(dev_priv, BXT_MIPI_TRANS_VTOTAL(port), + intel_de_write(display, BXT_MIPI_TRANS_VTOTAL(port), adjusted_mode->crtc_vtotal); } - intel_de_write(dev_priv, MIPI_HACTIVE_AREA_COUNT(port), + intel_de_write(display, MIPI_HACTIVE_AREA_COUNT(display, port), hactive); - intel_de_write(dev_priv, MIPI_HFP_COUNT(port), hfp); + intel_de_write(display, MIPI_HFP_COUNT(display, port), hfp); /* meaningful for video mode non-burst sync pulse mode only, * can be zero for non-burst sync events and burst modes */ - intel_de_write(dev_priv, MIPI_HSYNC_PADDING_COUNT(port), + intel_de_write(display, MIPI_HSYNC_PADDING_COUNT(display, port), hsync); - intel_de_write(dev_priv, MIPI_HBP_COUNT(port), hbp); + intel_de_write(display, MIPI_HBP_COUNT(display, port), hbp); /* vertical values are in terms of lines */ - intel_de_write(dev_priv, MIPI_VFP_COUNT(port), vfp); - intel_de_write(dev_priv, MIPI_VSYNC_PADDING_COUNT(port), + intel_de_write(display, MIPI_VFP_COUNT(display, port), vfp); + intel_de_write(display, MIPI_VSYNC_PADDING_COUNT(display, port), vsync); - intel_de_write(dev_priv, MIPI_VBP_COUNT(port), vbp); + intel_de_write(display, MIPI_VBP_COUNT(display, port), vbp); } } @@ -1299,21 +1301,20 @@ static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt) } } -static void intel_dsi_prepare(struct intel_encoder *intel_encoder, +static void intel_dsi_prepare(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(encoder); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder)); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; enum port port; unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); u32 val, tmp; u16 mode_hdisplay; - drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(crtc->pipe)); + drm_dbg_kms(display->drm, "pipe %c\n", pipe_name(crtc->pipe)); mode_hdisplay = adjusted_mode->crtc_hdisplay; @@ -1329,31 +1330,31 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, * escape clock divider, 20MHz, shared for A and C. * device ready must be off when doing this! txclkesc? 
*/ - tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); + tmp = intel_de_read(display, MIPI_CTRL(display, PORT_A)); tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK; - intel_de_write(dev_priv, MIPI_CTRL(PORT_A), + intel_de_write(display, MIPI_CTRL(display, PORT_A), tmp | ESCAPE_CLOCK_DIVIDER_1); /* read request priority is per pipe */ - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); + tmp = intel_de_read(display, MIPI_CTRL(display, port)); tmp &= ~READ_REQUEST_PRIORITY_MASK; - intel_de_write(dev_priv, MIPI_CTRL(port), + intel_de_write(display, MIPI_CTRL(display, port), tmp | READ_REQUEST_PRIORITY_HIGH); } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { enum pipe pipe = crtc->pipe; - intel_de_rmw(dev_priv, MIPI_CTRL(port), + intel_de_rmw(display, MIPI_CTRL(display, port), BXT_PIPE_SELECT_MASK, BXT_PIPE_SELECT(pipe)); } /* XXX: why here, why like this? handling in irq handler?! */ - intel_de_write(dev_priv, MIPI_INTR_STAT(port), 0xffffffff); - intel_de_write(dev_priv, MIPI_INTR_EN(port), 0xffffffff); + intel_de_write(display, MIPI_INTR_STAT(display, port), 0xffffffff); + intel_de_write(display, MIPI_INTR_EN(display, port), 0xffffffff); - intel_de_write(dev_priv, MIPI_DPHY_PARAM(port), + intel_de_write(display, MIPI_DPHY_PARAM(display, port), intel_dsi->dphy_reg); - intel_de_write(dev_priv, MIPI_DPI_RESOLUTION(port), + intel_de_write(display, MIPI_DPI_RESOLUTION(display, port), adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT | mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT); } @@ -1381,7 +1382,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, } for_each_dsi_port(port, intel_dsi->ports) { - intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val); + intel_de_write(display, MIPI_DSI_FUNC_PRG(display, port), val); /* timeouts for recovery. one frame IIUC. if counter expires, * EOT and stop state. */ @@ -1402,23 +1403,23 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, if (is_vid_mode(intel_dsi) && intel_dsi->video_mode == BURST_MODE) { - intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port), + intel_de_write(display, MIPI_HS_TX_TIMEOUT(display, port), txbyteclkhs(adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1); } else { - intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port), + intel_de_write(display, MIPI_HS_TX_TIMEOUT(display, port), txbyteclkhs(adjusted_mode->crtc_vtotal * adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1); } - intel_de_write(dev_priv, MIPI_LP_RX_TIMEOUT(port), + intel_de_write(display, MIPI_LP_RX_TIMEOUT(display, port), intel_dsi->lp_rx_timeout); - intel_de_write(dev_priv, MIPI_TURN_AROUND_TIMEOUT(port), + intel_de_write(display, MIPI_TURN_AROUND_TIMEOUT(display, port), intel_dsi->turn_arnd_val); - intel_de_write(dev_priv, MIPI_DEVICE_RESET_TIMER(port), + intel_de_write(display, MIPI_DEVICE_RESET_TIMER(display, port), intel_dsi->rst_timer_val); /* dphy stuff */ /* in terms of low power clock */ - intel_de_write(dev_priv, MIPI_INIT_COUNT(port), + intel_de_write(display, MIPI_INIT_COUNT(display, port), txclkesc(intel_dsi->escape_clk_div, 100)); if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && @@ -1429,16 +1430,16 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, * getting used. So write the other port * if not in dual link mode. */ - intel_de_write(dev_priv, - MIPI_INIT_COUNT(port == PORT_A ? PORT_C : PORT_A), + intel_de_write(display, + MIPI_INIT_COUNT(display, port == PORT_A ? 
PORT_C : PORT_A), intel_dsi->init_count); } /* recovery disables */ - intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), tmp); + intel_de_write(display, MIPI_EOT_DISABLE(display, port), tmp); /* in terms of low power clock */ - intel_de_write(dev_priv, MIPI_INIT_COUNT(port), + intel_de_write(display, MIPI_INIT_COUNT(display, port), intel_dsi->init_count); /* in terms of txbyteclkhs. actual high to low switch + @@ -1446,7 +1447,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, * * XXX: write MIPI_STOP_STATE_STALL? */ - intel_de_write(dev_priv, MIPI_HIGH_LOW_SWITCH_COUNT(port), + intel_de_write(display, MIPI_HIGH_LOW_SWITCH_COUNT(display, port), intel_dsi->hs_to_lp_count); /* XXX: low power clock equivalence in terms of byte clock. @@ -1455,14 +1456,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, * txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL * ) / 105.??? */ - intel_de_write(dev_priv, MIPI_LP_BYTECLK(port), + intel_de_write(display, MIPI_LP_BYTECLK(display, port), intel_dsi->lp_byte_clk); if (IS_GEMINILAKE(dev_priv)) { - intel_de_write(dev_priv, MIPI_TLPX_TIME_COUNT(port), + intel_de_write(display, MIPI_TLPX_TIME_COUNT(display, port), intel_dsi->lp_byte_clk); /* Shadow of DPHY reg */ - intel_de_write(dev_priv, MIPI_CLK_LANE_TIMING(port), + intel_de_write(display, MIPI_CLK_LANE_TIMING(display, port), intel_dsi->dphy_reg); } @@ -1471,10 +1472,10 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, * this register in terms of byte clocks. based on dsi transfer * rate and the number of lanes configured the time taken to * transmit 16 long packets in a dsi stream varies. */ - intel_de_write(dev_priv, MIPI_DBI_BW_CTRL(port), + intel_de_write(display, MIPI_DBI_BW_CTRL(display, port), intel_dsi->bw_timer); - intel_de_write(dev_priv, MIPI_CLK_LANE_SWITCH_TIME_CNT(port), + intel_de_write(display, MIPI_CLK_LANE_SWITCH_TIME_CNT(display, port), intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT); if (is_vid_mode(intel_dsi)) { @@ -1502,13 +1503,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, break; } - intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port), fmt); + intel_de_write(display, MIPI_VIDEO_MODE_FORMAT(display, port), fmt); } } } static void intel_dsi_unprepare(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; @@ -1518,17 +1520,17 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder) for_each_dsi_port(port, intel_dsi->ports) { /* Panel commands can be sent when clock is in LP11 */ - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x0); + intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x0); if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) bxt_dsi_reset_clocks(encoder, port); else vlv_dsi_reset_clocks(encoder, port); - intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP); + intel_de_write(display, MIPI_EOT_DISABLE(display, port), CLOCKSTOP); - intel_de_rmw(dev_priv, MIPI_DSI_FUNC_PRG(port), VID_MODE_FORMAT_MASK, 0); + intel_de_rmw(display, MIPI_DSI_FUNC_PRG(display, port), VID_MODE_FORMAT_MASK, 0); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1); + intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x1); } } @@ -1592,8 +1594,7 @@ static void vlv_dsi_add_properties(struct intel_connector *connector) static 
void vlv_dphy_param_init(struct intel_dsi *intel_dsi) { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); struct intel_connector *connector = intel_dsi->attached_connector; struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; u32 tlpx_ns, extra_byte_count, tlpx_ui; @@ -1879,10 +1880,8 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = { void vlv_dsi_init(struct drm_i915_private *dev_priv) { struct intel_dsi *intel_dsi; - struct intel_encoder *intel_encoder; - struct drm_encoder *encoder; - struct intel_connector *intel_connector; - struct drm_connector *connector; + struct intel_encoder *encoder; + struct intel_connector *connector; struct drm_display_mode *current_mode; const struct dmi_system_id *dmi_id; enum port port; @@ -1903,64 +1902,61 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) if (!intel_dsi) return; - intel_connector = intel_connector_alloc(); - if (!intel_connector) { + connector = intel_connector_alloc(); + if (!connector) { kfree(intel_dsi); return; } - intel_encoder = &intel_dsi->base; - encoder = &intel_encoder->base; - intel_dsi->attached_connector = intel_connector; - - connector = &intel_connector->base; + encoder = &intel_dsi->base; + intel_dsi->attached_connector = connector; - drm_encoder_init(&dev_priv->drm, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI, - "DSI %c", port_name(port)); + drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_dsi_funcs, + DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port)); - intel_encoder->compute_config = intel_dsi_compute_config; - intel_encoder->pre_enable = intel_dsi_pre_enable; + encoder->compute_config = intel_dsi_compute_config; + encoder->pre_enable = intel_dsi_pre_enable; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - intel_encoder->enable = bxt_dsi_enable; - intel_encoder->disable = intel_dsi_disable; - intel_encoder->post_disable = intel_dsi_post_disable; - intel_encoder->get_hw_state = intel_dsi_get_hw_state; - intel_encoder->get_config = intel_dsi_get_config; - intel_encoder->update_pipe = intel_backlight_update; - intel_encoder->shutdown = intel_dsi_shutdown; + encoder->enable = bxt_dsi_enable; + encoder->disable = intel_dsi_disable; + encoder->post_disable = intel_dsi_post_disable; + encoder->get_hw_state = intel_dsi_get_hw_state; + encoder->get_config = intel_dsi_get_config; + encoder->update_pipe = intel_backlight_update; + encoder->shutdown = intel_dsi_shutdown; - intel_connector->get_hw_state = intel_connector_get_hw_state; + connector->get_hw_state = intel_connector_get_hw_state; - intel_encoder->port = port; - intel_encoder->type = INTEL_OUTPUT_DSI; - intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI; - intel_encoder->cloneable = 0; + encoder->port = port; + encoder->type = INTEL_OUTPUT_DSI; + encoder->power_domain = POWER_DOMAIN_PORT_DSI; + encoder->cloneable = 0; /* * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI * port C. BXT isn't limited like this. 
*/ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - intel_encoder->pipe_mask = ~0; + encoder->pipe_mask = ~0; else if (port == PORT_A) - intel_encoder->pipe_mask = BIT(PIPE_A); + encoder->pipe_mask = BIT(PIPE_A); else - intel_encoder->pipe_mask = BIT(PIPE_B); + encoder->pipe_mask = BIT(PIPE_B); intel_dsi->panel_power_off_time = ktime_get_boottime(); - intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, NULL); + intel_bios_init_panel_late(dev_priv, &connector->panel, NULL, NULL); - if (intel_connector->panel.vbt.dsi.config->dual_link) + if (connector->panel.vbt.dsi.config->dual_link) intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); else intel_dsi->ports = BIT(port); - if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) - intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; + if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) + connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; - if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) - intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; + if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) + connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; /* Create a DSI host (and a device) for each port. */ for_each_dsi_port(port, intel_dsi->ports) { @@ -1980,7 +1976,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) } /* Use clock read-back from current hw-state for fastboot */ - current_mode = intel_encoder_current_mode(intel_encoder); + current_mode = intel_encoder_current_mode(encoder); if (current_mode) { drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n", intel_dsi->pclk, current_mode->clock); @@ -1996,22 +1992,22 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) vlv_dphy_param_init(intel_dsi); intel_dsi_vbt_gpio_init(intel_dsi, - intel_dsi_get_hw_state(intel_encoder, &pipe)); + intel_dsi_get_hw_state(encoder, &pipe)); - drm_connector_init(&dev_priv->drm, connector, &intel_dsi_connector_funcs, + drm_connector_init(&dev_priv->drm, &connector->base, &intel_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI); - drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs); + drm_connector_helper_add(&connector->base, &intel_dsi_connector_helper_funcs); - connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/ + connector->base.display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/ - intel_connector_attach_encoder(intel_connector, intel_encoder); + intel_connector_attach_encoder(connector, encoder); mutex_lock(&dev_priv->drm.mode_config.mutex); - intel_panel_add_vbt_lfp_fixed_mode(intel_connector); + intel_panel_add_vbt_lfp_fixed_mode(connector); mutex_unlock(&dev_priv->drm.mode_config.mutex); - if (!intel_panel_preferred_fixed_mode(intel_connector)) { + if (!intel_panel_preferred_fixed_mode(connector)) { drm_dbg_kms(&dev_priv->drm, "no fixed mode\n"); goto err_cleanup_connector; } @@ -2024,18 +2020,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) quirk_func(intel_dsi); } - intel_panel_init(intel_connector, NULL); + intel_panel_init(connector, NULL); - intel_backlight_setup(intel_connector, INVALID_PIPE); + intel_backlight_setup(connector, INVALID_PIPE); - vlv_dsi_add_properties(intel_connector); + vlv_dsi_add_properties(connector); return; err_cleanup_connector: - drm_connector_cleanup(&intel_connector->base); + drm_connector_cleanup(&connector->base); err: - 
drm_encoder_cleanup(&intel_encoder->base); + drm_encoder_cleanup(&encoder->base); kfree(intel_dsi); - kfree(intel_connector); + kfree(connector); } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index ae0a0b11ba..70c5a13a3c 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -365,13 +365,13 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { - u32 temp; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + u32 temp; - temp = intel_de_read(dev_priv, MIPI_CTRL(port)); + temp = intel_de_read(display, MIPI_CTRL(display, port)); temp &= ~ESCAPE_CLOCK_DIVIDER_MASK; - intel_de_write(dev_priv, MIPI_CTRL(port), + intel_de_write(display, MIPI_CTRL(display, port), temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT); } @@ -570,24 +570,24 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder, void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { + struct intel_display *display = to_intel_display(encoder); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 tmp; - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); /* Clear old configurations */ if (IS_BROXTON(dev_priv)) { - tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL); + tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL); tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port)); tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port)); - intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp); + intel_de_write(display, BXT_MIPI_CLOCK_CTL, tmp); } else { - intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0); + intel_de_rmw(display, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0); - intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0); + intel_de_rmw(display, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0); } - intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP); + intel_de_write(display, MIPI_EOT_DISABLE(display, port), CLOCKSTOP); } static void assert_dsi_pll(struct drm_i915_private *i915, bool state) diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h index abbe427e46..c1126d170e 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h +++ b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h @@ -11,26 +11,23 @@ #define VLV_MIPI_BASE VLV_DISPLAY_BASE #define BXT_MIPI_BASE 0x60000 -#define _MIPI_MMIO_BASE(__i915) ((__i915)->display.dsi.mmio_base) +#define _MIPI_MMIO_BASE(display) ((display)->dsi.mmio_base) #define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? 
a : c) /* ports A and C only */ -#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) +#define _MMIO_MIPI(base, port, a, c) _MMIO((base) + _MIPI_PORT(port, a, c)) /* BXT MIPI mode configure */ -#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8 -#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8 -#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \ - _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE) +#define _BXT_MIPIA_TRANS_HACTIVE 0xb0f8 +#define _BXT_MIPIC_TRANS_HACTIVE 0xb8f8 +#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE) -#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC -#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC -#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \ - _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE) +#define _BXT_MIPIA_TRANS_VACTIVE 0xb0fc +#define _BXT_MIPIC_TRANS_VACTIVE 0xb8fc +#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE) -#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100 -#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900 -#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \ - _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL) +#define _BXT_MIPIA_TRANS_VTOTAL 0xb100 +#define _BXT_MIPIC_TRANS_VTOTAL 0xb900 +#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL) #define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020) #define STAP_SELECT (1 << 0) @@ -38,14 +35,14 @@ #define BXT_P_DSI_REGULATOR_TX_CTRL _MMIO(0x160054) #define HS_IO_CTRL_SELECT (1 << 0) -#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) -#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) -#define MIPI_PORT_CTRL(port) _MMIO_MIPI(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL) +#define _MIPIA_PORT_CTRL 0x61190 +#define _MIPIC_PORT_CTRL 0x61700 +#define VLV_MIPI_PORT_CTRL(port) _MMIO_MIPI(VLV_MIPI_BASE, port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL) /* BXT port control */ -#define _BXT_MIPIA_PORT_CTRL 0x6B0C0 -#define _BXT_MIPIC_PORT_CTRL 0x6B8C0 -#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL) +#define _BXT_MIPIA_PORT_CTRL 0xb0c0 +#define _BXT_MIPIC_PORT_CTRL 0xb8c0 +#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(BXT_MIPI_BASE, tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL) #define DPI_ENABLE (1 << 31) /* A + C */ #define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 @@ -87,20 +84,17 @@ #define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0) #define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0) -#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194) -#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704) -#define MIPI_TEARING_CTRL(port) _MMIO_MIPI(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL) +#define _MIPIA_TEARING_CTRL 0x61194 +#define _MIPIC_TEARING_CTRL 0x61704 +#define VLV_MIPI_TEARING_CTRL(port) _MMIO_MIPI(VLV_MIPI_BASE, port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL) #define TEARING_EFFECT_DELAY_SHIFT 0 #define TEARING_EFFECT_DELAY_MASK (0xffff << 0) -/* XXX: all bits reserved */ -#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0) - /* MIPI DSI Controller and D-PHY registers */ -#define _MIPIA_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb000) -#define _MIPIC_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb800) -#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY) +#define _MIPIA_DEVICE_READY 0xb000 +#define _MIPIC_DEVICE_READY 0xb800 +#define MIPI_DEVICE_READY(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY) #define BUS_POSSESSION (1 << 3) /* set 
to give bus to receiver */ #define ULPS_STATE_MASK (3 << 1) #define ULPS_STATE_ENTER (2 << 1) @@ -108,12 +102,12 @@ #define ULPS_STATE_NORMAL_OPERATION (0 << 1) #define DEVICE_READY (1 << 0) -#define _MIPIA_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb004) -#define _MIPIC_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb804) -#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT) -#define _MIPIA_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb008) -#define _MIPIC_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb808) -#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN) +#define _MIPIA_INTR_STAT 0xb004 +#define _MIPIC_INTR_STAT 0xb804 +#define MIPI_INTR_STAT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT) +#define _MIPIA_INTR_EN 0xb008 +#define _MIPIC_INTR_EN 0xb808 +#define MIPI_INTR_EN(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_EN, _MIPIC_INTR_EN) #define TEARING_EFFECT (1 << 31) #define SPL_PKT_SENT_INTERRUPT (1 << 30) #define GEN_READ_DATA_AVAIL (1 << 29) @@ -147,9 +141,9 @@ #define RXSOT_SYNC_ERROR (1 << 1) #define RXSOT_ERROR (1 << 0) -#define _MIPIA_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb00c) -#define _MIPIC_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb80c) -#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG) +#define _MIPIA_DSI_FUNC_PRG 0xb00c +#define _MIPIC_DSI_FUNC_PRG 0xb80c +#define MIPI_DSI_FUNC_PRG(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG) #define CMD_MODE_DATA_WIDTH_MASK (7 << 13) #define CMD_MODE_NOT_SUPPORTED (0 << 13) #define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13) @@ -170,77 +164,77 @@ #define DATA_LANES_PRG_REG_SHIFT 0 #define DATA_LANES_PRG_REG_MASK (7 << 0) -#define _MIPIA_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb010) -#define _MIPIC_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb810) -#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT) +#define _MIPIA_HS_TX_TIMEOUT 0xb010 +#define _MIPIC_HS_TX_TIMEOUT 0xb810 +#define MIPI_HS_TX_TIMEOUT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT) #define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff -#define _MIPIA_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb014) -#define _MIPIC_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb814) -#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT) +#define _MIPIA_LP_RX_TIMEOUT 0xb014 +#define _MIPIC_LP_RX_TIMEOUT 0xb814 +#define MIPI_LP_RX_TIMEOUT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT) #define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff -#define _MIPIA_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb018) -#define _MIPIC_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb818) -#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT) +#define _MIPIA_TURN_AROUND_TIMEOUT 0xb018 +#define _MIPIC_TURN_AROUND_TIMEOUT 0xb818 +#define MIPI_TURN_AROUND_TIMEOUT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT) #define TURN_AROUND_TIMEOUT_MASK 0x3f -#define _MIPIA_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb01c) -#define _MIPIC_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb81c) -#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, 
_MIPIC_DEVICE_RESET_TIMER) +#define _MIPIA_DEVICE_RESET_TIMER 0xb01c +#define _MIPIC_DEVICE_RESET_TIMER 0xb81c +#define MIPI_DEVICE_RESET_TIMER(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER) #define DEVICE_RESET_TIMER_MASK 0xffff -#define _MIPIA_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb020) -#define _MIPIC_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb820) -#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION) +#define _MIPIA_DPI_RESOLUTION 0xb020 +#define _MIPIC_DPI_RESOLUTION 0xb820 +#define MIPI_DPI_RESOLUTION(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION) #define VERTICAL_ADDRESS_SHIFT 16 #define VERTICAL_ADDRESS_MASK (0xffff << 16) #define HORIZONTAL_ADDRESS_SHIFT 0 #define HORIZONTAL_ADDRESS_MASK 0xffff -#define _MIPIA_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb024) -#define _MIPIC_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb824) -#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE) +#define _MIPIA_DBI_FIFO_THROTTLE 0xb024 +#define _MIPIC_DBI_FIFO_THROTTLE 0xb824 +#define MIPI_DBI_FIFO_THROTTLE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE) #define DBI_FIFO_EMPTY_HALF (0 << 0) #define DBI_FIFO_EMPTY_QUARTER (1 << 0) #define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) /* regs below are bits 15:0 */ -#define _MIPIA_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb028) -#define _MIPIC_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb828) -#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT) +#define _MIPIA_HSYNC_PADDING_COUNT 0xb028 +#define _MIPIC_HSYNC_PADDING_COUNT 0xb828 +#define MIPI_HSYNC_PADDING_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT) -#define _MIPIA_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb02c) -#define _MIPIC_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb82c) -#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT) +#define _MIPIA_HBP_COUNT 0xb02c +#define _MIPIC_HBP_COUNT 0xb82c +#define MIPI_HBP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT) -#define _MIPIA_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb030) -#define _MIPIC_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb830) -#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT) +#define _MIPIA_HFP_COUNT 0xb030 +#define _MIPIC_HFP_COUNT 0xb830 +#define MIPI_HFP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT) -#define _MIPIA_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb034) -#define _MIPIC_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb834) -#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT) +#define _MIPIA_HACTIVE_AREA_COUNT 0xb034 +#define _MIPIC_HACTIVE_AREA_COUNT 0xb834 +#define MIPI_HACTIVE_AREA_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT) -#define _MIPIA_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb038) -#define _MIPIC_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb838) -#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT) +#define 
_MIPIA_VSYNC_PADDING_COUNT 0xb038 +#define _MIPIC_VSYNC_PADDING_COUNT 0xb838 +#define MIPI_VSYNC_PADDING_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT) -#define _MIPIA_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb03c) -#define _MIPIC_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb83c) -#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT) +#define _MIPIA_VBP_COUNT 0xb03c +#define _MIPIC_VBP_COUNT 0xb83c +#define MIPI_VBP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT) -#define _MIPIA_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb040) -#define _MIPIC_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb840) -#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT) +#define _MIPIA_VFP_COUNT 0xb040 +#define _MIPIC_VFP_COUNT 0xb840 +#define MIPI_VFP_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT) -#define _MIPIA_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb044) -#define _MIPIC_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb844) -#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT) +#define _MIPIA_HIGH_LOW_SWITCH_COUNT 0xb044 +#define _MIPIC_HIGH_LOW_SWITCH_COUNT 0xb844 +#define MIPI_HIGH_LOW_SWITCH_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT) -#define _MIPIA_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb048) -#define _MIPIC_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb848) -#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL) +#define _MIPIA_DPI_CONTROL 0xb048 +#define _MIPIC_DPI_CONTROL 0xb848 +#define MIPI_DPI_CONTROL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL) #define DPI_LP_MODE (1 << 6) #define BACKLIGHT_OFF (1 << 5) #define BACKLIGHT_ON (1 << 4) @@ -249,28 +243,27 @@ #define TURN_ON (1 << 1) #define SHUTDOWN (1 << 0) -#define _MIPIA_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb04c) -#define _MIPIC_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb84c) -#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA) +#define _MIPIA_DPI_DATA 0xb04c +#define _MIPIC_DPI_DATA 0xb84c +#define MIPI_DPI_DATA(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA) #define COMMAND_BYTE_SHIFT 0 #define COMMAND_BYTE_MASK (0x3f << 0) -#define _MIPIA_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb050) -#define _MIPIC_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb850) -#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT) +#define _MIPIA_INIT_COUNT 0xb050 +#define _MIPIC_INIT_COUNT 0xb850 +#define MIPI_INIT_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT) #define MASTER_INIT_TIMER_SHIFT 0 #define MASTER_INIT_TIMER_MASK (0xffff << 0) -#define _MIPIA_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb054) -#define _MIPIC_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb854) -#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \ - _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE) +#define _MIPIA_MAX_RETURN_PKT_SIZE 0xb054 +#define _MIPIC_MAX_RETURN_PKT_SIZE 0xb854 +#define MIPI_MAX_RETURN_PKT_SIZE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE) #define 
MAX_RETURN_PKT_SIZE_SHIFT 0 #define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) -#define _MIPIA_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb058) -#define _MIPIC_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb858) -#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT) +#define _MIPIA_VIDEO_MODE_FORMAT 0xb058 +#define _MIPIC_VIDEO_MODE_FORMAT 0xb858 +#define MIPI_VIDEO_MODE_FORMAT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT) #define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) #define DISABLE_VIDEO_BTA (1 << 3) #define IP_TG_CONFIG (1 << 2) @@ -278,9 +271,9 @@ #define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0) #define VIDEO_MODE_BURST (3 << 0) -#define _MIPIA_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb05c) -#define _MIPIC_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb85c) -#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) +#define _MIPIA_EOT_DISABLE 0xb05c +#define _MIPIC_EOT_DISABLE 0xb85c +#define MIPI_EOT_DISABLE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) #define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9) #define BXT_DPHY_DEFEATURE_EN (1 << 8) #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) @@ -292,36 +285,36 @@ #define CLOCKSTOP (1 << 1) #define EOT_DISABLE (1 << 0) -#define _MIPIA_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb060) -#define _MIPIC_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb860) -#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK) +#define _MIPIA_LP_BYTECLK 0xb060 +#define _MIPIC_LP_BYTECLK 0xb860 +#define MIPI_LP_BYTECLK(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK) #define LP_BYTECLK_SHIFT 0 #define LP_BYTECLK_MASK (0xffff << 0) -#define _MIPIA_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb0a4) -#define _MIPIC_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb8a4) -#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT) +#define _MIPIA_TLPX_TIME_COUNT 0xb0a4 +#define _MIPIC_TLPX_TIME_COUNT 0xb8a4 +#define MIPI_TLPX_TIME_COUNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT) -#define _MIPIA_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb098) -#define _MIPIC_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb898) -#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING) +#define _MIPIA_CLK_LANE_TIMING 0xb098 +#define _MIPIC_CLK_LANE_TIMING 0xb898 +#define MIPI_CLK_LANE_TIMING(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING) /* bits 31:0 */ -#define _MIPIA_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb064) -#define _MIPIC_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb864) -#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA) +#define _MIPIA_LP_GEN_DATA 0xb064 +#define _MIPIC_LP_GEN_DATA 0xb864 +#define MIPI_LP_GEN_DATA(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA) /* bits 31:0 */ -#define _MIPIA_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb068) -#define _MIPIC_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb868) -#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA) - -#define _MIPIA_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb06c) -#define _MIPIC_LP_GEN_CTRL 
(_MIPI_MMIO_BASE(dev_priv) + 0xb86c) -#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL) -#define _MIPIA_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb070) -#define _MIPIC_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb870) -#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL) +#define _MIPIA_HS_GEN_DATA 0xb068 +#define _MIPIC_HS_GEN_DATA 0xb868 +#define MIPI_HS_GEN_DATA(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA) + +#define _MIPIA_LP_GEN_CTRL 0xb06c +#define _MIPIC_LP_GEN_CTRL 0xb86c +#define MIPI_LP_GEN_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL) +#define _MIPIA_HS_GEN_CTRL 0xb070 +#define _MIPIC_HS_GEN_CTRL 0xb870 +#define MIPI_HS_GEN_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL) #define LONG_PACKET_WORD_COUNT_SHIFT 8 #define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) #define SHORT_PACKET_PARAM_SHIFT 8 @@ -332,9 +325,9 @@ #define DATA_TYPE_MASK (0x3f << 0) /* data type values, see include/video/mipi_display.h */ -#define _MIPIA_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb074) -#define _MIPIC_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb874) -#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT) +#define _MIPIA_GEN_FIFO_STAT 0xb074 +#define _MIPIC_GEN_FIFO_STAT 0xb874 +#define MIPI_GEN_FIFO_STAT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT) #define DPI_FIFO_EMPTY (1 << 28) #define DBI_FIFO_EMPTY (1 << 27) #define LP_CTRL_FIFO_EMPTY (1 << 26) @@ -350,16 +343,16 @@ #define HS_DATA_FIFO_HALF_EMPTY (1 << 1) #define HS_DATA_FIFO_FULL (1 << 0) -#define _MIPIA_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb078) -#define _MIPIC_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb878) -#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE) +#define _MIPIA_HS_LS_DBI_ENABLE 0xb078 +#define _MIPIC_HS_LS_DBI_ENABLE 0xb878 +#define MIPI_HS_LP_DBI_ENABLE(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE) #define DBI_HS_LP_MODE_MASK (1 << 0) #define DBI_LP_MODE (1 << 0) #define DBI_HS_MODE (0 << 0) -#define _MIPIA_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb080) -#define _MIPIC_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb880) -#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM) +#define _MIPIA_DPHY_PARAM 0xb080 +#define _MIPIC_DPHY_PARAM 0xb880 +#define MIPI_DPHY_PARAM(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM) #define EXIT_ZERO_COUNT_SHIFT 24 #define EXIT_ZERO_COUNT_MASK (0x3f << 24) #define TRAIL_COUNT_SHIFT 16 @@ -369,34 +362,34 @@ #define PREPARE_COUNT_SHIFT 0 #define PREPARE_COUNT_MASK (0x3f << 0) -#define _MIPIA_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb084) -#define _MIPIC_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb884) -#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL) +#define _MIPIA_DBI_BW_CTRL 0xb084 +#define _MIPIC_DBI_BW_CTRL 0xb884 +#define MIPI_DBI_BW_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL) -#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb088) -#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb888) -#define 
MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT) +#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT 0xb088 +#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT 0xb888 +#define MIPI_CLK_LANE_SWITCH_TIME_CNT(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT) #define LP_HS_SSW_CNT_SHIFT 16 #define LP_HS_SSW_CNT_MASK (0xffff << 16) #define HS_LP_PWR_SW_CNT_SHIFT 0 #define HS_LP_PWR_SW_CNT_MASK (0xffff << 0) -#define _MIPIA_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb08c) -#define _MIPIC_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb88c) -#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL) +#define _MIPIA_STOP_STATE_STALL 0xb08c +#define _MIPIC_STOP_STATE_STALL 0xb88c +#define MIPI_STOP_STATE_STALL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL) #define STOP_STATE_STALL_COUNTER_SHIFT 0 #define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) -#define _MIPIA_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb090) -#define _MIPIC_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb890) -#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1) -#define _MIPIA_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb094) -#define _MIPIC_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb894) -#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1) +#define _MIPIA_INTR_STAT_REG_1 0xb090 +#define _MIPIC_INTR_STAT_REG_1 0xb890 +#define MIPI_INTR_STAT_REG_1(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1) +#define _MIPIA_INTR_EN_REG_1 0xb094 +#define _MIPIC_INTR_EN_REG_1 0xb894 +#define MIPI_INTR_EN_REG_1(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1) #define RX_CONTENTION_DETECTED (1 << 0) /* XXX: only pipe A ?!? 
*/ -#define MIPIA_DBI_TYPEC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb100) +#define MIPIA_DBI_TYPEC_CTRL(display) (_MIPI_MMIO_BASE(display) + 0xb100) #define DBI_TYPEC_ENABLE (1 << 31) #define DBI_TYPEC_WIP (1 << 30) #define DBI_TYPEC_OPTION_SHIFT 28 @@ -409,9 +402,9 @@ /* MIPI adapter registers */ -#define _MIPIA_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb104) -#define _MIPIC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb904) -#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL) +#define _MIPIA_CTRL 0xb104 +#define _MIPIC_CTRL 0xb904 +#define MIPI_CTRL(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_CTRL, _MIPIC_CTRL) #define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ #define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) #define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) @@ -442,41 +435,41 @@ #define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */ #define GLK_MIPIIO_ENABLE (1 << 0) -#define _MIPIA_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb108) -#define _MIPIC_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb908) -#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS) +#define _MIPIA_DATA_ADDRESS 0xb108 +#define _MIPIC_DATA_ADDRESS 0xb908 +#define MIPI_DATA_ADDRESS(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS) #define DATA_MEM_ADDRESS_SHIFT 5 #define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) #define DATA_VALID (1 << 0) -#define _MIPIA_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb10c) -#define _MIPIC_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb90c) -#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH) +#define _MIPIA_DATA_LENGTH 0xb10c +#define _MIPIC_DATA_LENGTH 0xb90c +#define MIPI_DATA_LENGTH(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH) #define DATA_LENGTH_SHIFT 0 #define DATA_LENGTH_MASK (0xfffff << 0) -#define _MIPIA_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb110) -#define _MIPIC_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb910) -#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS) +#define _MIPIA_COMMAND_ADDRESS 0xb110 +#define _MIPIC_COMMAND_ADDRESS 0xb910 +#define MIPI_COMMAND_ADDRESS(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS) #define COMMAND_MEM_ADDRESS_SHIFT 5 #define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) #define AUTO_PWG_ENABLE (1 << 2) #define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1) #define COMMAND_VALID (1 << 0) -#define _MIPIA_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb114) -#define _MIPIC_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb914) -#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH) +#define _MIPIA_COMMAND_LENGTH 0xb114 +#define _MIPIC_COMMAND_LENGTH 0xb914 +#define MIPI_COMMAND_LENGTH(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH) #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) -#define _MIPIA_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb118) -#define _MIPIC_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb918) -#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ +#define _MIPIA_READ_DATA_RETURN0 0xb118 +#define _MIPIC_READ_DATA_RETURN0 0xb918 +#define MIPI_READ_DATA_RETURN(display, port, n) _MMIO_MIPI(_MIPI_MMIO_BASE(display) + 4 * (n), 
port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) /* n: 0...7 */ -#define _MIPIA_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb138) -#define _MIPIC_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb938) -#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) +#define _MIPIA_READ_DATA_VALID 0xb138 +#define _MIPIC_READ_DATA_VALID 0xb938 +#define MIPI_READ_DATA_VALID(display, port) _MMIO_MIPI(_MIPI_MMIO_BASE(display), port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) #define READ_DATA_VALID(n) (1 << (n)) #endif /* __VLV_DSI_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index dcbfe32fd3..81f65cab13 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -879,6 +879,7 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, struct i915_gem_proto_context *pc, struct drm_i915_gem_context_param *args) { + struct drm_i915_private *i915 = fpriv->i915; int ret = 0; switch (args->param) { @@ -904,6 +905,13 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, pc->user_flags &= ~BIT(UCONTEXT_BANNABLE); break; + case I915_CONTEXT_PARAM_LOW_LATENCY: + if (intel_uc_uses_guc_submission(&to_gt(i915)->uc)) + pc->user_flags |= BIT(UCONTEXT_LOW_LATENCY); + else + ret = -EINVAL; + break; + case I915_CONTEXT_PARAM_RECOVERABLE: if (args->size) ret = -EINVAL; @@ -992,6 +1000,9 @@ static int intel_context_set_gem(struct intel_context *ce, if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS)) ret = intel_context_reconfigure_sseu(ce, sseu); + if (test_bit(UCONTEXT_LOW_LATENCY, &ctx->user_flags)) + __set_bit(CONTEXT_LOW_LATENCY, &ce->flags); + return ret; } @@ -1630,6 +1641,9 @@ i915_gem_create_context(struct drm_i915_private *i915, if (vm) ctx->vm = vm; + /* Assign early so intel_context_set_gem can access these flags */ + ctx->user_flags = pc->user_flags; + mutex_init(&ctx->engines_mutex); if (pc->num_user_engines >= 0) { i915_gem_context_set_user_engines(ctx); @@ -1652,8 +1666,6 @@ i915_gem_create_context(struct drm_i915_private *i915, * is no remap info, it will be a NOP. */ ctx->remap_slice = ALL_L3_SLICES(i915); - ctx->user_flags = pc->user_flags; - for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index 03bc7f9d19..b6d97da63d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -338,6 +338,7 @@ struct i915_gem_context { #define UCONTEXT_BANNABLE 2 #define UCONTEXT_RECOVERABLE 3 #define UCONTEXT_PERSISTENCE 4 +#define UCONTEXT_LOW_LATENCY 5 /** * @flags: small set of booleans diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index d3a771afb0..090724fa76 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2457,7 +2457,7 @@ static int eb_submit(struct i915_execbuffer *eb) * The engine index is returned. 
*/ static unsigned int -gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv, +gen8_dispatch_bsd_engine(struct drm_i915_private *i915, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; @@ -2465,7 +2465,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv, /* Check whether the file_priv has already selected one ring. */ if ((int)file_priv->bsd_engine < 0) file_priv->bsd_engine = - get_random_u32_below(dev_priv->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]); + get_random_u32_below(i915->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]); return file_priv->bsd_engine; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 0c5cdab278..1495b60744 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -386,7 +386,7 @@ struct drm_i915_gem_object { * and kernel mode driver for caching policy control after GEN12. * In the meantime platform specific tables are created to translate * i915_cache_level into pat index, for more details check the macros - * defined i915/i915_pci.c, e.g. PVC_CACHELEVEL. + * defined i915/i915_pci.c, e.g. TGL_CACHELEVEL. * For backward compatibility, this field contains values exactly match * the entries of enum i915_cache_level for pre-GEN12 platforms (See * LEGACY_CACHELEVEL), so that the PTE encode functions for these diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 0ba955611d..8780aa2431 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -5,6 +5,7 @@ */ #include +#include #include "gt/intel_gt.h" #include "gt/intel_tlb.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 38b72d8656..c5e1c718a6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -654,7 +654,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915, /* Allocate a new GEM object and fill it with the supplied data */ struct drm_i915_gem_object * -i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv, +i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915, const void *data, resource_size_t size) { struct drm_i915_gem_object *obj; @@ -663,8 +663,8 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv, resource_size_t offset; int err; - GEM_WARN_ON(IS_DGFX(dev_priv)); - obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE)); + GEM_WARN_ON(IS_DGFX(i915)); + obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE)); if (IS_ERR(obj)) return obj; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h index 258381d1c0..dfe0db8bb1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h @@ -14,14 +14,14 @@ struct drm_i915_gem_object; #define i915_stolen_fb drm_mm_node -int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, +int i915_gem_stolen_insert_node(struct drm_i915_private *i915, struct drm_mm_node *node, u64 size, unsigned alignment); -int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, +int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, struct drm_mm_node *node, u64 size, unsigned alignment, u64 start, u64 end); -void i915_gem_stolen_remove_node(struct drm_i915_private 
*dev_priv, +void i915_gem_stolen_remove_node(struct drm_i915_private *i915, struct drm_mm_node *node); struct intel_memory_region * i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type, @@ -31,7 +31,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, u16 instance); struct drm_i915_gem_object * -i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, +i915_gem_object_create_stolen(struct drm_i915_private *i915, resource_size_t size); bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index a049ca0b79..d9eb84c1d2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -343,12 +343,12 @@ int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_set_tiling *args = data; struct drm_i915_gem_object *obj; int err; - if (!to_gt(dev_priv)->ggtt->num_fences) + if (!to_gt(i915)->ggtt->num_fences) return -EOPNOTSUPP; obj = i915_gem_object_lookup(file, args->handle); @@ -374,9 +374,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, args->stride = 0; } else { if (args->tiling_mode == I915_TILING_X) - args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x; + args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_x; else - args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y; + args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_y; /* Hide bit 17 swizzling from the user. This prevents old Mesa * from aborting the application on sw fallbacks to bit 17, @@ -427,11 +427,11 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_get_tiling *args = data; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_object *obj; int err = -ENOENT; - if (!to_gt(dev_priv)->ggtt->num_fences) + if (!to_gt(i915)->ggtt->num_fences) return -EOPNOTSUPP; rcu_read_lock(); @@ -447,10 +447,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, switch (args->tiling_mode) { case I915_TILING_X: - args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x; + args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_x; break; case I915_TILING_Y: - args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y; + args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_y; break; default: case I915_TILING_NONE: @@ -459,7 +459,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, } /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ - if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) + if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN; else args->phys_swizzle_mode = args->swizzle_mode; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 61abfb5057..09b68713ab 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -463,13 +463,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev, struct drm_file *file) { static struct lock_class_key __maybe_unused lock_class; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_userptr *args = data; struct drm_i915_gem_object 
__maybe_unused *obj; int __maybe_unused ret; u32 __maybe_unused handle; - if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) { + if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) { /* We cannot support coherent userptr objects on hw without * LLC and broken snooping. */ @@ -501,7 +501,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, * On almost all of the older hw, we cannot tell the GPU that * a page is readonly. */ - if (!to_gt(dev_priv)->vm->has_read_only) + if (!to_gt(i915)->vm->has_read_only) return -ENODEV; } diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 3ff3d8889c..84d41e6ccf 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -713,7 +713,7 @@ static int igt_ppgtt_huge_fill(void *arg) { struct drm_i915_private *i915 = arg; unsigned int supported = RUNTIME_INFO(i915)->page_sizes; - bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50); + bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55); struct i915_address_space *vm; struct i915_gem_context *ctx; unsigned long max_pages; @@ -857,7 +857,7 @@ out: static int igt_ppgtt_64K(void *arg) { struct drm_i915_private *i915 = arg; - bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50); + bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55); struct drm_i915_gem_object *obj; struct i915_address_space *vm; struct i915_gem_context *ctx; @@ -1969,19 +1969,19 @@ int i915_gem_huge_page_mock_selftests(void) SUBTEST(igt_mock_memory_region_huge_pages), SUBTEST(igt_mock_ppgtt_misaligned_dma), }; - struct drm_i915_private *dev_priv; + struct drm_i915_private *i915; struct i915_ppgtt *ppgtt; int err; - dev_priv = mock_gem_device(); - if (!dev_priv) + i915 = mock_gem_device(); + if (!i915) return -ENOMEM; /* Pretend to be a device which supports the 48b PPGTT */ - RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; - RUNTIME_INFO(dev_priv)->ppgtt_size = 48; + RUNTIME_INFO(i915)->ppgtt_type = INTEL_PPGTT_FULL; + RUNTIME_INFO(i915)->ppgtt_size = 48; - ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0); + ppgtt = i915_ppgtt_create(to_gt(i915), 0); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; @@ -2005,7 +2005,7 @@ int i915_gem_huge_page_mock_selftests(void) out_put: i915_vm_put(&ppgtt->vm); out_unlock: - mock_destroy_device(dev_priv); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index 10a7847f1b..bac15196b4 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -117,7 +117,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915) if (gen < 12) return true; - if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55)) return false; return HAS_DISPLAY(i915); @@ -166,7 +166,7 @@ static int prepare_blit(const struct tiled_blits *t, src_pitch = t->width; /* in dwords */ if (src->tiling == CLIENT_TILING_Y) { src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR); - if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55)) src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4; } else if (src->tiling == CLIENT_TILING_X) { src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X); @@ -177,7 +177,7 @@ static int prepare_blit(const struct tiled_blits *t, dst_pitch = t->width; /* in dwords */ 
if (dst->tiling == CLIENT_TILING_Y) { dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR); - if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55)) dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4; } else if (dst->tiling == CLIENT_TILING_X) { dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X); @@ -365,7 +365,7 @@ static u64 tiled_offset(const struct intel_gt *gt, v += x; swizzle = gt->ggtt->bit_6_swizzle_x; - } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) { + } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) { /* Y-major tiling layout is Tile4 for Xe_HP and beyond */ v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index d684a70f2c..3527b8f446 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -7,6 +7,7 @@ #include "i915_drv.h" #include "i915_selftest.h" #include "gem/i915_gem_context.h" +#include "gt/intel_gt.h" #include "mock_context.h" #include "mock_dmabuf.h" @@ -155,6 +156,7 @@ static int verify_access(struct drm_i915_private *i915, struct file *file; u32 *vaddr; int err = 0, i; + unsigned int mode; file = mock_file(i915); if (IS_ERR(file)) @@ -194,7 +196,8 @@ static int verify_access(struct drm_i915_private *i915, if (err) goto out_file; - vaddr = i915_gem_object_pin_map_unlocked(native_obj, I915_MAP_WB); + mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, false); + vaddr = i915_gem_object_pin_map_unlocked(native_obj, mode); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); goto out_file; diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c index b2a5882b8f..0756570187 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c @@ -4,6 +4,7 @@ * Copyright © 2016 Intel Corporation */ +#include #include "mock_dmabuf.h" static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment, diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index e1bf13e3d3..e9f65f27b5 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -189,9 +189,6 @@ static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine) { i915_reg_t reg = gen12_get_aux_inv_reg(engine); - if (IS_PONTEVECCHIO(engine->i915)) - return false; - /* * So far platforms supported by i915 having flat ccs do not require * AUX invalidation. Check also whether the engine requires it. 
@@ -743,21 +740,25 @@ static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs) } /* Wa_14014475959:dg2 */ -#define CCS_SEMAPHORE_PPHWSP_OFFSET 0x540 -static u32 ccs_semaphore_offset(struct i915_request *rq) +/* Wa_16019325821 */ +/* Wa_14019159160 */ +#define HOLD_SWITCHOUT_SEMAPHORE_PPHWSP_OFFSET 0x540 +static u32 hold_switchout_semaphore_offset(struct i915_request *rq) { return i915_ggtt_offset(rq->context->state) + - (LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET; + (LRC_PPHWSP_PN * PAGE_SIZE) + HOLD_SWITCHOUT_SEMAPHORE_PPHWSP_OFFSET; } /* Wa_14014475959:dg2 */ -static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs) +/* Wa_16019325821 */ +/* Wa_14019159160 */ +static u32 *hold_switchout_emit_wa_busywait(struct i915_request *rq, u32 *cs) { int i; *cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL | MI_ATOMIC_MOVE; - *cs++ = ccs_semaphore_offset(rq); + *cs++ = hold_switchout_semaphore_offset(rq); *cs++ = 0; *cs++ = 1; @@ -773,7 +774,7 @@ static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs) MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_EQ_SDD; *cs++ = 0; - *cs++ = ccs_semaphore_offset(rq); + *cs++ = hold_switchout_semaphore_offset(rq); *cs++ = 0; return cs; @@ -790,8 +791,10 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs) cs = gen12_emit_preempt_busywait(rq, cs); /* Wa_14014475959:dg2 */ - if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine)) - cs = ccs_emit_wa_busywait(rq, cs); + /* Wa_16019325821 */ + /* Wa_14019159160 */ + if (intel_engine_uses_wa_hold_switchout(rq->engine)) + cs = hold_switchout_emit_wa_busywait(rq, cs); rq->tail = intel_ring_offset(rq, cs); assert_ring_tail_valid(rq->ring, rq->tail); @@ -827,7 +830,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) cs = gen12_emit_pipe_control(cs, 0, PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0); - if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) + if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55)) /* Wa_1409600907 */ flags |= PIPE_CONTROL_DEPTH_STALL; diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 81bf221637..398d60a664 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -500,11 +500,11 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, } static void -xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm, - struct i915_vma_resource *vma_res, - struct sgt_dma *iter, - unsigned int pat_index, - u32 flags) +xehp_ppgtt_insert_huge(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + struct sgt_dma *iter, + unsigned int pat_index, + u32 flags) { const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags); unsigned int rem = sg_dma_len(iter->sg); @@ -741,8 +741,8 @@ static void gen8_ppgtt_insert(struct i915_address_space *vm, struct sgt_dma iter = sgt_dma(vma_res); if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) { - if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 50)) - xehpsdv_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags); + if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 55)) + xehp_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags); else gen8_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags); } else { @@ -781,11 +781,11 @@ static void gen8_ppgtt_insert_entry(struct i915_address_space *vm, drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr)); } -static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm, - dma_addr_t addr, - 
u64 offset, - unsigned int pat_index, - u32 flags) +static void xehp_ppgtt_insert_entry_lm(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + unsigned int pat_index, + u32 flags) { u64 idx = offset >> GEN8_PTE_SHIFT; struct i915_page_directory * const pdp = @@ -810,15 +810,15 @@ static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm, vaddr[gen8_pd_index(idx, 0) / 16] = vm->pte_encode(addr, pat_index, flags); } -static void xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - unsigned int pat_index, - u32 flags) +static void xehp_ppgtt_insert_entry(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + unsigned int pat_index, + u32 flags) { if (flags & PTE_LM) - return __xehpsdv_ppgtt_insert_entry_lm(vm, addr, offset, - pat_index, flags); + return xehp_ppgtt_insert_entry_lm(vm, addr, offset, + pat_index, flags); return gen8_ppgtt_insert_entry(vm, addr, offset, pat_index, flags); } @@ -1045,7 +1045,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt, ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND; ppgtt->vm.insert_entries = gen8_ppgtt_insert; if (HAS_64K_PAGES(gt->i915)) - ppgtt->vm.insert_page = xehpsdv_ppgtt_insert_entry; + ppgtt->vm.insert_page = xehp_ppgtt_insert_entry; else ppgtt->vm.insert_page = gen8_ppgtt_insert_entry; ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc; diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 7eccbd70d8..ed95a7b57c 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -130,6 +130,7 @@ struct intel_context { #define CONTEXT_PERMA_PIN 11 #define CONTEXT_IS_PARKING 12 #define CONTEXT_EXITING 13 +#define CONTEXT_LOW_LATENCY 14 struct { u64 timeout_us; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index bc6209df0f..3b740ca250 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -497,9 +497,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, engine->logical_mask = BIT(logical_instance); __sprint_engine_name(engine); - if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) && - __ffs(CCS_MASK(engine->gt)) == engine->instance) || - engine->class == RENDER_CLASS) + if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) && + __ffs(CCS_MASK(engine->gt) | RCS_MASK(engine->gt)) == engine->instance) engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE; /* features common between engines sharing EUs */ @@ -589,7 +588,7 @@ u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value) * NB: The GuC API only supports 32bit values. However, the limit is further * reduced due to internal calculations which would otherwise overflow. */ - if (intel_guc_submission_is_wanted(&engine->gt->uc.guc)) + if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt))) value = min_t(u64, value, guc_policy_max_preempt_timeout_ms()); value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); @@ -610,7 +609,7 @@ u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value) * NB: The GuC API only supports 32bit values. However, the limit is further * reduced due to internal calculations which would otherwise overflow. 
*/ - if (intel_guc_submission_is_wanted(&engine->gt->uc.guc)) + if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt))) value = min_t(u64, value, guc_policy_max_exec_quantum_ms()); value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); @@ -679,7 +678,7 @@ void intel_engines_release(struct intel_gt *gt) */ GEM_BUG_ON(intel_gt_pm_is_awake(gt)); if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) - __intel_gt_reset(gt, ALL_ENGINES); + intel_gt_reset_all_engines(gt); /* Decouple the backend; but keep the layout for late GPU resets */ for_each_engine(engine, gt, id) { @@ -765,14 +764,14 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt) * and bits have disable semantices. */ media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE); - if (MEDIA_VER_FULL(i915) < IP_VER(12, 50)) + if (MEDIA_VER_FULL(i915) < IP_VER(12, 55)) media_fuse = ~media_fuse; vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> GEN11_GT_VEBOX_DISABLE_SHIFT; - if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) { + if (MEDIA_VER_FULL(i915) >= IP_VER(12, 55)) { fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1); gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1); } else { @@ -839,38 +838,6 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt) } } -static void engine_mask_apply_copy_fuses(struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - struct intel_gt_info *info = >->info; - unsigned long meml3_mask; - unsigned long quad; - - if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) && - GRAPHICS_VER_FULL(i915) < IP_VER(12, 70))) - return; - - meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3); - meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask); - - /* - * Link Copy engines may be fused off according to meml3_mask. Each - * bit is a quad that houses 2 Link Copy and two Sub Copy engines. - */ - for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) { - unsigned int instance = quad * 2 + 1; - intel_engine_mask_t mask = GENMASK(_BCS(instance + 1), - _BCS(instance)); - - if (mask & info->engine_mask) { - gt_dbg(gt, "bcs%u fused off\n", instance); - gt_dbg(gt, "bcs%u fused off\n", instance + 1); - - info->engine_mask &= ~mask; - } - } -} - /* * Determine which engines are fused off in our particular hardware. 
* Note that we have a catch-22 situation where we need to be able to access @@ -889,7 +856,6 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) engine_mask_apply_media_fuses(gt); engine_mask_apply_compute_fuses(gt); - engine_mask_apply_copy_fuses(gt); /* * The only use of the GSC CS is to load and communicate with the GSC @@ -1216,7 +1182,6 @@ static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine) if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 74) || GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) || GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) || - GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) || GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) { regs = xehp_regs; num = ARRAY_SIZE(xehp_regs); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 960e6be204..ba55c05906 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -586,7 +586,7 @@ struct intel_engine_cs { #define I915_ENGINE_HAS_RCS_REG_STATE BIT(9) #define I915_ENGINE_HAS_EU_PRIORITY BIT(10) #define I915_ENGINE_FIRST_RENDER_COMPUTE BIT(11) -#define I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT BIT(12) +#define I915_ENGINE_USES_WA_HOLD_SWITCHOUT BIT(12) unsigned int flags; /* @@ -696,10 +696,12 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine) } /* Wa_14014475959:dg2 */ +/* Wa_16019325821 */ +/* Wa_14019159160 */ static inline bool -intel_engine_uses_wa_hold_ccs_switchout(struct intel_engine_cs *engine) +intel_engine_uses_wa_hold_switchout(struct intel_engine_cs *engine) { - return engine->flags & I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT; + return engine->flags & I915_ENGINE_USES_WA_HOLD_SWITCHOUT; } #endif /* __INTEL_ENGINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index b061a0a0d6..72090f52fb 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -493,7 +493,7 @@ __execlists_schedule_in(struct i915_request *rq) /* Use a fixed tag for OA and friends */ GEM_BUG_ON(ce->tag <= BITS_PER_LONG); ce->lrc.ccid = ce->tag; - } else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) { + } else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) { /* We don't need a strict matching tag, just different values */ unsigned int tag = ffs(READ_ONCE(engine->context_tag)); @@ -613,7 +613,7 @@ static void __execlists_schedule_out(struct i915_request * const rq, intel_engine_add_retire(engine, ce->timeline); ccid = ce->lrc.ccid; - if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) { + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) { ccid >>= XEHP_SW_CTX_ID_SHIFT - 32; ccid &= XEHP_MAX_CONTEXT_HW_ID; } else { @@ -1907,7 +1907,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive) ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n", head, upper_32_bits(csb), lower_32_bits(csb)); - if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) promote = xehp_csb_parse(csb); else if (GRAPHICS_VER(engine->i915) >= 12) promote = gen12_csb_parse(csb); @@ -2898,7 +2898,7 @@ static void enable_error_interrupt(struct intel_engine_cs *engine) drm_err(&engine->i915->drm, "engine '%s' resumed still in error: %08x\n", engine->name, status); - __intel_gt_reset(engine->gt, engine->mask); + intel_gt_reset_engine(engine); } /* @@ -3315,11 +3315,7 @@ static void 
remove_from_engine(struct i915_request *rq) static bool can_preempt(struct intel_engine_cs *engine) { - if (GRAPHICS_VER(engine->i915) > 8) - return true; - - /* GPGPU on bdw requires extra w/a; not implemented */ - return engine->class != RENDER_CLASS; + return GRAPHICS_VER(engine->i915) > 8; } static void kick_execlists(const struct i915_request *rq, int prio) @@ -3482,7 +3478,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) } } - if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) { + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) { if (intel_engine_has_preemption(engine)) engine->emit_bb_start = xehp_emit_bb_start; else @@ -3585,7 +3581,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0); if (GRAPHICS_VER(engine->i915) >= 11 && - GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 50)) { + GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 55)) { execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32); execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32); } diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index ec1cbe229f..0d0a0dc9f6 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -231,11 +231,8 @@ static void guc_ggtt_ct_invalidate(struct intel_gt *gt) struct intel_uncore *uncore = gt->uncore; intel_wakeref_t wakeref; - with_intel_runtime_pm_if_active(uncore->rpm, wakeref) { - struct intel_guc *guc = >->uc.guc; - - intel_guc_invalidate_tlb_guc(guc); - } + with_intel_runtime_pm_if_active(uncore->rpm, wakeref) + intel_guc_invalidate_tlb_guc(gt_to_guc(gt)); } static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) @@ -246,7 +243,7 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) gen8_ggtt_invalidate(ggtt); list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) { - if (intel_guc_tlb_invalidation_is_available(>->uc.guc)) + if (intel_guc_tlb_invalidation_is_available(gt_to_guc(gt))) guc_ggtt_ct_invalidate(gt); else if (GRAPHICS_VER(i915) >= 12) intel_uncore_write_fw(gt->uncore, diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c index 6d440de8ba..1e925c75fb 100644 --- a/drivers/gpu/drm/i915/gt/intel_gsc.c +++ b/drivers/gpu/drm/i915/gt/intel_gsc.c @@ -103,19 +103,6 @@ static const struct gsc_def gsc_def_dg1[] = { } }; -static const struct gsc_def gsc_def_xehpsdv[] = { - { - /* HECI1 not enabled on the device. 
*/ - }, - { - .name = "mei-gscfi", - .bar = DG1_GSC_HECI2_BASE, - .bar_size = GSC_BAR_LENGTH, - .use_polling = true, - .slow_firmware = true, - } -}; - static const struct gsc_def gsc_def_dg2[] = { { .name = "mei-gsc", @@ -188,8 +175,6 @@ static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc, if (IS_DG1(i915)) { def = &gsc_def_dg1[intf_id]; - } else if (IS_XEHPSDV(i915)) { - def = &gsc_def_xehpsdv[intf_id]; } else if (IS_DG2(i915)) { def = &gsc_def_dg2[intf_id]; } else { diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 6a2c2718bc..626b166e67 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -278,7 +278,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt, intel_uncore_posting_read(uncore, XELPMP_RING_FAULT_REG); - } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { + } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) { intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG, RING_FAULT_VALID, 0); intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG); @@ -403,7 +403,7 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt) struct drm_i915_private *i915 = gt->i915; /* From GEN8 onwards we only have one 'All Engine Fault Register' */ - if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) xehp_check_faults(gt); else if (GRAPHICS_VER(i915) >= 8) gen8_check_faults(gt); @@ -832,7 +832,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt) /* Scrub all HW state upon release */ with_intel_runtime_pm(gt->uncore->rpm, wakeref) - __intel_gt_reset(gt, ALL_ENGINES); + intel_gt_reset_all_engines(gt); } void intel_gt_driver_release(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 003eb93b82..b5e114d284 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -124,6 +124,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) return guc_to_gt(guc)->i915; } +static inline struct intel_guc *gt_to_guc(struct intel_gt *gt) +{ + return >->uc.guc; +} + void intel_gt_common_init_early(struct intel_gt *gt); int intel_root_gt_init_early(struct drm_i915_private *i915); int intel_gt_assign_ggtt(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index 77fb572234..ad4c51f18d 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -68,9 +68,9 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, struct intel_gt *media_gt = gt->i915->media_gt; if (instance == OTHER_GUC_INSTANCE) - return guc_irq_handler(>->uc.guc, iir); + return guc_irq_handler(gt_to_guc(gt), iir); if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt) - return guc_irq_handler(&media_gt->uc.guc, iir); + return guc_irq_handler(gt_to_guc(media_gt), iir); if (instance == OTHER_GTPM_INSTANCE) return gen11_rps_irq_handler(>->rps, iir); @@ -442,7 +442,7 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl) iir = raw_reg_read(regs, GEN8_GT_IIR(2)); if (likely(iir)) { gen6_rps_irq_handler(>->rps, iir); - guc_irq_handler(>->uc.guc, iir >> 16); + guc_irq_handler(gt_to_guc(gt), iir >> 16); raw_reg_write(regs, GEN8_GT_IIR(2), iir); } } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c index e253750a51..b8912bd6c0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c @@ -57,51 
+57,18 @@ static const struct intel_mmio_range icl_l3bank_steering_table[] = { * are of a "GAM" subclass that has special rules. Thus we use a separate * GAM table farther down for those. */ -static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = { +static const struct intel_mmio_range dg2_mslice_steering_table[] = { { 0x00DD00, 0x00DDFF }, { 0x00E900, 0x00FFFF }, /* 0xEA00 - OxEFFF is unused */ {}, }; -static const struct intel_mmio_range xehpsdv_gam_steering_table[] = { - { 0x004000, 0x004AFF }, - { 0x00C800, 0x00CFFF }, - {}, -}; - -static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = { - { 0x00B000, 0x00B0FF }, - { 0x00D800, 0x00D8FF }, - {}, -}; - static const struct intel_mmio_range dg2_lncf_steering_table[] = { { 0x00B000, 0x00B0FF }, { 0x00D880, 0x00D8FF }, {}, }; -/* - * We have several types of MCR registers on PVC where steering to (0,0) - * will always provide us with a non-terminated value. We'll stick them - * all in the same table for simplicity. - */ -static const struct intel_mmio_range pvc_instance0_steering_table[] = { - { 0x004000, 0x004AFF }, /* HALF-BSLICE */ - { 0x008800, 0x00887F }, /* CC */ - { 0x008A80, 0x008AFF }, /* TILEPSMI */ - { 0x00B000, 0x00B0FF }, /* HALF-BSLICE */ - { 0x00B100, 0x00B3FF }, /* L3BANK */ - { 0x00C800, 0x00CFFF }, /* HALF-BSLICE */ - { 0x00D800, 0x00D8FF }, /* HALF-BSLICE */ - { 0x00DD00, 0x00DDFF }, /* BSLICE */ - { 0x00E900, 0x00E9FF }, /* HALF-BSLICE */ - { 0x00EC00, 0x00EEFF }, /* HALF-BSLICE */ - { 0x00F000, 0x00FFFF }, /* HALF-BSLICE */ - { 0x024180, 0x0241FF }, /* HALF-BSLICE */ - {}, -}; - static const struct intel_mmio_range xelpg_instance0_steering_table[] = { { 0x000B00, 0x000BFF }, /* SQIDI */ { 0x001000, 0x001FFF }, /* SQIDI */ @@ -185,22 +152,16 @@ void intel_gt_mcr_init(struct intel_gt *gt) gt->steering_table[INSTANCE0] = xelpg_instance0_steering_table; gt->steering_table[L3BANK] = xelpg_l3bank_steering_table; gt->steering_table[DSS] = xelpg_dss_steering_table; - } else if (IS_PONTEVECCHIO(i915)) { - gt->steering_table[INSTANCE0] = pvc_instance0_steering_table; } else if (IS_DG2(i915)) { - gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table; + gt->steering_table[MSLICE] = dg2_mslice_steering_table; gt->steering_table[LNCF] = dg2_lncf_steering_table; /* * No need to hook up the GAM table since it has a dedicated * steering control register on DG2 and can use implicit * steering. 
*/ - } else if (IS_XEHPSDV(i915)) { - gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table; - gt->steering_table[LNCF] = xehpsdv_lncf_steering_table; - gt->steering_table[GAM] = xehpsdv_gam_steering_table; } else if (GRAPHICS_VER(i915) >= 11 && - GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) { + GRAPHICS_VER_FULL(i915) < IP_VER(12, 55)) { gt->steering_table[L3BANK] = icl_l3bank_steering_table; gt->info.l3bank_mask = ~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) & @@ -821,8 +782,6 @@ void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt, for (int i = 0; i < NUM_STEERING_TYPES; i++) if (gt->steering_table[i]) report_steering_type(p, gt, i, dump_table); - } else if (IS_PONTEVECCHIO(gt->i915)) { - report_steering_type(p, gt, INSTANCE0, dump_table); } else if (HAS_MSLICE_STEERING(gt->i915)) { report_steering_type(p, gt, MSLICE, dump_table); report_steering_type(p, gt, LNCF, dump_table); @@ -842,10 +801,7 @@ void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt, void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss, unsigned int *group, unsigned int *instance) { - if (IS_PONTEVECCHIO(gt->i915)) { - *group = dss / GEN_DSS_PER_CSLICE; - *instance = dss % GEN_DSS_PER_CSLICE; - } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) { + if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) { *group = dss / GEN_DSS_PER_GSLICE; *instance = dss % GEN_DSS_PER_GSLICE; } else { diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h index 01ac565a56..a67a4c35a4 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h @@ -54,7 +54,7 @@ int intel_gt_mcr_wait_for_reg(struct intel_gt *gt, * the topology, so we lookup the DSS ID directly in "slice 0." */ #define _HAS_SS(ss_, gt_, group_, instance_) ( \ - GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 50) ? \ + GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 55) ? 
\ intel_sseu_has_subslice(&(gt_)->info.sseu, 0, ss_) : \ intel_sseu_has_subslice(&(gt_)->info.sseu, group_, instance_)) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 220ac4f92e..c08fdb65cc 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -159,7 +159,7 @@ static bool reset_engines(struct intel_gt *gt) if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) return false; - return __intel_gt_reset(gt, ALL_ENGINES) == 0; + return intel_gt_reset_all_engines(gt) == 0; } static void gt_sanitize(struct intel_gt *gt, bool force) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index 7114c116e9..4fcba42cfe 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -392,10 +392,6 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p) drm_puts(p, "no P-state info available\n"); } - drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk); - drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq); - drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq); - intel_runtime_pm_put(uncore->rpm, wakeref); } @@ -538,7 +534,7 @@ static bool rps_eval(void *data) { struct intel_gt *gt = data; - if (intel_guc_slpc_is_used(>->uc.guc)) + if (intel_guc_slpc_is_used(gt_to_guc(gt))) return false; else return HAS_RPS(gt->i915); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index 743fe35667..e42b3a5d4e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -718,44 +718,11 @@ #define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) #define VFUNIT_CLKGATE_DIS REG_BIT(20) -#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */ #define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */ #define GAMEDIA_CLKGATE_DIS REG_BIT(11) #define HSUNIT_CLKGATE_DIS REG_BIT(8) #define VSUNIT_CLKGATE_DIS REG_BIT(3) -#define UNSLCGCTL9440 _MMIO(0x9440) -#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28) -#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27) -#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26) -#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24) -#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23) -#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22) -#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21) -#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17) -#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16) -#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15) -#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14) -#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6) - -#define UNSLCGCTL9444 _MMIO(0x9444) -#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30) -#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29) -#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28) -#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27) -#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26) -#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25) -#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24) -#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23) -#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22) -#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21) -#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20) -#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19) -#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18) -#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17) -#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16) -#define LTCDD_CLKGATE_DIS REG_BIT(10) - #define GEN11_SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4) #define XEHP_SLICE_UNIT_LEVEL_CLKGATE MCR_REG(0x94d4) #define 
SARBUNIT_CLKGATE_DIS (1 << 5) @@ -765,9 +732,6 @@ #define L3_CLKGATE_DIS REG_BIT(16) #define L3_CR2X_CLKGATE_DIS REG_BIT(17) -#define SCCGCTL94DC MCR_REG(0x94dc) -#define CG3DDISURB REG_BIT(14) - #define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4) #define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19) #define PSDUNIT_CLKGATE_DIS REG_BIT(5) @@ -989,10 +953,6 @@ #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C #define GEN7_L3AGDIS (1 << 19) -#define XEHPC_LNCFMISCCFGREG0 MCR_REG(0xb01c) -#define XEHPC_HOSTCACHEEN REG_BIT(1) -#define XEHPC_OVRLSCCC REG_BIT(0) - #define GEN7_L3CNTLREG2 _MMIO(0xb020) /* MOCS (Memory Object Control State) registers */ @@ -1046,20 +1006,9 @@ #define XEHP_L3SQCREG5 MCR_REG(0xb158) #define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0) -#define MLTICTXCTL MCR_REG(0xb170) -#define TDONRENDER REG_BIT(2) - #define XEHP_L3SCQREG7 MCR_REG(0xb188) #define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3) -#define XEHPC_L3SCRUB MCR_REG(0xb18c) -#define SCRUB_CL_DWNGRADE_SHARED REG_BIT(12) -#define SCRUB_RATE_PER_BANK_MASK REG_GENMASK(2, 0) -#define SCRUB_RATE_4B_PER_CLK REG_FIELD_PREP(SCRUB_RATE_PER_BANK_MASK, 0x6) - -#define L3SQCREG1_CCS0 MCR_REG(0xb200) -#define FLUSHALLNONCOH REG_BIT(5) - #define GEN11_GLBLINVL _MMIO(0xb404) #define GEN11_BANK_HASH_ADDR_EXCL_MASK (0x7f << 5) #define GEN11_BANK_HASH_ADDR_EXCL_BIT0 (1 << 5) @@ -1109,7 +1058,6 @@ #define XEHP_COMPCTX_TLB_INV_CR MCR_REG(0xcf04) #define XELPMP_GSC_TLB_INV_CR _MMIO(0xcf04) /* media GT only */ -#define XEHP_MERT_MOD_CTRL MCR_REG(0xcf28) #define RENDER_MOD_CTRL MCR_REG(0xcf2c) #define COMP_MOD_CTRL MCR_REG(0xcf30) #define XELPMP_GSC_MOD_CTRL _MMIO(0xcf30) /* media GT only */ @@ -1185,7 +1133,6 @@ #define EU_PERF_CNTL4 PERF_REG(0xe45c) #define GEN9_ROW_CHICKEN4 MCR_REG(0xe48c) -#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13) #define XEHP_DIS_BBL_SYSPIPE REG_BIT(11) #define GEN12_DISABLE_TDL_PUSH REG_BIT(9) #define GEN11_DIS_PICK_2ND_EU REG_BIT(7) @@ -1202,7 +1149,6 @@ #define FLOW_CONTROL_ENABLE REG_BIT(15) #define UGM_BACKUP_MODE REG_BIT(13) #define MDQ_ARBITRATION_MODE REG_BIT(12) -#define SYSTOLIC_DOP_CLOCK_GATING_DIS REG_BIT(10) #define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE REG_BIT(8) #define STALL_DOP_GATING_DISABLE REG_BIT(5) #define THROTTLE_12_5 REG_GENMASK(4, 2) @@ -1215,6 +1161,7 @@ #define GEN12_DISABLE_EARLY_READ REG_BIT(14) #define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12) #define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8) +#define XELPG_DISABLE_TDL_SVHS_GATING REG_BIT(1) #define GEN12_DISABLE_DOP_GATING REG_BIT(0) #define RT_CTRL MCR_REG(0xe530) @@ -1685,11 +1632,6 @@ #define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000) -#define GT0_PACKAGE_ENERGY_STATUS _MMIO(0x250004) -#define GT0_PACKAGE_RAPL_LIMIT _MMIO(0x250008) -#define GT0_PACKAGE_POWER_SKU_UNIT _MMIO(0x250068) -#define GT0_PLATFORM_ENERGY_STATUS _MMIO(0x25006c) - /* * Standalone Media's non-engine GT registers are located at their regular GT * offsets plus 0x380000. 
This extra offset is stored inside the intel_uncore diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c index c0b2022239..d7784650e4 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c @@ -442,7 +442,7 @@ static ssize_t slpc_ignore_eff_freq_show(struct kobject *kobj, char *buff) { struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); - struct intel_guc_slpc *slpc = >->uc.guc.slpc; + struct intel_guc_slpc *slpc = >_to_guc(gt)->slpc; return sysfs_emit(buff, "%u\n", slpc->ignore_eff_freq); } @@ -452,7 +452,7 @@ static ssize_t slpc_ignore_eff_freq_store(struct kobject *kobj, const char *buff, size_t count) { struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); - struct intel_guc_slpc *slpc = >->uc.guc.slpc; + struct intel_guc_slpc *slpc = >_to_guc(gt)->slpc; int err; u32 val; @@ -573,7 +573,6 @@ static ssize_t media_freq_factor_show(struct kobject *kobj, char *buff) { struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); - struct intel_guc_slpc *slpc = >->uc.guc.slpc; intel_wakeref_t wakeref; u32 mode; @@ -581,20 +580,12 @@ static ssize_t media_freq_factor_show(struct kobject *kobj, * Retrieve media_ratio_mode from GEN6_RPNSWREQ bit 13 set by * GuC. GEN6_RPNSWREQ:13 value 0 represents 1:2 and 1 represents 1:1 */ - if (IS_XEHPSDV(gt->i915) && - slpc->media_ratio_mode == SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL) { - /* - * For XEHPSDV dynamic mode GEN6_RPNSWREQ:13 does not contain - * the media_ratio_mode, just return the cached media ratio - */ - mode = slpc->media_ratio_mode; - } else { - with_intel_runtime_pm(gt->uncore->rpm, wakeref) - mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ); - mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ? - SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE : - SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO; - } + with_intel_runtime_pm(gt->uncore->rpm, wakeref) + mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ); + + mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ? 
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE : + SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO; return sysfs_emit(buff, "%u\n", media_ratio_mode_to_factor(mode)); } @@ -604,7 +595,7 @@ static ssize_t media_freq_factor_store(struct kobject *kobj, const char *buff, size_t count) { struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); - struct intel_guc_slpc *slpc = >->uc.guc.slpc; + struct intel_guc_slpc *slpc = >_to_guc(gt)->slpc; u32 factor, mode; int err; diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 7811a8c9da..30b128b1fd 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -680,7 +680,7 @@ void setup_private_pat(struct intel_gt *gt) if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) xelpg_setup_private_ppat(gt); - else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) + else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) xehp_setup_private_ppat(gt); else if (GRAPHICS_VER(i915) >= 12) tgl_setup_private_ppat(uncore); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 7c367ba8d9..b387146ede 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -546,47 +546,6 @@ static const u8 gen12_rcs_offsets[] = { END }; -static const u8 xehp_rcs_offsets[] = { - NOP(1), - LRI(13, POSTED), - REG16(0x244), - REG(0x034), - REG(0x030), - REG(0x038), - REG(0x03c), - REG(0x168), - REG(0x140), - REG(0x110), - REG(0x1c0), - REG(0x1c4), - REG(0x1c8), - REG(0x180), - REG16(0x2b4), - - NOP(5), - LRI(9, POSTED), - REG16(0x3a8), - REG16(0x28c), - REG16(0x288), - REG16(0x284), - REG16(0x280), - REG16(0x27c), - REG16(0x278), - REG16(0x274), - REG16(0x270), - - LRI(3, POSTED), - REG(0x1b0), - REG16(0x5a8), - REG16(0x5ac), - - NOP(6), - LRI(1, 0), - REG(0x0c8), - - END -}; - static const u8 dg2_rcs_offsets[] = { NOP(1), LRI(15, POSTED), @@ -695,8 +654,6 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine) return mtl_rcs_offsets; else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) return dg2_rcs_offsets; - else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) - return xehp_rcs_offsets; else if (GRAPHICS_VER(engine->i915) >= 12) return gen12_rcs_offsets; else if (GRAPHICS_VER(engine->i915) >= 11) @@ -719,7 +676,7 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine) static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) { - if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) return 0x70; else if (GRAPHICS_VER(engine->i915) >= 12) return 0x60; @@ -733,7 +690,7 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) static int lrc_ring_bb_offset(const struct intel_engine_cs *engine) { - if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) return 0x80; else if (GRAPHICS_VER(engine->i915) >= 12) return 0x70; @@ -748,7 +705,7 @@ static int lrc_ring_bb_offset(const struct intel_engine_cs *engine) static int lrc_ring_gpr0(const struct intel_engine_cs *engine) { - if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) return 0x84; else if (GRAPHICS_VER(engine->i915) >= 12) return 0x74; @@ -795,7 +752,7 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine) static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine) { - if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) + if 
(GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) /* * Note that the CSFE context has a dummy slot for CMD_BUF_CCTL * simply to match the RCS context image layout. diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c index 576e5ef028..6f7af40771 100644 --- a/drivers/gpu/drm/i915/gt/intel_migrate.c +++ b/drivers/gpu/drm/i915/gt/intel_migrate.c @@ -35,9 +35,9 @@ static bool engine_supports_migration(struct intel_engine_cs *engine) return true; } -static void xehpsdv_toggle_pdes(struct i915_address_space *vm, - struct i915_page_table *pt, - void *data) +static void xehp_toggle_pdes(struct i915_address_space *vm, + struct i915_page_table *pt, + void *data) { struct insert_pte_data *d = data; @@ -52,9 +52,9 @@ static void xehpsdv_toggle_pdes(struct i915_address_space *vm, d->offset += SZ_2M; } -static void xehpsdv_insert_pte(struct i915_address_space *vm, - struct i915_page_table *pt, - void *data) +static void xehp_insert_pte(struct i915_address_space *vm, + struct i915_page_table *pt, + void *data) { struct insert_pte_data *d = data; @@ -120,7 +120,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt) * 512 entry layout using 4K GTT pages. The other two windows just map * lmem pages and must use the new compact 32 entry layout using 64K GTT * pages, which ensures we can address any lmem object that the user - * throws at us. We then also use the xehpsdv_toggle_pdes as a way of + * throws at us. We then also use the xehp_toggle_pdes as a way of * just toggling the PDE bit(GEN12_PDE_64K) for us, to enable the * compact layout for each of these page-tables, that fall within the * [CHUNK_SIZE, 3 * CHUNK_SIZE) range. @@ -209,12 +209,12 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt) /* Now allow the GPU to rewrite the PTE via its own ppGTT */ if (HAS_64K_PAGES(gt->i915)) { vm->vm.foreach(&vm->vm, base, d.offset - base, - xehpsdv_insert_pte, &d); + xehp_insert_pte, &d); d.offset = base + CHUNK_SZ; vm->vm.foreach(&vm->vm, d.offset, 2 * CHUNK_SZ, - xehpsdv_toggle_pdes, &d); + xehp_toggle_pdes, &d); } else { vm->vm.foreach(&vm->vm, base, d.offset - base, insert_pte, &d); @@ -925,7 +925,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size, GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX); - if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) ring_sz = XY_FAST_COLOR_BLT_DW; else if (ver >= 8) ring_sz = 8; @@ -936,7 +936,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size, if (IS_ERR(cs)) return PTR_ERR(cs); - if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { + if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) { *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 | (XY_FAST_COLOR_BLT_DW - 2); *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) | diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 25c1023eb5..d791d63d49 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -53,7 +53,6 @@ struct drm_i915_mocs_table { /* Helper defines */ #define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. 
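The migrate_vm() comment above describes flipping the 64K PDE bit only for the page tables backing the two lmem windows, via a foreach-plus-callback walk (xehp_toggle_pdes). A standalone sketch of that idea with invented sketch_* constants; the real CHUNK_SZ and GEN12_PDE_64K values are not reproduced here:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PDE_64K   (1ull << 6)     /* stand-in for the 64K PDE flag */
#define SKETCH_CHUNK_SZ  (256ull << 20)  /* stand-in window size */
#define SKETCH_PDE_SPAN  (2ull << 20)    /* each PDE maps 2M of address space */

typedef void (*pde_fn)(uint64_t *pde, void *data);

static void toggle_64k(uint64_t *pde, void *data)
{
        (void)data;
        *pde |= SKETCH_PDE_64K;          /* mirrors the toggle_pdes callback */
}

/* Apply @fn to every PDE whose span falls inside [start, start + length). */
static void foreach_pde(uint64_t *pdes, uint64_t start, uint64_t length,
                        pde_fn fn, void *data)
{
        for (uint64_t off = start; off < start + length; off += SKETCH_PDE_SPAN)
                fn(&pdes[off / SKETCH_PDE_SPAN], data);
}

int main(void)
{
        static uint64_t pdes[3 * SKETCH_CHUNK_SZ / SKETCH_PDE_SPAN];

        /* Only the second and third windows get the compact 64K layout. */
        foreach_pde(pdes, SKETCH_CHUNK_SZ, 2 * SKETCH_CHUNK_SZ, toggle_64k, NULL);

        printf("PDE[0] 64K? %d, PDE[last] 64K? %d\n",
               !!(pdes[0] & SKETCH_PDE_64K),
               !!(pdes[(3 * SKETCH_CHUNK_SZ / SKETCH_PDE_SPAN) - 1] & SKETCH_PDE_64K));
        return 0;
}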
*/ -#define PVC_NUM_MOCS_ENTRIES 3 #define MTL_NUM_MOCS_ENTRIES 16 /* (e)LLC caching options */ @@ -367,31 +366,6 @@ static const struct drm_i915_mocs_entry gen12_mocs_table[] = { L3_3_WB), }; -static const struct drm_i915_mocs_entry xehpsdv_mocs_table[] = { - /* wa_1608975824 */ - MOCS_ENTRY(0, 0, L3_3_WB | L3_LKUP(1)), - - /* UC - Coherent; GO:L3 */ - MOCS_ENTRY(1, 0, L3_1_UC | L3_LKUP(1)), - /* UC - Coherent; GO:Memory */ - MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)), - /* UC - Non-Coherent; GO:Memory */ - MOCS_ENTRY(3, 0, L3_1_UC | L3_GLBGO(1)), - /* UC - Non-Coherent; GO:L3 */ - MOCS_ENTRY(4, 0, L3_1_UC), - - /* WB */ - MOCS_ENTRY(5, 0, L3_3_WB | L3_LKUP(1)), - - /* HW Reserved - SW program but never use. */ - MOCS_ENTRY(48, 0, L3_3_WB | L3_LKUP(1)), - MOCS_ENTRY(49, 0, L3_1_UC | L3_LKUP(1)), - MOCS_ENTRY(60, 0, L3_1_UC), - MOCS_ENTRY(61, 0, L3_1_UC), - MOCS_ENTRY(62, 0, L3_1_UC), - MOCS_ENTRY(63, 0, L3_1_UC), -}; - static const struct drm_i915_mocs_entry dg2_mocs_table[] = { /* UC - Coherent; GO:L3 */ MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)), @@ -404,17 +378,6 @@ static const struct drm_i915_mocs_entry dg2_mocs_table[] = { MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)), }; -static const struct drm_i915_mocs_entry pvc_mocs_table[] = { - /* Error */ - MOCS_ENTRY(0, 0, L3_3_WB), - - /* UC */ - MOCS_ENTRY(1, 0, L3_1_UC), - - /* WB */ - MOCS_ENTRY(2, 0, L3_3_WB), -}; - static const struct drm_i915_mocs_entry mtl_mocs_table[] = { /* Error - Reserved for Non-Use */ MOCS_ENTRY(0, @@ -501,25 +464,12 @@ static unsigned int get_mocs_settings(struct drm_i915_private *i915, table->n_entries = MTL_NUM_MOCS_ENTRIES; table->uc_index = 9; table->unused_entries_index = 1; - } else if (IS_PONTEVECCHIO(i915)) { - table->size = ARRAY_SIZE(pvc_mocs_table); - table->table = pvc_mocs_table; - table->n_entries = PVC_NUM_MOCS_ENTRIES; - table->uc_index = 1; - table->wb_index = 2; - table->unused_entries_index = 2; } else if (IS_DG2(i915)) { table->size = ARRAY_SIZE(dg2_mocs_table); table->table = dg2_mocs_table; table->uc_index = 1; table->n_entries = GEN9_NUM_MOCS_ENTRIES; table->unused_entries_index = 3; - } else if (IS_XEHPSDV(i915)) { - table->size = ARRAY_SIZE(xehpsdv_mocs_table); - table->table = xehpsdv_mocs_table; - table->uc_index = 2; - table->n_entries = GEN9_NUM_MOCS_ENTRIES; - table->unused_entries_index = 5; } else if (IS_DG1(i915)) { table->size = ARRAY_SIZE(dg1_mocs_table); table->table = dg1_mocs_table; @@ -670,7 +620,7 @@ static void init_l3cc_table(struct intel_gt *gt, intel_gt_mcr_lock(gt, &flags); for_each_l3cc(l3cc, table, i) - if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) intel_gt_mcr_multicast_write_fw(gt, XEHP_LNCFCMOCS(i), l3cc); else intel_uncore_write_fw(gt->uncore, GEN9_LNCFCMOCS(i), l3cc); diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 8f4b3c8af0..c864d101fa 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -109,7 +109,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) * thus allowing GuC to control RC6 entry/exit fully instead. 
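get_mocs_settings() above selects a per-platform MOCS table and fills in its size, programmed entry count and special indices. A minimal standalone sketch of that descriptor-selection pattern, with invented sketch_* tables and illustrative values only:

#include <stddef.h>
#include <stdio.h>

struct sketch_mocs_entry { unsigned int index, value; };
struct sketch_mocs_table {
        const struct sketch_mocs_entry *table;
        size_t size;               /* entries actually defined */
        unsigned int n_entries;    /* entries the hardware expects programmed */
        unsigned int uc_index;     /* entry used for uncached defaults */
};

enum sketch_platform { PLAT_DG2, PLAT_MTL };

static const struct sketch_mocs_entry dg2_table[] = { {0, 0x1}, {1, 0x3} };
static const struct sketch_mocs_entry mtl_table[] = { {0, 0x0}, {1, 0x5}, {9, 0x7} };

static void get_table(enum sketch_platform plat, struct sketch_mocs_table *t)
{
        switch (plat) {
        case PLAT_MTL:
                t->table = mtl_table;
                t->size = sizeof(mtl_table) / sizeof(mtl_table[0]);
                t->n_entries = 16;
                t->uc_index = 9;
                break;
        case PLAT_DG2:
        default:
                t->table = dg2_table;
                t->size = sizeof(dg2_table) / sizeof(dg2_table[0]);
                t->n_entries = 64;
                t->uc_index = 1;
                break;
        }
}

int main(void)
{
        struct sketch_mocs_table t;

        get_table(PLAT_MTL, &t);
        printf("%zu defined entries, %u programmed, UC index %u\n",
               t.size, t.n_entries, t.uc_index);
        return 0;
}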
* We will not set the HW ENABLE and EI bits */ - if (!intel_guc_rc_enable(>->uc.guc)) + if (!intel_guc_rc_enable(gt_to_guc(gt))) rc6->ctl_enable = GEN6_RC_CTL_RC6_ENABLE; else rc6->ctl_enable = @@ -569,7 +569,7 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6) struct intel_gt *gt = rc6_to_gt(rc6); /* Take control of RC6 back from GuC */ - intel_guc_rc_disable(>->uc.guc); + intel_guc_rc_disable(gt_to_guc(gt)); intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); if (GRAPHICS_VER(i915) >= 9) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index c8e9aa41fd..6161f7a3ff 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -764,7 +764,7 @@ wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask) HECI_H_GS1_ER_PREP, 0); } -int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) +static int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) { const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1; reset_func reset; @@ -879,8 +879,17 @@ static intel_engine_mask_t reset_prepare(struct intel_gt *gt) intel_engine_mask_t awake = 0; enum intel_engine_id id; - /* For GuC mode, ensure submission is disabled before stopping ring */ - intel_uc_reset_prepare(>->uc); + /** + * For GuC mode with submission enabled, ensure submission + * is disabled before stopping ring. + * + * For GuC mode with submission disabled, ensure that GuC is not + * sanitized, do that after engine reset. reset_prepare() + * is followed by engine reset which in this mode requires GuC to + * process any CSB FIFO entries generated by the resets. + */ + if (intel_uc_uses_guc_submission(>->uc)) + intel_uc_reset_prepare(>->uc); for_each_engine(engine, gt, id) { if (intel_engine_pm_get_if_awake(engine)) @@ -978,7 +987,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt) /* Even if the GPU reset fails, it should still stop the engines */ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) - __intel_gt_reset(gt, ALL_ENGINES); + intel_gt_reset_all_engines(gt); for_each_engine(engine, gt, id) engine->submit_request = nop_submit_request; @@ -1089,7 +1098,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) /* We must reset pending GPU events before restoring our submission */ ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) - ok = __intel_gt_reset(gt, ALL_ENGINES) == 0; + ok = intel_gt_reset_all_engines(gt) == 0; if (!ok) { /* * Warn CI about the unrecoverable wedged condition. @@ -1133,10 +1142,10 @@ static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) { int err, i; - err = __intel_gt_reset(gt, ALL_ENGINES); + err = intel_gt_reset_all_engines(gt); for (i = 0; err && i < RESET_MAX_RETRIES; i++) { msleep(10 * (i + 1)); - err = __intel_gt_reset(gt, ALL_ENGINES); + err = intel_gt_reset_all_engines(gt); } if (err) return err; @@ -1227,6 +1236,9 @@ void intel_gt_reset(struct intel_gt *gt, intel_overlay_reset(gt->i915); + /* sanitize uC after engine reset */ + if (!intel_uc_uses_guc_submission(>->uc)) + intel_uc_reset_prepare(>->uc); /* * Next we need to restore the context, but we don't use those * yet either... @@ -1270,7 +1282,30 @@ error: goto finish; } -static int intel_gt_reset_engine(struct intel_engine_cs *engine) +/** + * intel_gt_reset_all_engines() - Reset all engines in the given gt. + * @gt: the GT to reset all engines for. 
+ * + * This function resets all engines within the given gt. + * + * Returns: + * Zero on success, negative error code on failure. + */ +int intel_gt_reset_all_engines(struct intel_gt *gt) +{ + return __intel_gt_reset(gt, ALL_ENGINES); +} + +/** + * intel_gt_reset_engine() - Reset a specific engine within a gt. + * @engine: engine to be reset. + * + * This function resets the specified engine within a gt. + * + * Returns: + * Zero on success, negative error code on failure. + */ +int intel_gt_reset_engine(struct intel_engine_cs *engine) { return __intel_gt_reset(engine->gt, engine->mask); } diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h index f615b30b81..c00de35307 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.h +++ b/drivers/gpu/drm/i915/gt/intel_reset.h @@ -54,7 +54,8 @@ int intel_gt_terminally_wedged(struct intel_gt *gt); void intel_gt_set_wedged_on_init(struct intel_gt *gt); void intel_gt_set_wedged_on_fini(struct intel_gt *gt); -int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask); +int intel_gt_reset_engine(struct intel_engine_cs *engine); +int intel_gt_reset_all_engines(struct intel_gt *gt); int intel_reset_guc(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 4feef874e6..c9cb2a3919 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -52,7 +52,7 @@ static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps) { struct intel_gt *gt = rps_to_gt(rps); - return >->uc.guc.slpc; + return >_to_guc(gt)->slpc; } static bool rps_uses_slpc(struct intel_rps *rps) @@ -1013,6 +1013,10 @@ void intel_rps_boost(struct i915_request *rq) if (i915_request_signaled(rq) || i915_request_has_waitboost(rq)) return; + /* Waitboost is not needed for contexts marked with a Freq hint */ + if (test_bit(CONTEXT_LOW_LATENCY, &rq->context->flags)) + return; + /* Serializes with i915_request_retire() */ if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) { struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; @@ -1086,11 +1090,7 @@ static u32 intel_rps_read_state_cap(struct intel_rps *rps) struct drm_i915_private *i915 = rps_to_i915(rps); struct intel_uncore *uncore = rps_to_uncore(rps); - if (IS_PONTEVECCHIO(i915)) - return intel_uncore_read(uncore, PVC_RP_STATE_CAP); - else if (IS_XEHPSDV(i915)) - return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP); - else if (IS_GEN9_LP(i915)) + if (IS_GEN9_LP(i915)) return intel_uncore_read(uncore, BXT_RP_STATE_CAP); else return intel_uncore_read(uncore, GEN6_RP_STATE_CAP); diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index 6a3246240e..c8fadf58d8 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -214,13 +214,8 @@ static void xehp_sseu_info_init(struct intel_gt *gt) int num_compute_regs, num_geometry_regs; int eu; - if (IS_PONTEVECCHIO(gt->i915)) { - num_geometry_regs = 0; - num_compute_regs = 2; - } else { - num_geometry_regs = 1; - num_compute_regs = 1; - } + num_geometry_regs = 1; + num_compute_regs = 1; /* * The concept of slice has been removed in Xe_HP. 
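The reset rework above makes __intel_gt_reset() static and exposes only two thin wrappers, intel_gt_reset_all_engines() and intel_gt_reset_engine(), so callers no longer pass raw engine masks. A standalone sketch of that wrapper pattern with invented sketch_* types:

#include <stdio.h>

#define SKETCH_ALL_ENGINES 0xffu

struct sketch_gt     { unsigned int present_mask; };
struct sketch_engine { struct sketch_gt *gt; unsigned int mask; };

/* Internal worker stays file-local; the mask parameter is no longer public. */
static int __sketch_gt_reset(struct sketch_gt *gt, unsigned int engine_mask)
{
        printf("resetting mask 0x%x\n", engine_mask & gt->present_mask);
        return 0;
}

/* Thin wrappers mirroring intel_gt_reset_all_engines()/_engine(). */
static int sketch_gt_reset_all_engines(struct sketch_gt *gt)
{
        return __sketch_gt_reset(gt, SKETCH_ALL_ENGINES);
}

static int sketch_gt_reset_engine(struct sketch_engine *engine)
{
        return __sketch_gt_reset(engine->gt, engine->mask);
}

int main(void)
{
        struct sketch_gt gt = { .present_mask = 0x0f };
        struct sketch_engine rcs0 = { .gt = &gt, .mask = 0x01 };

        sketch_gt_reset_all_engines(&gt);
        sketch_gt_reset_engine(&rcs0);
        return 0;
}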
To be compatible @@ -642,7 +637,7 @@ void intel_sseu_info_init(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; - if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) xehp_sseu_info_init(gt); else if (GRAPHICS_VER(i915) >= 12) gen12_sseu_info_init(gt); @@ -851,7 +846,7 @@ void intel_sseu_print_topology(struct drm_i915_private *i915, { if (sseu->max_slices == 0) drm_printf(p, "Unavailable\n"); - else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) + else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) sseu_print_xehp_topology(sseu, p); else sseu_print_hsw_topology(sseu, p); diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c index 4bb13d1890..756e9ebbc7 100644 --- a/drivers/gpu/drm/i915/gt/intel_tlb.c +++ b/drivers/gpu/drm/i915/gt/intel_tlb.c @@ -132,7 +132,7 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno) return; with_intel_gt_pm_if_awake(gt, wakeref) { - struct intel_guc *guc = >->uc.guc; + struct intel_guc *guc = gt_to_guc(gt); mutex_lock(>->tlb.invalidate_lock); if (tlb_seqno_passed(gt, seqno)) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 85c860ea9d..5a0f1b279a 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -17,6 +17,8 @@ #include "intel_ring.h" #include "intel_workarounds.h" +#include "display/intel_fbc_regs.h" + /** * DOC: Hardware workarounds * @@ -259,12 +261,6 @@ wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set) wa_write_clr_set(wal, reg, ~0, set); } -static void -wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set) -{ - wa_mcr_write_clr_set(wal, reg, ~0, set); -} - static void wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set) { @@ -920,12 +916,8 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74))) xelpg_ctx_workarounds_init(engine, wal); - else if (IS_PONTEVECCHIO(i915)) - ; /* noop; none at this time */ else if (IS_DG2(i915)) dg2_ctx_workarounds_init(engine, wal); - else if (IS_XEHPSDV(i915)) - ; /* noop; none at this time */ else if (IS_DG1(i915)) dg1_ctx_workarounds_init(engine, wal); else if (GRAPHICS_VER(i915) == 12) @@ -1352,9 +1344,6 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal) gt->steering_table[MSLICE] = NULL; } - if (IS_XEHPSDV(gt->i915) && slice_mask & BIT(0)) - gt->steering_table[GAM] = NULL; - slice = __ffs(slice_mask); subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) % GEN_DSS_PER_GSLICE; @@ -1381,20 +1370,6 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal) __set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0); } -static void -pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal) -{ - unsigned int dss; - - /* - * Setup implicit steering for COMPUTE and DSS ranges to the first - * non-fused-off DSS. All other types of MCR registers will be - * explicitly steered. 
- */ - dss = intel_sseu_find_first_xehp_dss(>->info.sseu, 0, 0); - __add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE); -} - static void icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { @@ -1521,76 +1496,6 @@ dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL); } -static void -xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) -{ - struct drm_i915_private *i915 = gt->i915; - - xehp_init_mcr(gt, wal); - - /* Wa_1409757795:xehpsdv */ - wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB); - - /* Wa_18011725039:xehpsdv */ - if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) { - wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER); - wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH); - } - - /* Wa_16011155590:xehpsdv */ - if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, - TSGUNIT_CLKGATE_DIS); - - /* Wa_14011780169:xehpsdv */ - if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) { - wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS | - GAMTLBVDBOX7_CLKGATE_DIS | - GAMTLBVDBOX6_CLKGATE_DIS | - GAMTLBVDBOX5_CLKGATE_DIS | - GAMTLBVDBOX4_CLKGATE_DIS | - GAMTLBVDBOX3_CLKGATE_DIS | - GAMTLBVDBOX2_CLKGATE_DIS | - GAMTLBVDBOX1_CLKGATE_DIS | - GAMTLBVDBOX0_CLKGATE_DIS | - GAMTLBKCR_CLKGATE_DIS | - GAMTLBGUC_CLKGATE_DIS | - GAMTLBBLT_CLKGATE_DIS); - wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS | - GAMTLBGFXA1_CLKGATE_DIS | - GAMTLBCOMPA0_CLKGATE_DIS | - GAMTLBCOMPA1_CLKGATE_DIS | - GAMTLBCOMPB0_CLKGATE_DIS | - GAMTLBCOMPB1_CLKGATE_DIS | - GAMTLBCOMPC0_CLKGATE_DIS | - GAMTLBCOMPC1_CLKGATE_DIS | - GAMTLBCOMPD0_CLKGATE_DIS | - GAMTLBCOMPD1_CLKGATE_DIS | - GAMTLBMERT_CLKGATE_DIS | - GAMTLBVEBOX3_CLKGATE_DIS | - GAMTLBVEBOX2_CLKGATE_DIS | - GAMTLBVEBOX1_CLKGATE_DIS | - GAMTLBVEBOX0_CLKGATE_DIS); - } - - /* Wa_16012725990:xehpsdv */ - if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER)) - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS); - - /* Wa_14011060649:xehpsdv */ - wa_14011060649(gt, wal); - - /* Wa_14012362059:xehpsdv */ - wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB); - - /* Wa_14014368820:xehpsdv */ - wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL, - INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE); - - /* Wa_14010670810:xehpsdv */ - wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE); -} - static void dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { @@ -1633,24 +1538,6 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE); } -static void -pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) -{ - pvc_init_mcr(gt, wal); - - /* Wa_14015795083 */ - wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE); - - /* Wa_18018781329 */ - wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB); - wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB); - wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB); - wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB); - - /* Wa_16016694945 */ - wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC); -} - static void xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { @@ -1727,12 +1614,6 @@ static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal) wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS); } - if 
(IS_PONTEVECCHIO(gt->i915)) { - wa_mcr_write(wal, XEHPC_L3SCRUB, - SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK); - wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN); - } - if (IS_DG2(gt->i915)) { wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS); wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS); @@ -1757,12 +1638,8 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal) if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) xelpg_gt_workarounds_init(gt, wal); - else if (IS_PONTEVECCHIO(i915)) - pvc_gt_workarounds_init(gt, wal); else if (IS_DG2(i915)) dg2_gt_workarounds_init(gt, wal); - else if (IS_XEHPSDV(i915)) - xehpsdv_gt_workarounds_init(gt, wal); else if (IS_DG1(i915)) dg1_gt_workarounds_init(gt, wal); else if (GRAPHICS_VER(i915) == 12) @@ -2180,30 +2057,6 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine) } } -static void blacklist_trtt(struct intel_engine_cs *engine) -{ - struct i915_wa_list *w = &engine->whitelist; - - /* - * Prevent read/write access to [0x4400, 0x4600) which covers - * the TRTT range across all engines. Note that normally userspace - * cannot access the other engines' trtt control, but for simplicity - * we cover the entire range on each engine. - */ - whitelist_reg_ext(w, _MMIO(0x4400), - RING_FORCE_TO_NONPRIV_DENY | - RING_FORCE_TO_NONPRIV_RANGE_64); - whitelist_reg_ext(w, _MMIO(0x4500), - RING_FORCE_TO_NONPRIV_DENY | - RING_FORCE_TO_NONPRIV_RANGE_64); -} - -static void pvc_whitelist_build(struct intel_engine_cs *engine) -{ - /* Wa_16014440446:pvc */ - blacklist_trtt(engine); -} - static void xelpg_whitelist_build(struct intel_engine_cs *engine) { struct i915_wa_list *w = &engine->whitelist; @@ -2230,12 +2083,8 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) ; /* none yet */ else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74))) xelpg_whitelist_build(engine); - else if (IS_PONTEVECCHIO(i915)) - pvc_whitelist_build(engine); else if (IS_DG2(i915)) dg2_whitelist_build(engine); - else if (IS_XEHPSDV(i915)) - ; /* none needed */ else if (GRAPHICS_VER(i915) == 12) tgl_whitelist_build(engine); else if (GRAPHICS_VER(i915) == 11) @@ -2816,10 +2665,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) static void ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { - if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) { - /* Wa_14014999345:pvc */ - wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC); - } + /* boilerplate for any CCS engine workaround */ } /* @@ -2852,7 +2698,7 @@ add_render_compute_tuning_settings(struct intel_gt *gt, wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE, THREAD_EX_ARB_MODE_RR_AFTER_DEP); - if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) + if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55)) wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC); } @@ -2918,10 +2764,14 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) || IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) || - IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74))) + IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74))) { /* Wa_14017856879 */ wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH); + /* Wa_14020495402 */ + wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, XELPG_DISABLE_TDL_SVHS_GATING); + } + if 
(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) || IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) /* @@ -2949,21 +2799,15 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) || IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) || - IS_PONTEVECCHIO(i915) || IS_DG2(i915)) { /* Wa_22014226127 */ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE); } - if (IS_PONTEVECCHIO(i915) || IS_DG2(i915)) { + if (IS_DG2(i915)) { /* Wa_14015227452:dg2,pvc */ wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE); - /* Wa_16015675438:dg2,pvc */ - wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE); - } - - if (IS_DG2(i915)) { /* * Wa_16011620976:dg2_g11 * Wa_22015475538:dg2 @@ -2999,22 +2843,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li 0 /* write-only, so skip validation */, true); } - - if (IS_XEHPSDV(i915)) { - /* Wa_1409954639 */ - wa_mcr_masked_en(wal, - GEN8_ROW_CHICKEN, - SYSTOLIC_DOP_CLOCK_GATING_DIS); - - /* Wa_1607196519 */ - wa_mcr_masked_en(wal, - GEN9_ROW_CHICKEN4, - GEN12_DISABLE_GRF_CLEAR); - - /* Wa_14010449647:xehpsdv */ - wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1, - GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE); - } } static void @@ -3097,7 +2925,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset) const struct i915_range *mcr_ranges; int i; - if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) mcr_ranges = mcr_ranges_xehp; else if (GRAPHICS_VER(i915) >= 12) mcr_ranges = mcr_ranges_gen12; diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 0dd4d00ee8..9ce8ff1c04 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -319,7 +319,7 @@ static int igt_hang_sanitycheck(void *arg) i915_request_add(rq); timeout = 0; - intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */) + intel_wedge_on_timeout(&w, gt, HZ / 5 /* 200ms */) timeout = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); if (intel_gt_is_wedged(gt)) diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index f40de408cd..2cfc23c58e 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -281,7 +281,7 @@ static int igt_atomic_reset(void *arg) awake = reset_prepare(gt); p->critical_section_begin(); - err = __intel_gt_reset(gt, ALL_ENGINES); + err = intel_gt_reset_all_engines(gt); p->critical_section_end(); reset_finish(gt, awake); diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c index 302d054029..4ecc4ae74a 100644 --- a/drivers/gpu/drm/i915/gt/selftest_slpc.c +++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c @@ -53,7 +53,7 @@ static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq) static int slpc_set_freq(struct intel_gt *gt, u32 freq) { int err; - struct intel_guc_slpc *slpc = >->uc.guc.slpc; + struct intel_guc_slpc *slpc = >_to_guc(gt)->slpc; err = slpc_set_max_freq(slpc, freq); if (err) { @@ -182,7 +182,7 @@ static int vary_min_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps, static int slpc_power(struct intel_gt *gt, struct intel_engine_cs *engine) { - struct intel_guc_slpc *slpc = >->uc.guc.slpc; + struct intel_guc_slpc *slpc = >_to_guc(gt)->slpc; struct { u64 power; int freq; @@ -262,7 +262,7 @@ static int 
max_granted_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps, static int run_test(struct intel_gt *gt, int test_type) { - struct intel_guc_slpc *slpc = >->uc.guc.slpc; + struct intel_guc_slpc *slpc = >_to_guc(gt)->slpc; struct intel_rps *rps = >->rps; struct intel_engine_cs *engine; enum intel_engine_id id; diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c index bccc3a1200..1fb6ff77fd 100644 --- a/drivers/gpu/drm/i915/gt/shmem_utils.c +++ b/drivers/gpu/drm/i915/gt/shmem_utils.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "i915_drv.h" #include "gem/i915_gem_object.h" diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h index 811add10c3..c34674e797 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h @@ -207,6 +207,27 @@ struct slpc_shared_data { u8 reserved_mode_definition[4096]; } __packed; +struct slpc_context_frequency_request { + u32 frequency_request:16; + u32 reserved:12; + u32 is_compute:1; + u32 ignore_busyness:1; + u32 is_minimum:1; + u32 is_predefined:1; +} __packed; + +#define SLPC_CTX_FREQ_REQ_IS_COMPUTE REG_BIT(28) + +struct slpc_optimized_strategies { + u32 compute:1; + u32 async_flip:1; + u32 media:1; + u32 vsync_flip:1; + u32 reserved:28; +} __packed; + +#define SLPC_OPTIMIZED_STRATEGY_COMPUTE REG_BIT(0) + /** * DOC: SLPC H2G MESSAGE FORMAT * diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h index dabeaf4f24..00d6402333 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h @@ -36,6 +36,7 @@ enum intel_guc_load_status { INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START, INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73, INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74, + INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR = 0x75, INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END, INTEL_GUC_LOAD_STATUS_READY = 0xF0, diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h index 4f4f53c42a..525587cfe1 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h @@ -101,4 +101,11 @@ enum { GUC_CONTEXT_POLICIES_KLV_NUM_IDS = 5, }; +/* + * Workaround keys: + */ +enum { + GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE = 0x9001, +}; + #endif /* _ABI_GUC_KLVS_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c index e2e42b3e0d..3b69bc6616 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c @@ -298,7 +298,7 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc) memcpy_toio(gsc->local_vaddr, src, gsc->fw.size); memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size); - intel_guc_write_barrier(>->uc.guc); + intel_guc_write_barrier(gt_to_guc(gt)); i915_gem_object_unpin_map(gsc->fw.obj); @@ -351,7 +351,7 @@ static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc) void *vaddr; int err; - err = intel_guc_allocate_and_map_vma(>->uc.guc, GSC_VER_PKT_SZ * 2, + err = intel_guc_allocate_and_map_vma(gt_to_guc(gt), GSC_VER_PKT_SZ * 2, &vma, &vaddr); if (err) { gt_err(gt, "failed to allocate vma for GSC version query\n"); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c index 
40817ebcca..a7d5465655 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c @@ -358,7 +358,8 @@ static int proxy_channel_alloc(struct intel_gsc_uc *gsc) void *vaddr; int err; - err = intel_guc_allocate_and_map_vma(>->uc.guc, GSC_PROXY_CHANNEL_SIZE, + err = intel_guc_allocate_and_map_vma(gt_to_guc(gt), + GSC_PROXY_CHANNEL_SIZE, &vma, &vaddr); if (err) return err; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 2b450c43bb..5e60a34692 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -286,7 +286,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc) /* Wa_22012773006:gen11,gen12 < XeHP */ if (GRAPHICS_VER(gt->i915) >= 11 && - GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50)) + GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 55)) flags |= GUC_WA_POLLCS; /* Wa_14014475959 */ @@ -294,6 +294,11 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc) IS_DG2(gt->i915)) flags |= GUC_WA_HOLD_CCS_SWITCHOUT; + /* Wa_16019325821 */ + /* Wa_14019159160 */ + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) + flags |= GUC_WA_RCS_CCS_SWITCHOUT; + /* * Wa_14012197797 * Wa_22011391025 @@ -315,15 +320,12 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc) if (IS_DG2_G11(gt->i915)) flags |= GUC_WA_CONTEXT_ISOLATION; - /* Wa_16015675438 */ - if (!RCS_MASK(gt)) - flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST; - - /* Wa_14018913170 */ - if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0)) { - if (IS_DG2(gt->i915) || IS_METEORLAKE(gt->i915) || IS_PONTEVECCHIO(gt->i915)) - flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6; - } + /* + * Wa_14018913170: Applicable to all platforms supported by i915 so + * don't bother testing for all X/Y/Z platforms explicitly. 
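The SLPC ABI addition a few hunks back defines both a packed bitfield view (slpc_context_frequency_request, with is_compute after 16 + 12 bits) and a flat mask (SLPC_CTX_FREQ_REQ_IS_COMPUTE at bit 28). A small host-side sketch checking that the two views line up; it assumes little-endian, LSB-first bitfield allocation, which is a compiler/ABI property rather than something the header guarantees:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Flat-mask view, mirroring SLPC_CTX_FREQ_REQ_IS_COMPUTE (bit 28). */
#define SKETCH_FREQ_REQ_IS_COMPUTE (1u << 28)

/* Bitfield view; field widths copied from the ABI struct above. */
struct sketch_ctx_freq_req {
        uint32_t frequency_request:16;
        uint32_t reserved:12;
        uint32_t is_compute:1;
        uint32_t ignore_busyness:1;
        uint32_t is_minimum:1;
        uint32_t is_predefined:1;
};

int main(void)
{
        struct sketch_ctx_freq_req req = { .is_compute = 1 };
        uint32_t raw;

        memcpy(&raw, &req, sizeof(raw));
        printf("bitfield raw = 0x%08x, mask = 0x%08x, match = %d\n",
               raw, SKETCH_FREQ_REQ_IS_COMPUTE,
               raw == SKETCH_FREQ_REQ_IS_COMPUTE);
        return 0;
}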
+ */ + if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0)) + flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6; return flags; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index be70c46604..57b9031327 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -204,6 +204,8 @@ struct intel_guc { struct guc_mmio_reg *ads_regset; /** @ads_golden_ctxt_size: size of the golden contexts in the ADS */ u32 ads_golden_ctxt_size; + /** @ads_waklv_size: size of workaround KLVs */ + u32 ads_waklv_size; /** @ads_capture_size: size of register lists in the ADS used for error capture */ u32 ads_capture_size; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index f7372f736a..c606bb5e3b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -46,6 +46,10 @@ * +---------------------------------------+ * | padding | * +---------------------------------------+ <== 4K aligned + * | w/a KLVs | + * +---------------------------------------+ + * | padding | + * +---------------------------------------+ <== 4K aligned * | capture lists | * +---------------------------------------+ * | padding | @@ -88,6 +92,11 @@ static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc) return PAGE_ALIGN(guc->ads_golden_ctxt_size); } +static u32 guc_ads_waklv_size(struct intel_guc *guc) +{ + return PAGE_ALIGN(guc->ads_waklv_size); +} + static u32 guc_ads_capture_size(struct intel_guc *guc) { return PAGE_ALIGN(guc->ads_capture_size); @@ -113,7 +122,7 @@ static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc) return PAGE_ALIGN(offset); } -static u32 guc_ads_capture_offset(struct intel_guc *guc) +static u32 guc_ads_waklv_offset(struct intel_guc *guc) { u32 offset; @@ -123,6 +132,16 @@ static u32 guc_ads_capture_offset(struct intel_guc *guc) return PAGE_ALIGN(offset); } +static u32 guc_ads_capture_offset(struct intel_guc *guc) +{ + u32 offset; + + offset = guc_ads_waklv_offset(guc) + + guc_ads_waklv_size(guc); + + return PAGE_ALIGN(offset); +} + static u32 guc_ads_private_data_offset(struct intel_guc *guc) { u32 offset; @@ -393,7 +412,7 @@ static int guc_mmio_regset_init(struct temp_regset *regset, /* add in local MOCS registers */ for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) - if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) ret |= GUC_MCR_REG_ADD(gt, regset, XEHP_LNCFCMOCS(i), false); else ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false); @@ -503,7 +522,7 @@ static void fill_engine_enable_masks(struct intel_gt *gt, #define LR_HW_CONTEXT_SIZE (80 * sizeof(u32)) #define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32)) -#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \ +#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55) ? 
\ XEHP_LR_HW_CONTEXT_SIZE : \ LR_HW_CONTEXT_SIZE) #define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915)) @@ -796,6 +815,65 @@ engine_instance_list: return PAGE_ALIGN(total_size); } +/* Wa_14019159160 */ +static u32 guc_waklv_ra_mode(struct intel_guc *guc, u32 offset, u32 remain) +{ + u32 size; + u32 klv_entry[] = { + /* 16:16 key/length */ + FIELD_PREP(GUC_KLV_0_KEY, GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE) | + FIELD_PREP(GUC_KLV_0_LEN, 0), + /* 0 dwords data */ + }; + + size = sizeof(klv_entry); + GEM_BUG_ON(remain < size); + + iosys_map_memcpy_to(&guc->ads_map, offset, klv_entry, size); + + return size; +} + +static void guc_waklv_init(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + u32 offset, addr_ggtt, remain, size; + + if (!intel_uc_uses_guc_submission(>->uc)) + return; + + if (GUC_FIRMWARE_VER(guc) < MAKE_GUC_VER(70, 10, 0)) + return; + + GEM_BUG_ON(iosys_map_is_null(&guc->ads_map)); + offset = guc_ads_waklv_offset(guc); + remain = guc_ads_waklv_size(guc); + + /* Wa_14019159160 */ + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) { + size = guc_waklv_ra_mode(guc, offset, remain); + offset += size; + remain -= size; + } + + size = guc_ads_waklv_size(guc) - remain; + if (!size) + return; + + offset = guc_ads_waklv_offset(guc); + addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset; + + ads_blob_write(guc, ads.wa_klv_addr_lo, addr_ggtt); + ads_blob_write(guc, ads.wa_klv_addr_hi, 0); + ads_blob_write(guc, ads.wa_klv_size, size); +} + +static int guc_prep_waklv(struct intel_guc *guc) +{ + /* Fudge something chunky for now: */ + return PAGE_SIZE; +} + static void __guc_ads_init(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); @@ -843,6 +921,9 @@ static void __guc_ads_init(struct intel_guc *guc) /* MMIO save/restore list */ guc_mmio_reg_state_init(guc); + /* Workaround KLV list */ + guc_waklv_init(guc); + /* Private Data */ ads_blob_write(guc, ads.private_data, base + guc_ads_private_data_offset(guc)); @@ -886,6 +967,12 @@ int intel_guc_ads_create(struct intel_guc *guc) return ret; guc->ads_capture_size = ret; + /* And don't forget the workaround KLVs: */ + ret = guc_prep_waklv(guc); + if (ret < 0) + return ret; + guc->ads_waklv_size = ret; + /* Now the total size can be determined: */ size = guc_ads_blob_size(guc); @@ -961,7 +1048,7 @@ u32 intel_guc_engine_usage_offset(struct intel_guc *guc) struct iosys_map intel_guc_engine_usage_record_map(struct intel_engine_cs *engine) { - struct intel_guc *guc = &engine->gt->uc.guc; + struct intel_guc *guc = gt_to_guc(engine->gt); u8 guc_class = engine_class_to_guc_class(engine->class); size_t offset = offsetof(struct __guc_ads_blob, engine_usage.engines[guc_class][ilog2(engine->logical_mask)]); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c index a1cd40d805..9547fff672 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c @@ -51,6 +51,7 @@ { RING_ESR(0), 0, 0, "ESR" }, \ { RING_DMA_FADD(0), 0, 0, "RING_DMA_FADD_LDW" }, \ { RING_DMA_FADD_UDW(0), 0, 0, "RING_DMA_FADD_UDW" }, \ + { RING_EIR(0), 0, 0, "EIR" }, \ { RING_IPEIR(0), 0, 0, "IPEIR" }, \ { RING_IPEHR(0), 0, 0, "IPEHR" }, \ { RING_INSTPS(0), 0, 0, "INSTPS" }, \ @@ -80,9 +81,6 @@ { GEN8_RING_PDP_LDW(0, 3), 0, 0, "PDP3_LDW" }, \ { GEN8_RING_PDP_UDW(0, 3), 0, 0, "PDP3_UDW" } -#define COMMON_BASE_HAS_EU \ - { EIR, 0, 0, "EIR" } - #define COMMON_BASE_RENDER \ { GEN7_SC_INSTDONE, 0, 0, "GEN7_SC_INSTDONE" 
} @@ -105,7 +103,6 @@ static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = { /* XE_LP Render / Compute Per-Class */ static const struct __guc_mmio_reg_descr xe_lp_rc_class_regs[] = { - COMMON_BASE_HAS_EU, COMMON_BASE_RENDER, COMMON_GEN12BASE_RENDER, }; @@ -148,7 +145,6 @@ static const struct __guc_mmio_reg_descr gen8_global_regs[] = { }; static const struct __guc_mmio_reg_descr gen8_rc_class_regs[] = { - COMMON_BASE_HAS_EU, COMMON_BASE_RENDER, }; @@ -1441,7 +1437,7 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf, if (!cap || !ee->engine) return -ENODEV; - guc = &ee->engine->gt->uc.guc; + guc = gt_to_guc(ee->engine->gt); i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n", ee->engine->name); @@ -1543,7 +1539,7 @@ bool intel_guc_capture_is_matching_engine(struct intel_gt *gt, if (!gt || !ce || !engine) return false; - guc = >->uc.guc; + guc = gt_to_guc(gt); if (!guc->capture) return false; @@ -1573,7 +1569,7 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt, if (!gt || !ee || !ce) return; - guc = >->uc.guc; + guc = gt_to_guc(gt); if (!guc->capture) return; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 52332bb143..23f54c84cb 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -26,7 +26,7 @@ static void guc_prepare_xfer(struct intel_gt *gt) GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | GUC_ENABLE_MIA_CLOCK_GATING; - if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 55)) shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES | GUC_ENABLE_MIA_CACHING; @@ -115,6 +115,7 @@ static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool case INTEL_GUC_LOAD_STATUS_INIT_DATA_INVALID: case INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID: case INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID: + case INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR: *success = false; return true; } @@ -241,6 +242,11 @@ static int guc_wait_ucode(struct intel_guc *guc) ret = -EPERM; break; + case INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR: + guc_info(guc, "invalid w/a KLV entry\n"); + ret = -EINVAL; + break; + case INTEL_GUC_LOAD_STATUS_HWCONFIG_START: guc_info(guc, "still extracting hwconfig table.\n"); ret = -ETIMEDOUT; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 8ae1846431..14797e80bc 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -96,8 +96,9 @@ #define GUC_WA_GAM_CREDITS BIT(10) #define GUC_WA_DUAL_QUEUE BIT(11) #define GUC_WA_RCS_RESET_BEFORE_RC6 BIT(13) -#define GUC_WA_CONTEXT_ISOLATION BIT(15) #define GUC_WA_PRE_PARSER BIT(14) +#define GUC_WA_CONTEXT_ISOLATION BIT(15) +#define GUC_WA_RCS_CCS_SWITCHOUT BIT(16) #define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17) #define GUC_WA_POLLCS BIT(18) #define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21) @@ -430,7 +431,10 @@ struct guc_ads { u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES]; u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES]; u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX]; - u32 reserved[14]; + u32 wa_klv_addr_lo; + u32 wa_klv_addr_hi; + u32 wa_klv_size; + u32 reserved[11]; } __packed; /* Engine usage stats */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c index cc9569af7f..b67a15f742 100644 --- 
a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c @@ -111,7 +111,7 @@ static bool has_table(struct drm_i915_private *i915) static int guc_hwconfig_init(struct intel_gt *gt) { struct intel_hwconfig *hwconfig = >->info.hwconfig; - struct intel_guc *guc = >->uc.guc; + struct intel_guc *guc = gt_to_guc(gt); int ret; if (!has_table(gt->i915)) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 3e681ab6fb..706fffca69 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -537,6 +537,20 @@ int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val) return ret; } +int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val) +{ + struct drm_i915_private *i915 = slpc_to_i915(slpc); + intel_wakeref_t wakeref; + int ret = 0; + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + ret = slpc_set_param(slpc, + SLPC_PARAM_STRATEGIES, + val); + + return ret; +} + int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val) { struct drm_i915_private *i915 = slpc_to_i915(slpc); @@ -711,6 +725,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) /* Set cached media freq ratio mode */ intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode); + /* Enable SLPC Optimized Strategy for compute */ + intel_guc_slpc_set_strategy(slpc, SLPC_OPTIMIZED_STRATEGY_COMPUTE); + return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h index 6ac6503c39..1cb5fd44f0 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h @@ -45,5 +45,6 @@ void intel_guc_pm_intrmsk_enable(struct intel_gt *gt); void intel_guc_slpc_boost(struct intel_guc_slpc *slpc); void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc); int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val); +int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 0f83c6d437..0eaa106424 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -398,7 +398,7 @@ static inline void set_context_guc_id_invalid(struct intel_context *ce) static inline struct intel_guc *ce_to_guc(struct intel_context *ce) { - return &ce->engine->gt->uc.guc; + return gt_to_guc(ce->engine->gt); } static inline struct i915_priolist *to_priolist(struct rb_node *rb) @@ -1246,7 +1246,7 @@ static void __get_engine_usage_record(struct intel_engine_cs *engine, static void guc_update_engine_gt_clks(struct intel_engine_cs *engine) { struct intel_engine_guc_stats *stats = &engine->stats.guc; - struct intel_guc *guc = &engine->gt->uc.guc; + struct intel_guc *guc = gt_to_guc(engine->gt); u32 last_switch, ctx_id, total; lockdep_assert_held(&guc->timestamp.lock); @@ -1311,7 +1311,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc; struct i915_gpu_error *gpu_error = &engine->i915->gpu_error; struct intel_gt *gt = engine->gt; - struct intel_guc *guc = >->uc.guc; + struct intel_guc *guc = gt_to_guc(gt); u64 total, gt_stamp_saved; unsigned long flags; u32 reset_count; @@ -1577,7 +1577,7 @@ static void guc_fini_engine_stats(struct intel_guc *guc) void 
intel_guc_busyness_park(struct intel_gt *gt) { - struct intel_guc *guc = >->uc.guc; + struct intel_guc *guc = gt_to_guc(gt); if (!guc_submission_initialized(guc)) return; @@ -1604,7 +1604,7 @@ void intel_guc_busyness_park(struct intel_gt *gt) void intel_guc_busyness_unpark(struct intel_gt *gt) { - struct intel_guc *guc = >->uc.guc; + struct intel_guc *guc = gt_to_guc(gt); unsigned long flags; ktime_t unused; @@ -2189,7 +2189,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq) static void guc_submit_request(struct i915_request *rq) { struct i915_sched_engine *sched_engine = rq->engine->sched_engine; - struct intel_guc *guc = &rq->engine->gt->uc.guc; + struct intel_guc *guc = gt_to_guc(rq->engine->gt); unsigned long flags; /* Will be called from irq-context when using foreign fences. */ @@ -2215,11 +2215,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce) order_base_2(ce->parallel.number_children + 1)); else - ret = ida_simple_get(&guc->submission_state.guc_ids, - NUMBER_MULTI_LRC_GUC_ID(guc), - guc->submission_state.num_guc_ids, - GFP_KERNEL | __GFP_RETRY_MAYFAIL | - __GFP_NOWARN); + ret = ida_alloc_range(&guc->submission_state.guc_ids, + NUMBER_MULTI_LRC_GUC_ID(guc), + guc->submission_state.num_guc_ids - 1, + GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); if (unlikely(ret < 0)) return ret; @@ -2242,8 +2241,8 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce) + 1)); } else { --guc->submission_state.guc_ids_in_use; - ida_simple_remove(&guc->submission_state.guc_ids, - ce->guc_id.id); + ida_free(&guc->submission_state.guc_ids, + ce->guc_id.id); } clr_ctx_id_mapping(guc, ce->guc_id.id); set_context_guc_id_invalid(ce); @@ -2640,6 +2639,7 @@ MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM) MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT) MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY) MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY) +MAKE_CONTEXT_POLICY_ADD(slpc_ctx_freq_req, SLPM_GT_FREQUENCY) #undef MAKE_CONTEXT_POLICY_ADD @@ -2655,10 +2655,11 @@ static int __guc_context_set_context_policies(struct intel_guc *guc, static int guc_context_policy_init_v70(struct intel_context *ce, bool loop) { struct intel_engine_cs *engine = ce->engine; - struct intel_guc *guc = &engine->gt->uc.guc; + struct intel_guc *guc = gt_to_guc(engine->gt); struct context_policy policy; u32 execution_quantum; u32 preemption_timeout; + u32 slpc_ctx_freq_req = 0; unsigned long flags; int ret; @@ -2670,11 +2671,15 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop) execution_quantum = engine->props.timeslice_duration_ms * 1000; preemption_timeout = engine->props.preempt_timeout_ms * 1000; + if (ce->flags & BIT(CONTEXT_LOW_LATENCY)) + slpc_ctx_freq_req |= SLPC_CTX_FREQ_REQ_IS_COMPUTE; + __guc_context_policy_start_klv(&policy, ce->guc_id.id); __guc_context_policy_add_priority(&policy, ce->guc_state.prio); __guc_context_policy_add_execution_quantum(&policy, execution_quantum); __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout); + __guc_context_policy_add_slpc_ctx_freq_req(&policy, slpc_ctx_freq_req); if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION) __guc_context_policy_add_preempt_to_idle(&policy, 1); @@ -2731,7 +2736,7 @@ static u32 map_guc_prio_to_lrc_desc_prio(u8 prio) static void prepare_context_registration_info_v69(struct intel_context *ce) { struct intel_engine_cs *engine = ce->engine; - struct intel_guc *guc = 
&engine->gt->uc.guc; + struct intel_guc *guc = gt_to_guc(engine->gt); u32 ctx_id = ce->guc_id.id; struct guc_lrc_desc_v69 *desc; struct intel_context *child; @@ -2800,7 +2805,7 @@ static void prepare_context_registration_info_v70(struct intel_context *ce, struct guc_ctxt_registration_info *info) { struct intel_engine_cs *engine = ce->engine; - struct intel_guc *guc = &engine->gt->uc.guc; + struct intel_guc *guc = gt_to_guc(engine->gt); u32 ctx_id = ce->guc_id.id; GEM_BUG_ON(!engine->mask); @@ -2863,7 +2868,7 @@ static int try_context_registration(struct intel_context *ce, bool loop) { struct intel_engine_cs *engine = ce->engine; struct intel_runtime_pm *runtime_pm = engine->uncore->rpm; - struct intel_guc *guc = &engine->gt->uc.guc; + struct intel_guc *guc = gt_to_guc(engine->gt); intel_wakeref_t wakeref; u32 ctx_id = ce->guc_id.id; bool context_registered; @@ -4491,7 +4496,13 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) if (engine->class == COMPUTE_CLASS) if (IS_GFX_GT_IP_STEP(engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) || IS_DG2(engine->i915)) - engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT; + engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT; + + /* Wa_16019325821 */ + /* Wa_14019159160 */ + if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) && + IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71))) + engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT; /* * TODO: GuC supports timeslicing and semaphores as well, but they're @@ -4502,7 +4513,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) */ engine->emit_bb_start = gen8_emit_bb_start; - if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) engine->emit_bb_start = xehp_emit_bb_start; } @@ -4544,7 +4555,7 @@ static void guc_sched_engine_destroy(struct kref *kref) int intel_guc_submission_setup(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; - struct intel_guc *guc = &engine->gt->uc.guc; + struct intel_guc *guc = gt_to_guc(engine->gt); /* * The setup relies on several assumptions (e.g. 
irqs always enabled) @@ -5303,7 +5314,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc, void intel_guc_find_hung_context(struct intel_engine_cs *engine) { - struct intel_guc *guc = &engine->gt->uc.guc; + struct intel_guc *guc = gt_to_guc(engine->gt); struct intel_context *ce; struct i915_request *rq; unsigned long index; @@ -5365,7 +5376,7 @@ void intel_guc_dump_active_requests(struct intel_engine_cs *engine, struct i915_request *hung_rq, struct drm_printer *m) { - struct intel_guc *guc = &engine->gt->uc.guc; + struct intel_guc *guc = gt_to_guc(engine->gt); struct intel_context *ce; unsigned long index; unsigned long flags; @@ -5817,7 +5828,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count, if (!ve) return ERR_PTR(-ENOMEM); - guc = &siblings[0]->gt->uc.guc; + guc = gt_to_guc(siblings[0]->gt); ve->base.i915 = siblings[0]->i915; ve->base.gt = siblings[0]->gt; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index 0945b177d5..2d9152eb72 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -385,7 +385,7 @@ int intel_huc_init(struct intel_huc *huc) if (HAS_ENGINE(gt, GSC0)) { struct i915_vma *vma; - vma = intel_guc_allocate_vma(&gt->uc.guc, PXP43_HUC_AUTH_INOUT_SIZE * 2); + vma = intel_guc_allocate_vma(gt_to_guc(gt), PXP43_HUC_AUTH_INOUT_SIZE * 2); if (IS_ERR(vma)) { err = PTR_ERR(vma); huc_info(huc, "Failed to allocate heci pkt\n"); @@ -540,7 +540,7 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc, int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type) { struct intel_gt *gt = huc_to_gt(huc); - struct intel_guc *guc = &gt->uc.guc; + struct intel_guc *guc = gt_to_guc(gt); int ret; if (!intel_uc_fw_is_loaded(&huc->fw)) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 399bc31918..7a63abf8f6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -50,10 +50,6 @@ static void uc_expand_default_options(struct intel_uc *uc) /* Default: enable HuC authentication and GuC submission */ i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION; - - /* XEHPSDV and PVC do not use HuC */ - if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915)) - i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC; } /* Reset GuC providing us with fresh state for both GuC and HuC. diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 756093eaf2..d80278eb45 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -807,7 +807,7 @@ static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware ** static int check_mtl_huc_guc_compatibility(struct intel_gt *gt, struct intel_uc_fw_file *huc_selected) { - struct intel_uc_fw_file *guc_selected = &gt->uc.guc.fw.file_selected; + struct intel_uc_fw_file *guc_selected = &gt_to_guc(gt)->fw.file_selected; struct intel_uc_fw_ver *huc_ver = &huc_selected->ver; struct intel_uc_fw_ver *guc_ver = &guc_selected->ver; bool new_huc, new_guc; @@ -1209,7 +1209,7 @@ static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw) * since its GGTT offset will be GuC accessible. 
*/ GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE); - vma = intel_guc_allocate_vma(>->uc.guc, PAGE_SIZE); + vma = intel_guc_allocate_vma(gt_to_guc(gt), PAGE_SIZE); if (IS_ERR(vma)) return PTR_ERR(vma); diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c index c900aac85a..68feb55654 100644 --- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c @@ -144,7 +144,7 @@ err: static int intel_guc_steal_guc_ids(void *arg) { struct intel_gt *gt = arg; - struct intel_guc *guc = >->uc.guc; + struct intel_guc *guc = gt_to_guc(gt); int ret, sv, context_index = 0; intel_wakeref_t wakeref; struct intel_engine_cs *engine; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index d4a3f3e093..4be8cb65fb 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -50,6 +50,7 @@ #include "trace.h" #include "display/intel_display.h" +#include "display/intel_sprite_regs.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index e0c5dfb788..2b7df7fcf3 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -36,8 +36,10 @@ #include "i915_reg.h" #include "gvt.h" +#include "display/bxt_dpio_phy_regs.h" #include "display/intel_display.h" #include "display/intel_dpio_phy.h" +#include "display/intel_sprite_regs.h" static int get_edp_pipe(struct intel_vgpu *vgpu) { diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 313efdabee..4140da68aa 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -34,11 +34,14 @@ */ #include -#include "i915_drv.h" + #include "gvt.h" +#include "i915_drv.h" #include "i915_pvinfo.h" #include "i915_reg.h" +#include "display/intel_sprite_regs.h" + #define PRIMARY_FORMAT_NUM 16 struct pixel_format { int drm_format; /* Pixel format in DRM definition */ diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 4dd52ac204..221a3ae81b 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -30,6 +30,7 @@ #include #include +#include #include "i915_drv.h" #include "gvt.h" @@ -50,21 +51,7 @@ struct gvt_firmware_header { #define dev_to_drm_minor(d) dev_get_drvdata((d)) -static ssize_t -gvt_firmware_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t offset, size_t count) -{ - memcpy(buf, attr->private + offset, count); - return count; -} - -static struct bin_attribute firmware_attr = { - .attr = {.name = "gvt_firmware", .mode = (S_IRUSR)}, - .read = gvt_firmware_read, - .write = NULL, - .mmap = NULL, -}; +static BIN_ATTR_SIMPLE_ADMIN_RO(gvt_firmware); static int expose_firmware_sysfs(struct intel_gvt *gvt) { @@ -107,10 +94,10 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) crc32_start = offsetof(struct gvt_firmware_header, version); h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start); - firmware_attr.size = size; - firmware_attr.private = firmware; + bin_attr_gvt_firmware.size = size; + bin_attr_gvt_firmware.private = firmware; - ret = device_create_bin_file(&pdev->dev, &firmware_attr); + ret = device_create_bin_file(&pdev->dev, &bin_attr_gvt_firmware); if (ret) { vfree(firmware); return ret; @@ -122,8 +109,8 @@ static void clean_firmware_sysfs(struct intel_gvt 
*gvt) { struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); - device_remove_bin_file(&pdev->dev, &firmware_attr); - vfree(firmware_attr.private); + device_remove_bin_file(&pdev->dev, &bin_attr_gvt_firmware); + vfree(bin_attr_gvt_firmware.private); } /** diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 094fca9b0e..58cca4906f 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -39,6 +39,7 @@ #include "trace.h" #include "gt/intel_gt_regs.h" +#include #if defined(VERBOSE_DEBUG) #define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index efcb00472b..22fbddbe3e 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -41,6 +41,7 @@ #include "gvt.h" #include "i915_pvinfo.h" #include "intel_mchbar_regs.h" +#include "display/bxt_dpio_phy_regs.h" #include "display/intel_display_types.h" #include "display/intel_dmc_regs.h" #include "display/intel_dp_aux_regs.h" @@ -49,9 +50,11 @@ #include "display/intel_fdi_regs.h" #include "display/intel_pps_regs.h" #include "display/intel_psr_regs.h" +#include "display/intel_sprite_regs.h" #include "display/skl_watermark_regs.h" #include "display/vlv_dsi_pll_regs.h" #include "gt/intel_gt_regs.h" +#include /* XXX FIXME i915 has changed PP_XXX definition */ #define PCH_PP_STATUS _MMIO(0xc7200) @@ -2763,15 +2766,15 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT, NULL, bxt_pcs_dw12_grp_write); - MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT, + MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT, bxt_port_tx_dw3_read, NULL); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT, NULL, bxt_pcs_dw12_grp_write); - MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT, + MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT, bxt_port_tx_dw3_read, NULL); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT, NULL, bxt_pcs_dw12_grp_write); - MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT, + MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT, bxt_port_tx_dw3_read, NULL); MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write); MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL); diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 5b5def6dde..e16e0d4c95 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -33,10 +33,12 @@ * */ +#include #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" +#include "display/bxt_dpio_phy_regs.h" #include "display/intel_dpio_phy.h" #include "gt/intel_gt_regs.h" diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 08ad1bd651..63c751ca41 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -34,6 +34,7 @@ #include "i915_drv.h" #include "gvt.h" #include "i915_pvinfo.h" +#include void populate_pvinfo_page(struct intel_vgpu *vgpu) { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 990eaa029d..bc717cf544 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -30,6 +30,7 @@ #include #include +#include #include #include "display/intel_display_params.h" @@ -156,18 +157,6 @@ static const char *i915_cache_level_str(struct drm_i915_gem_object *obj) case 4: return " WB (2-Way Coh)"; default: return " not defined"; } - } 
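The gvt/firmware.c hunk above replaces the hand-rolled gvt_firmware_read()/firmware_attr pair with BIN_ATTR_SIMPLE_ADMIN_RO(gvt_firmware). As a rough sketch, and assuming the generic sysfs helper this macro family is built around, the declaration is equivalent to something like:

    /* Assumed expansion: a root-only (0400) binary attribute whose read
     * callback simply copies out of ->private, i.e. exactly what the
     * removed gvt_firmware_read() used to do by hand.
     */
    struct bin_attribute bin_attr_gvt_firmware = {
            .attr = { .name = "gvt_firmware", .mode = 0400 },
            .read = sysfs_bin_attr_simple_read,
    };

With that, expose_firmware_sysfs() only needs to fill in .size and .private before device_create_bin_file(), and clean_firmware_sysfs() frees the same .private pointer, as the hunks show.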
else if (IS_PONTEVECCHIO(i915)) { - switch (obj->pat_index) { - case 0: return " UC"; - case 1: return " WC"; - case 2: return " WT"; - case 3: return " WB"; - case 4: return " WT (CLOS1)"; - case 5: return " WB (CLOS1)"; - case 6: return " WT (CLOS2)"; - case 7: return " WT (CLOS2)"; - default: return " not defined"; - } } else if (GRAPHICS_VER(i915) >= 12) { switch (obj->pat_index) { case 0: return " WB"; diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c index 8bca02025e..33d2dcb0de 100644 --- a/drivers/gpu/drm/i915/i915_debugfs_params.c +++ b/drivers/gpu/drm/i915/i915_debugfs_params.c @@ -4,6 +4,7 @@ */ #include +#include #include "i915_debugfs_params.h" #include "gt/intel_gt.h" diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 4b9233c07a..161b21eff6 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -202,7 +202,7 @@ static void sanitize_gpu(struct drm_i915_private *i915) unsigned int i; for_each_gt(gt, i915, i) - __intel_gt_reset(gt, ALL_ENGINES); + intel_gt_reset_all_engines(gt); } } @@ -920,27 +920,6 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file) return 0; } -/** - * i915_driver_lastclose - clean up after all DRM clients have exited - * @dev: DRM device - * - * Take care of cleaning up after all DRM clients have exited. In the - * mode setting case, we want to restore the kernel's initial mode (just - * in case the last client left us in a bad state). - * - * Additionally, in the non-mode setting case, we'll tear down the GTT - * and DMA structures, since the kernel won't be using them, and clea - * up any GEM state. - */ -static void i915_driver_lastclose(struct drm_device *dev) -{ - struct drm_i915_private *i915 = to_i915(dev); - - intel_fbdev_restore_mode(i915); - - vga_switcheroo_process_delayed_switch(); -} - static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; @@ -1831,7 +1810,6 @@ static const struct drm_driver i915_drm_driver = { DRIVER_SYNCOBJ_TIMELINE, .release = i915_driver_release, .open = i915_driver_open, - .lastclose = i915_driver_lastclose, .postclose = i915_driver_postclose, .show_fdinfo = PTR_IF(IS_ENABLED(CONFIG_PROC_FS), i915_drm_client_fdinfo), diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e81b3b2858..ee0d7d5f13 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -235,25 +235,17 @@ struct drm_i915_private { /* protects the irq masks */ spinlock_t irq_lock; - bool display_irqs_enabled; - /* Sideband mailbox protection */ struct mutex sb_lock; struct pm_qos_request sb_qos; /** Cached value of IMR to avoid reads in updating the bitfield */ - union { - u32 irq_mask; - u32 de_irq_mask[I915_MAX_PIPES]; - }; - u32 pipestat_irq_mask[I915_MAX_PIPES]; + u32 irq_mask; bool preserve_bios_swizzle; unsigned int fsb_freq, mem_freq, is_ddr3; - unsigned int skl_preferred_vco_freq; - unsigned int max_dotclk_freq; unsigned int hpll_freq; unsigned int czclk_freq; @@ -350,9 +342,6 @@ struct drm_i915_private { struct intel_pxp *pxp; - /* For i915gm/i945gm vblank irq workaround */ - u8 vblank_enabled; - bool irq_enabled; struct i915_pmu pmu; @@ -544,9 +533,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_DG1(i915) IS_PLATFORM(i915, INTEL_DG1) #define IS_ALDERLAKE_S(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_S) #define IS_ALDERLAKE_P(i915) 
IS_PLATFORM(i915, INTEL_ALDERLAKE_P) -#define IS_XEHPSDV(i915) IS_PLATFORM(i915, INTEL_XEHPSDV) #define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2) -#define IS_PONTEVECCHIO(i915) IS_PLATFORM(i915, INTEL_PONTEVECCHIO) #define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE) #define IS_LUNARLAKE(i915) 0 @@ -621,17 +608,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_TIGERLAKE_UY(i915) \ IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY) -#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \ - (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until)) - -#define IS_PVC_BD_STEP(__i915, since, until) \ - (IS_PONTEVECCHIO(__i915) && \ - IS_BASEDIE_STEP(__i915, since, until)) - -#define IS_PVC_CT_STEP(__i915, since, until) \ - (IS_PONTEVECCHIO(__i915) && \ - IS_GRAPHICS_STEP(__i915, since, until)) - #define IS_LP(i915) (INTEL_INFO(i915)->is_lp) #define IS_GEN9_LP(i915) (GRAPHICS_VER(i915) == 9 && IS_LP(i915)) #define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_LP(i915)) diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h deleted file mode 100644 index a327094de2..0000000000 --- a/drivers/gpu/drm/i915/i915_fixed.h +++ /dev/null @@ -1,148 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2018 Intel Corporation - */ - -#ifndef _I915_FIXED_H_ -#define _I915_FIXED_H_ - -#include -#include -#include -#include - -typedef struct { - u32 val; -} uint_fixed_16_16_t; - -#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX }) - -static inline bool is_fixed16_zero(uint_fixed_16_16_t val) -{ - return val.val == 0; -} - -static inline uint_fixed_16_16_t u32_to_fixed16(u32 val) -{ - uint_fixed_16_16_t fp = { .val = val << 16 }; - - WARN_ON(val > U16_MAX); - - return fp; -} - -static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp) -{ - return DIV_ROUND_UP(fp.val, 1 << 16); -} - -static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp) -{ - return fp.val >> 16; -} - -static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1, - uint_fixed_16_16_t min2) -{ - uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) }; - - return min; -} - -static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1, - uint_fixed_16_16_t max2) -{ - uint_fixed_16_16_t max = { .val = max(max1.val, max2.val) }; - - return max; -} - -static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val) -{ - uint_fixed_16_16_t fp = { .val = (u32)val }; - - WARN_ON(val > U32_MAX); - - return fp; -} - -static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val, - uint_fixed_16_16_t d) -{ - return DIV_ROUND_UP(val.val, d.val); -} - -static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul) -{ - u64 tmp; - - tmp = mul_u32_u32(val, mul.val); - tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16); - WARN_ON(tmp > U32_MAX); - - return (u32)tmp; -} - -static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val, - uint_fixed_16_16_t mul) -{ - u64 tmp; - - tmp = mul_u32_u32(val.val, mul.val); - tmp = tmp >> 16; - - return clamp_u64_to_fixed16(tmp); -} - -static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d) -{ - u64 tmp; - - tmp = (u64)val << 16; - tmp = DIV_ROUND_UP_ULL(tmp, d); - - return clamp_u64_to_fixed16(tmp); -} - -static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d) -{ - u64 tmp; - - tmp = (u64)val << 16; - tmp = DIV_ROUND_UP_ULL(tmp, d.val); - WARN_ON(tmp > U32_MAX); - - return (u32)tmp; -} - -static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul) -{ - 
u64 tmp; - - tmp = mul_u32_u32(val, mul.val); - - return clamp_u64_to_fixed16(tmp); -} - -static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1, - uint_fixed_16_16_t add2) -{ - u64 tmp; - - tmp = (u64)add1.val + add2.val; - - return clamp_u64_to_fixed16(tmp); -} - -static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1, - u32 add2) -{ - uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2); - u64 tmp; - - tmp = (u64)add1.val + tmp_add2.val; - - return clamp_u64_to_fixed16(tmp); -} - -#endif /* _I915_FIXED_H_ */ diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 5c3fec63cb..a62405787e 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -155,12 +155,18 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, */ value = 1; break; + case I915_PARAM_HAS_CONTEXT_FREQ_HINT: + if (intel_uc_uses_guc_submission(&to_gt(i915)->uc)) + value = 1; + else + value = -EINVAL; + break; case I915_PARAM_HAS_CONTEXT_ISOLATION: value = intel_engines_has_context_isolation(i915); break; case I915_PARAM_SLICE_MASK: /* Not supported from Xe_HP onward; use topology queries */ - if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) return -EINVAL; value = sseu->slice_mask; @@ -169,7 +175,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, break; case I915_PARAM_SUBSLICE_MASK: /* Not supported from Xe_HP onward; use topology queries */ - if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) return -EINVAL; /* Only copy bits from the first slice */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index a0b784ebad..625b3c0245 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -28,6 +28,7 @@ */ #include +#include #include #include #include @@ -1245,8 +1246,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee) if (MEDIA_VER(i915) >= 13 && engine->gt->type == GT_MEDIA) ee->fault_reg = intel_uncore_read(engine->uncore, XELPMP_RING_FAULT_REG); - - else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) + else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) ee->fault_reg = intel_gt_mcr_read_any(engine->gt, XEHP_RING_FAULT_REG); else if (GRAPHICS_VER(i915) >= 12) @@ -1852,7 +1852,7 @@ static void gt_record_global_regs(struct intel_gt_coredump *gt) if (GRAPHICS_VER(i915) == 7) gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT); - if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { + if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) { gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt, XEHP_FAULT_TLB_DATA0); gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt, diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c index c0662a022f..49db3e0982 100644 --- a/drivers/gpu/drm/i915/i915_hwmon.c +++ b/drivers/gpu/drm/i915/i915_hwmon.c @@ -739,12 +739,6 @@ hwm_get_preregistration_info(struct drm_i915_private *i915) hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT; hwmon->rg.energy_status_all = PCU_PACKAGE_ENERGY_STATUS; hwmon->rg.energy_status_tile = INVALID_MMIO_REG; - } else if (IS_XEHPSDV(i915)) { - hwmon->rg.pkg_power_sku_unit = GT0_PACKAGE_POWER_SKU_UNIT; - hwmon->rg.pkg_power_sku = INVALID_MMIO_REG; - hwmon->rg.pkg_rapl_limit = GT0_PACKAGE_RAPL_LIMIT; - hwmon->rg.energy_status_all = GT0_PLATFORM_ENERGY_STATUS; - hwmon->rg.energy_status_tile = 
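The deleted i915_fixed.h above implemented unsigned 16.16 fixed-point math: a u32 whose top 16 bits are the integer part and whose low 16 bits are the fraction, so value = val / 65536. A short worked example using the helpers shown in the removed header:

    uint_fixed_16_16_t a = u32_to_fixed16(3);  /* a.val == 3 << 16 == 0x30000 */
    uint_fixed_16_16_t b = div_fixed16(1, 2);  /* 0.5  -> b.val == 0x8000     */
    uint_fixed_16_16_t c = mul_fixed16(a, b);  /* 1.5  -> c.val == 0x18000    */
    u32 up = fixed16_to_u32_round_up(c);       /* ceil(1.5)  == 2 */
    u32 dn = fixed16_to_u32(c);                /* floor(1.5) == 1 */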
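The i915_getparam.c hunk above adds I915_PARAM_HAS_CONTEXT_FREQ_HINT, reporting 1 only when GuC submission (and with it SLPC) is in use; it pairs with the CONTEXT_LOW_LATENCY handling added to guc_context_policy_init_v70() earlier in this patch. A minimal userspace probe, sketched on the assumption that the parameter constant comes from the updated uapi i915_drm.h and relying on the long-standing getparam ioctl:

    #include <sys/ioctl.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    /* Returns nonzero if the kernel can honour per-context frequency hints. */
    static int has_context_freq_hint(int drm_fd)
    {
            int value = 0;
            struct drm_i915_getparam gp = {
                    .param = I915_PARAM_HAS_CONTEXT_FREQ_HINT,
                    .value = &value,
            };

            if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return 0; /* older kernel: parameter unknown */
            return value == 1; /* -EINVAL in value means GuC/SLPC not in use */
    }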
GT0_PACKAGE_ENERGY_STATUS; } else { hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG; hwmon->rg.pkg_power_sku = INVALID_MMIO_REG; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8130f04369..678d632ed0 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -702,7 +702,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv) gen5_gt_irq_reset(to_gt(dev_priv)); spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display_irqs_enabled) + if (dev_priv->display.irq.display_irqs_enabled) vlv_display_irq_reset(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); } @@ -767,7 +767,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv) GEN3_IRQ_RESET(uncore, GEN8_PCU_); spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display_irqs_enabled) + if (dev_priv->display.irq.display_irqs_enabled) vlv_display_irq_reset(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); } @@ -784,7 +784,7 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) gen5_gt_irq_postinstall(to_gt(dev_priv)); spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display_irqs_enabled) + if (dev_priv->display.irq.display_irqs_enabled) vlv_display_irq_postinstall(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); @@ -838,7 +838,7 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) gen8_gt_irq_postinstall(to_gt(dev_priv)); spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display_irqs_enabled) + if (dev_priv->display.irq.display_irqs_enabled) vlv_display_irq_postinstall(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index de43048543..8c00169e3a 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -108,9 +108,6 @@ i915_param_named_unsafe(guc_firmware_path, charp, 0400, i915_param_named_unsafe(huc_firmware_path, charp, 0400, "HuC firmware path to use instead of the default one"); -i915_param_named_unsafe(dmc_firmware_path, charp, 0400, - "DMC firmware path to use instead of the default one"); - i915_param_named_unsafe(gsc_firmware_path, charp, 0400, "GSC firmware path to use instead of the default one"); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 1315d7fac8..2eb3f2115f 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -51,7 +51,6 @@ struct drm_printer; param(int, guc_log_level, -1, 0400) \ param(char *, guc_firmware_path, NULL, 0400) \ param(char *, huc_firmware_path, NULL, 0400) \ - param(char *, dmc_firmware_path, NULL, 0400) \ param(char *, gsc_firmware_path, NULL, 0400) \ param(bool, memtest, false, 0400) \ param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 8b4fdeabb1..405ca17a99 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -38,6 +38,9 @@ #include "i915_reg.h" #include "intel_pci_config.h" +__diag_push(); +__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for device info"); + #define PLATFORM(x) .platform = (x) #define GEN(x) \ .__runtime.graphics.ip.ver = (x), \ @@ -59,14 +62,6 @@ [I915_CACHE_WT] = 2, \ } -#define PVC_CACHELEVEL \ - .cachelevel_to_pat = { \ - [I915_CACHE_NONE] = 0, \ - [I915_CACHE_LLC] = 3, \ - [I915_CACHE_L3_LLC] = 3, \ - [I915_CACHE_WT] = 2, \ - } - #define MTL_CACHELEVEL \ .cachelevel_to_pat = { \ 
[I915_CACHE_NONE] = 2, \ @@ -705,8 +700,6 @@ static const struct intel_device_info adl_p_info = { I915_GTT_PAGE_SIZE_2M #define XE_HP_FEATURES \ - .__runtime.graphics.ip.ver = 12, \ - .__runtime.graphics.ip.rel = 50, \ XE_HP_PAGE_SIZES, \ TGL_CACHELEVEL, \ .dma_mask_size = 46, \ @@ -730,32 +723,12 @@ static const struct intel_device_info adl_p_info = { .__runtime.ppgtt_size = 48, \ .__runtime.ppgtt_type = INTEL_PPGTT_FULL -#define XE_HPM_FEATURES \ - .__runtime.media.ip.ver = 12, \ - .__runtime.media.ip.rel = 50 - -__maybe_unused -static const struct intel_device_info xehpsdv_info = { - XE_HP_FEATURES, - XE_HPM_FEATURES, - DGFX_FEATURES, - PLATFORM(INTEL_XEHPSDV), - .has_64k_pages = 1, - .has_media_ratio_mode = 1, - .platform_engine_mask = - BIT(RCS0) | BIT(BCS0) | - BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) | - BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) | - BIT(VCS4) | BIT(VCS5) | BIT(VCS6) | BIT(VCS7) | - BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3), - .require_force_probe = 1, -}; - #define DG2_FEATURES \ XE_HP_FEATURES, \ - XE_HPM_FEATURES, \ DGFX_FEATURES, \ + .__runtime.graphics.ip.ver = 12, \ .__runtime.graphics.ip.rel = 55, \ + .__runtime.media.ip.ver = 12, \ .__runtime.media.ip.rel = 55, \ PLATFORM(INTEL_DG2), \ .has_64k_pages = 1, \ @@ -778,33 +751,6 @@ static const struct intel_device_info ats_m_info = { .tuning_thread_rr_after_dep = 1, }; -#define XE_HPC_FEATURES \ - XE_HP_FEATURES, \ - .dma_mask_size = 52, \ - .has_3d_pipeline = 0, \ - .has_guc_deprivilege = 1, \ - .has_l3_ccs_read = 1, \ - .has_mslice_steering = 0, \ - .has_one_eu_per_fuse_bit = 1 - -__maybe_unused -static const struct intel_device_info pvc_info = { - XE_HPC_FEATURES, - XE_HPM_FEATURES, - DGFX_FEATURES, - .__runtime.graphics.ip.rel = 60, - .__runtime.media.ip.rel = 60, - PLATFORM(INTEL_PONTEVECCHIO), - .has_flat_ccs = 0, - .max_pat_index = 7, - .platform_engine_mask = - BIT(BCS0) | - BIT(VCS0) | - BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3), - .require_force_probe = 1, - PVC_CACHELEVEL, -}; - static const struct intel_gt_definition xelpmp_extra_gt[] = { { .type = GT_MEDIA, @@ -842,6 +788,8 @@ static const struct intel_device_info mtl_info = { #undef PLATFORM +__diag_pop(); + /* * Make sure any device matches here are from most specific to most * general. 
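The __diag_push()/__diag_ignore_all("-Woverride-init", ...) pair wrapped around the device info tables above silences a warning that the PLATFORM()/GEN()/*_FEATURES macros trigger by design: a later designated initializer deliberately overrides a field already set by an earlier macro, and GCC's -Woverride-init flags every such override. A minimal illustration of the pattern (names here are purely illustrative):

    struct info { int ver; int rel; };

    #define BASE_FEATURES .ver = 12, .rel = 50

    /* -Woverride-init warns because .rel is initialized twice; the last
     * initializer wins, which is exactly what these tables rely on.
     */
    static const struct info dg2_like = {
            BASE_FEATURES,
            .rel = 55,
    };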
For example, since the Quanta match is based on the subsystem diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index bd9d812b1a..0b1cd4c7a5 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -292,7 +292,7 @@ static u32 i915_perf_stream_paranoid = true; #define OAREPORT_REASON_CTX_SWITCH (1<<3) #define OAREPORT_REASON_CLK_RATIO (1<<5) -#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) +#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) /* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate * @@ -817,7 +817,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, */ if (oa_report_ctx_invalid(stream, report) && - GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) { + GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 55)) { ctx_id = INVALID_CTX_ID; oa_context_id_squash(stream, report32); } @@ -1419,7 +1419,7 @@ static int gen12_get_render_context_id(struct i915_perf_stream *stream) mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) << (GEN12_GUC_SW_CTX_ID_SHIFT - 32); - } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) { + } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 55)) { ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) << (XEHP_SW_CTX_ID_SHIFT - 32); @@ -2881,11 +2881,11 @@ gen12_enable_metric_set(struct i915_perf_stream *stream, int ret; /* - * Wa_1508761755:xehpsdv, dg2 + * Wa_1508761755 * EU NOA signals behave incorrectly if EU clock gating is enabled. * Disable thread stall DOP gating and EU DOP gating. */ - if (IS_XEHPSDV(i915) || IS_DG2(i915)) { + if (IS_DG2(i915)) { intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN, _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE)); intel_uncore_write(uncore, GEN7_ROW_CHICKEN2, @@ -2911,7 +2911,7 @@ gen12_enable_metric_set(struct i915_perf_stream *stream, /* * Initialize Super Queue Internal Cnt Register * Set PMON Enable in order to collect valid metrics. - * Enable byets per clock reporting in OA for XEHPSDV onward. + * Enable bytes per clock reporting in OA. */ sqcnt1 = GEN12_SQCNT1_PMON_ENABLE | (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0); @@ -2971,10 +2971,9 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream) u32 sqcnt1; /* - * Wa_1508761755:xehpsdv, dg2 - * Enable thread stall DOP gating and EU DOP gating. + * Wa_1508761755: Enable thread stall DOP gating and EU DOP gating. 
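Several hunks in this patch raise GRAPHICS_VER_FULL() cutoffs from IP_VER(12, 50) to IP_VER(12, 55): with Xe_HP SDV (12.50) and Ponte Vecchio support removed, DG2 at 12.55 is the lowest remaining Xe_HP-class IP, so the tighter bound keeps the same effective behaviour on the platforms that are left. These checks compare packed version numbers; the exact macro lives in the i915 headers, but the packing sketched below (8 bits of version, 8 bits of release) is an assumption used only to show why the comparison works:

    #define IP_VER_SKETCH(ver, rel)  ((ver) << 8 | (rel))

    /* IP_VER_SKETCH(12, 55) == 0x0c37 and IP_VER_SKETCH(12, 50) == 0x0c32,
     * so a >= 12.55 test admits DG2 and newer, while 12.50-only code paths
     * drop out along with the removed platforms.
     */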
*/ - if (IS_XEHPSDV(i915) || IS_DG2(i915)) { + if (IS_DG2(i915)) { intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN, _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE)); intel_uncore_write(uncore, GEN7_ROW_CHICKEN2, @@ -4123,7 +4122,7 @@ static int read_properties_unlocked(struct i915_perf *perf, props->hold_preemption = !!value; break; case DRM_I915_PERF_PROP_GLOBAL_SSEU: { - if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) { + if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 55)) { drm_dbg(&perf->i915->drm, "SSEU config not supported on gfx %x\n", GRAPHICS_VER_FULL(perf->i915)); diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 3baa2f54a8..14d9ec0ed7 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -105,7 +105,7 @@ static int query_geometry_subslices(struct drm_i915_private *i915, struct intel_engine_cs *engine; struct i915_engine_class_instance classinstance; - if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55)) return -ENODEV; classinstance = *((struct i915_engine_class_instance *)&query_item->flags); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3b2e49ce29..e22a82a5dd 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -195,367 +195,6 @@ #define DPIO_SFR_BYPASS (1 << 1) #define DPIO_CMNRST (1 << 0) -/* - * Per pipe/PLL DPIO regs - */ -#define _VLV_PLL_DW3_CH0 0x800c -#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */ -#define DPIO_POST_DIV_DAC 0 -#define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */ -#define DPIO_POST_DIV_LVDS1 2 -#define DPIO_POST_DIV_LVDS2 3 -#define DPIO_K_SHIFT (24) /* 4 bits */ -#define DPIO_P1_SHIFT (21) /* 3 bits */ -#define DPIO_P2_SHIFT (16) /* 5 bits */ -#define DPIO_N_SHIFT (12) /* 4 bits */ -#define DPIO_ENABLE_CALIBRATION (1 << 11) -#define DPIO_M1DIV_SHIFT (8) /* 3 bits */ -#define DPIO_M2DIV_MASK 0xff -#define _VLV_PLL_DW3_CH1 0x802c -#define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1) - -#define _VLV_PLL_DW5_CH0 0x8014 -#define DPIO_REFSEL_OVERRIDE 27 -#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */ -#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */ -#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */ -#define DPIO_PLL_REFCLK_SEL_MASK 3 -#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */ -#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */ -#define _VLV_PLL_DW5_CH1 0x8034 -#define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1) - -#define _VLV_PLL_DW7_CH0 0x801c -#define _VLV_PLL_DW7_CH1 0x803c -#define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1) - -#define _VLV_PLL_DW8_CH0 0x8040 -#define _VLV_PLL_DW8_CH1 0x8060 -#define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1) - -#define VLV_PLL_DW9_BCAST 0xc044 -#define _VLV_PLL_DW9_CH0 0x8044 -#define _VLV_PLL_DW9_CH1 0x8064 -#define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1) - -#define _VLV_PLL_DW10_CH0 0x8048 -#define _VLV_PLL_DW10_CH1 0x8068 -#define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1) - -#define _VLV_PLL_DW11_CH0 0x804c -#define _VLV_PLL_DW11_CH1 0x806c -#define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1) - -/* Spec for ref block start counts at DW10 */ -#define VLV_REF_DW13 0x80ac - -#define VLV_CMN_DW0 0x8100 - -/* - * Per DDI channel DPIO regs - */ - -#define _VLV_PCS_DW0_CH0 0x8200 -#define _VLV_PCS_DW0_CH1 0x8400 -#define DPIO_PCS_TX_LANE2_RESET (1 << 
16) -#define DPIO_PCS_TX_LANE1_RESET (1 << 7) -#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1 << 4) -#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1 << 3) -#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1) - -#define _VLV_PCS01_DW0_CH0 0x200 -#define _VLV_PCS23_DW0_CH0 0x400 -#define _VLV_PCS01_DW0_CH1 0x2600 -#define _VLV_PCS23_DW0_CH1 0x2800 -#define VLV_PCS01_DW0(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1) -#define VLV_PCS23_DW0(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1) - -#define _VLV_PCS_DW1_CH0 0x8204 -#define _VLV_PCS_DW1_CH1 0x8404 -#define CHV_PCS_REQ_SOFTRESET_EN (1 << 23) -#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1 << 22) -#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1 << 21) -#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6) -#define DPIO_PCS_CLK_SOFT_RESET (1 << 5) -#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1) - -#define _VLV_PCS01_DW1_CH0 0x204 -#define _VLV_PCS23_DW1_CH0 0x404 -#define _VLV_PCS01_DW1_CH1 0x2604 -#define _VLV_PCS23_DW1_CH1 0x2804 -#define VLV_PCS01_DW1(ch) _PORT(ch, _VLV_PCS01_DW1_CH0, _VLV_PCS01_DW1_CH1) -#define VLV_PCS23_DW1(ch) _PORT(ch, _VLV_PCS23_DW1_CH0, _VLV_PCS23_DW1_CH1) - -#define _VLV_PCS_DW8_CH0 0x8220 -#define _VLV_PCS_DW8_CH1 0x8420 -#define CHV_PCS_USEDCLKCHANNEL_OVRRIDE (1 << 20) -#define CHV_PCS_USEDCLKCHANNEL (1 << 21) -#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1) - -#define _VLV_PCS01_DW8_CH0 0x0220 -#define _VLV_PCS23_DW8_CH0 0x0420 -#define _VLV_PCS01_DW8_CH1 0x2620 -#define _VLV_PCS23_DW8_CH1 0x2820 -#define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1) -#define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1) - -#define _VLV_PCS_DW9_CH0 0x8224 -#define _VLV_PCS_DW9_CH1 0x8424 -#define DPIO_PCS_TX2MARGIN_MASK (0x7 << 13) -#define DPIO_PCS_TX2MARGIN_000 (0 << 13) -#define DPIO_PCS_TX2MARGIN_101 (1 << 13) -#define DPIO_PCS_TX1MARGIN_MASK (0x7 << 10) -#define DPIO_PCS_TX1MARGIN_000 (0 << 10) -#define DPIO_PCS_TX1MARGIN_101 (1 << 10) -#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1) - -#define _VLV_PCS01_DW9_CH0 0x224 -#define _VLV_PCS23_DW9_CH0 0x424 -#define _VLV_PCS01_DW9_CH1 0x2624 -#define _VLV_PCS23_DW9_CH1 0x2824 -#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1) -#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1) - -#define _CHV_PCS_DW10_CH0 0x8228 -#define _CHV_PCS_DW10_CH1 0x8428 -#define DPIO_PCS_SWING_CALC_TX0_TX2 (1 << 30) -#define DPIO_PCS_SWING_CALC_TX1_TX3 (1 << 31) -#define DPIO_PCS_TX2DEEMP_MASK (0xf << 24) -#define DPIO_PCS_TX2DEEMP_9P5 (0 << 24) -#define DPIO_PCS_TX2DEEMP_6P0 (2 << 24) -#define DPIO_PCS_TX1DEEMP_MASK (0xf << 16) -#define DPIO_PCS_TX1DEEMP_9P5 (0 << 16) -#define DPIO_PCS_TX1DEEMP_6P0 (2 << 16) -#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1) - -#define _VLV_PCS01_DW10_CH0 0x0228 -#define _VLV_PCS23_DW10_CH0 0x0428 -#define _VLV_PCS01_DW10_CH1 0x2628 -#define _VLV_PCS23_DW10_CH1 0x2828 -#define VLV_PCS01_DW10(port) _PORT(port, _VLV_PCS01_DW10_CH0, _VLV_PCS01_DW10_CH1) -#define VLV_PCS23_DW10(port) _PORT(port, _VLV_PCS23_DW10_CH0, _VLV_PCS23_DW10_CH1) - -#define _VLV_PCS_DW11_CH0 0x822c -#define _VLV_PCS_DW11_CH1 0x842c -#define DPIO_TX2_STAGGER_MASK(x) ((x) << 24) -#define DPIO_LANEDESKEW_STRAP_OVRD (1 << 3) -#define DPIO_LEFT_TXFIFO_RST_MASTER (1 << 1) -#define DPIO_RIGHT_TXFIFO_RST_MASTER (1 << 0) -#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1) - -#define 
_VLV_PCS01_DW11_CH0 0x022c -#define _VLV_PCS23_DW11_CH0 0x042c -#define _VLV_PCS01_DW11_CH1 0x262c -#define _VLV_PCS23_DW11_CH1 0x282c -#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1) -#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1) - -#define _VLV_PCS01_DW12_CH0 0x0230 -#define _VLV_PCS23_DW12_CH0 0x0430 -#define _VLV_PCS01_DW12_CH1 0x2630 -#define _VLV_PCS23_DW12_CH1 0x2830 -#define VLV_PCS01_DW12(ch) _PORT(ch, _VLV_PCS01_DW12_CH0, _VLV_PCS01_DW12_CH1) -#define VLV_PCS23_DW12(ch) _PORT(ch, _VLV_PCS23_DW12_CH0, _VLV_PCS23_DW12_CH1) - -#define _VLV_PCS_DW12_CH0 0x8230 -#define _VLV_PCS_DW12_CH1 0x8430 -#define DPIO_TX2_STAGGER_MULT(x) ((x) << 20) -#define DPIO_TX1_STAGGER_MULT(x) ((x) << 16) -#define DPIO_TX1_STAGGER_MASK(x) ((x) << 8) -#define DPIO_LANESTAGGER_STRAP_OVRD (1 << 6) -#define DPIO_LANESTAGGER_STRAP(x) ((x) << 0) -#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1) - -#define _VLV_PCS_DW14_CH0 0x8238 -#define _VLV_PCS_DW14_CH1 0x8438 -#define VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1) - -#define _VLV_PCS_DW23_CH0 0x825c -#define _VLV_PCS_DW23_CH1 0x845c -#define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1) - -#define _VLV_TX_DW2_CH0 0x8288 -#define _VLV_TX_DW2_CH1 0x8488 -#define DPIO_SWING_MARGIN000_SHIFT 16 -#define DPIO_SWING_MARGIN000_MASK (0xff << DPIO_SWING_MARGIN000_SHIFT) -#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8 -#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1) - -#define _VLV_TX_DW3_CH0 0x828c -#define _VLV_TX_DW3_CH1 0x848c -/* The following bit for CHV phy */ -#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1 << 27) -#define DPIO_SWING_MARGIN101_SHIFT 16 -#define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT) -#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1) - -#define _VLV_TX_DW4_CH0 0x8290 -#define _VLV_TX_DW4_CH1 0x8490 -#define DPIO_SWING_DEEMPH9P5_SHIFT 24 -#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT) -#define DPIO_SWING_DEEMPH6P0_SHIFT 16 -#define DPIO_SWING_DEEMPH6P0_MASK (0xff << DPIO_SWING_DEEMPH6P0_SHIFT) -#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1) - -#define _VLV_TX3_DW4_CH0 0x690 -#define _VLV_TX3_DW4_CH1 0x2a90 -#define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1) - -#define _VLV_TX_DW5_CH0 0x8294 -#define _VLV_TX_DW5_CH1 0x8494 -#define DPIO_TX_OCALINIT_EN (1 << 31) -#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1) - -#define _VLV_TX_DW11_CH0 0x82ac -#define _VLV_TX_DW11_CH1 0x84ac -#define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1) - -#define _VLV_TX_DW14_CH0 0x82b8 -#define _VLV_TX_DW14_CH1 0x84b8 -#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1) - -/* CHV dpPhy registers */ -#define _CHV_PLL_DW0_CH0 0x8000 -#define _CHV_PLL_DW0_CH1 0x8180 -#define CHV_PLL_DW0(ch) _PIPE(ch, _CHV_PLL_DW0_CH0, _CHV_PLL_DW0_CH1) - -#define _CHV_PLL_DW1_CH0 0x8004 -#define _CHV_PLL_DW1_CH1 0x8184 -#define DPIO_CHV_N_DIV_SHIFT 8 -#define DPIO_CHV_M1_DIV_BY_2 (0 << 0) -#define CHV_PLL_DW1(ch) _PIPE(ch, _CHV_PLL_DW1_CH0, _CHV_PLL_DW1_CH1) - -#define _CHV_PLL_DW2_CH0 0x8008 -#define _CHV_PLL_DW2_CH1 0x8188 -#define CHV_PLL_DW2(ch) _PIPE(ch, _CHV_PLL_DW2_CH0, _CHV_PLL_DW2_CH1) - -#define _CHV_PLL_DW3_CH0 0x800c -#define _CHV_PLL_DW3_CH1 0x818c -#define DPIO_CHV_FRAC_DIV_EN (1 << 16) -#define DPIO_CHV_FIRST_MOD (0 << 8) -#define DPIO_CHV_SECOND_MOD (1 << 8) -#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 
0 -#define DPIO_CHV_FEEDFWD_GAIN_MASK (0xF << 0) -#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1) - -#define _CHV_PLL_DW6_CH0 0x8018 -#define _CHV_PLL_DW6_CH1 0x8198 -#define DPIO_CHV_GAIN_CTRL_SHIFT 16 -#define DPIO_CHV_INT_COEFF_SHIFT 8 -#define DPIO_CHV_PROP_COEFF_SHIFT 0 -#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1) - -#define _CHV_PLL_DW8_CH0 0x8020 -#define _CHV_PLL_DW8_CH1 0x81A0 -#define DPIO_CHV_TDC_TARGET_CNT_SHIFT 0 -#define DPIO_CHV_TDC_TARGET_CNT_MASK (0x3FF << 0) -#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1) - -#define _CHV_PLL_DW9_CH0 0x8024 -#define _CHV_PLL_DW9_CH1 0x81A4 -#define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */ -#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK (7 << 1) -#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */ -#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1) - -#define _CHV_CMN_DW0_CH0 0x8100 -#define DPIO_ALLDL_POWERDOWN_SHIFT_CH0 19 -#define DPIO_ANYDL_POWERDOWN_SHIFT_CH0 18 -#define DPIO_ALLDL_POWERDOWN (1 << 1) -#define DPIO_ANYDL_POWERDOWN (1 << 0) - -#define _CHV_CMN_DW5_CH0 0x8114 -#define CHV_BUFRIGHTENA1_DISABLE (0 << 20) -#define CHV_BUFRIGHTENA1_NORMAL (1 << 20) -#define CHV_BUFRIGHTENA1_FORCE (3 << 20) -#define CHV_BUFRIGHTENA1_MASK (3 << 20) -#define CHV_BUFLEFTENA1_DISABLE (0 << 22) -#define CHV_BUFLEFTENA1_NORMAL (1 << 22) -#define CHV_BUFLEFTENA1_FORCE (3 << 22) -#define CHV_BUFLEFTENA1_MASK (3 << 22) - -#define _CHV_CMN_DW13_CH0 0x8134 -#define _CHV_CMN_DW0_CH1 0x8080 -#define DPIO_CHV_S1_DIV_SHIFT 21 -#define DPIO_CHV_P1_DIV_SHIFT 13 /* 3 bits */ -#define DPIO_CHV_P2_DIV_SHIFT 8 /* 5 bits */ -#define DPIO_CHV_K_DIV_SHIFT 4 -#define DPIO_PLL_FREQLOCK (1 << 1) -#define DPIO_PLL_LOCK (1 << 0) -#define CHV_CMN_DW13(ch) _PIPE(ch, _CHV_CMN_DW13_CH0, _CHV_CMN_DW0_CH1) - -#define _CHV_CMN_DW14_CH0 0x8138 -#define _CHV_CMN_DW1_CH1 0x8084 -#define DPIO_AFC_RECAL (1 << 14) -#define DPIO_DCLKP_EN (1 << 13) -#define CHV_BUFLEFTENA2_DISABLE (0 << 17) /* CL2 DW1 only */ -#define CHV_BUFLEFTENA2_NORMAL (1 << 17) /* CL2 DW1 only */ -#define CHV_BUFLEFTENA2_FORCE (3 << 17) /* CL2 DW1 only */ -#define CHV_BUFLEFTENA2_MASK (3 << 17) /* CL2 DW1 only */ -#define CHV_BUFRIGHTENA2_DISABLE (0 << 19) /* CL2 DW1 only */ -#define CHV_BUFRIGHTENA2_NORMAL (1 << 19) /* CL2 DW1 only */ -#define CHV_BUFRIGHTENA2_FORCE (3 << 19) /* CL2 DW1 only */ -#define CHV_BUFRIGHTENA2_MASK (3 << 19) /* CL2 DW1 only */ -#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1) - -#define _CHV_CMN_DW19_CH0 0x814c -#define _CHV_CMN_DW6_CH1 0x8098 -#define DPIO_ALLDL_POWERDOWN_SHIFT_CH1 30 /* CL2 DW6 only */ -#define DPIO_ANYDL_POWERDOWN_SHIFT_CH1 29 /* CL2 DW6 only */ -#define DPIO_DYNPWRDOWNEN_CH1 (1 << 28) /* CL2 DW6 only */ -#define CHV_CMN_USEDCLKCHANNEL (1 << 13) - -#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1) - -#define CHV_CMN_DW28 0x8170 -#define DPIO_CL1POWERDOWNEN (1 << 23) -#define DPIO_DYNPWRDOWNEN_CH0 (1 << 22) -#define DPIO_SUS_CLK_CONFIG_ON (0 << 0) -#define DPIO_SUS_CLK_CONFIG_CLKREQ (1 << 0) -#define DPIO_SUS_CLK_CONFIG_GATE (2 << 0) -#define DPIO_SUS_CLK_CONFIG_GATE_CLKREQ (3 << 0) - -#define CHV_CMN_DW30 0x8178 -#define DPIO_CL2_LDOFUSE_PWRENB (1 << 6) -#define DPIO_LRC_BYPASS (1 << 3) - -#define _TXLANE(ch, lane, offset) ((ch ? 
0x2400 : 0) + \ - (lane) * 0x200 + (offset)) - -#define CHV_TX_DW0(ch, lane) _TXLANE(ch, lane, 0x80) -#define CHV_TX_DW1(ch, lane) _TXLANE(ch, lane, 0x84) -#define CHV_TX_DW2(ch, lane) _TXLANE(ch, lane, 0x88) -#define CHV_TX_DW3(ch, lane) _TXLANE(ch, lane, 0x8c) -#define CHV_TX_DW4(ch, lane) _TXLANE(ch, lane, 0x90) -#define CHV_TX_DW5(ch, lane) _TXLANE(ch, lane, 0x94) -#define CHV_TX_DW6(ch, lane) _TXLANE(ch, lane, 0x98) -#define CHV_TX_DW7(ch, lane) _TXLANE(ch, lane, 0x9c) -#define CHV_TX_DW8(ch, lane) _TXLANE(ch, lane, 0xa0) -#define CHV_TX_DW9(ch, lane) _TXLANE(ch, lane, 0xa4) -#define CHV_TX_DW10(ch, lane) _TXLANE(ch, lane, 0xa8) -#define CHV_TX_DW11(ch, lane) _TXLANE(ch, lane, 0xac) -#define DPIO_FRC_LATENCY_SHFIT 8 -#define CHV_TX_DW14(ch, lane) _TXLANE(ch, lane, 0xb8) -#define DPIO_UPAR_SHIFT 30 - -/* BXT PHY registers */ -#define _BXT_PHY0_BASE 0x6C000 -#define _BXT_PHY1_BASE 0x162000 -#define _BXT_PHY2_BASE 0x163000 -#define BXT_PHY_BASE(phy) \ - _PICK_EVEN_2RANGES(phy, 1, \ - _BXT_PHY0_BASE, _BXT_PHY0_BASE, \ - _BXT_PHY1_BASE, _BXT_PHY2_BASE) - -#define _BXT_PHY(phy, reg) \ - _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg)) - -#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \ - (BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE, \ - (reg_ch1) - _BXT_PHY0_BASE)) -#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \ - _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1)) - #define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090) #define MIPIO_RST_CTRL (1 << 2) @@ -577,250 +216,6 @@ _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \ _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C)) -/* BXT PHY PLL registers */ -#define _PORT_PLL_A 0x46074 -#define _PORT_PLL_B 0x46078 -#define _PORT_PLL_C 0x4607c -#define PORT_PLL_ENABLE REG_BIT(31) -#define PORT_PLL_LOCK REG_BIT(30) -#define PORT_PLL_REF_SEL REG_BIT(27) -#define PORT_PLL_POWER_ENABLE REG_BIT(26) -#define PORT_PLL_POWER_STATE REG_BIT(25) -#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B) - -#define _PORT_PLL_EBB_0_A 0x162034 -#define _PORT_PLL_EBB_0_B 0x6C034 -#define _PORT_PLL_EBB_0_C 0x6C340 -#define PORT_PLL_P1_MASK REG_GENMASK(15, 13) -#define PORT_PLL_P1(p1) REG_FIELD_PREP(PORT_PLL_P1_MASK, (p1)) -#define PORT_PLL_P2_MASK REG_GENMASK(12, 8) -#define PORT_PLL_P2(p2) REG_FIELD_PREP(PORT_PLL_P2_MASK, (p2)) -#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_PLL_EBB_0_B, \ - _PORT_PLL_EBB_0_C) - -#define _PORT_PLL_EBB_4_A 0x162038 -#define _PORT_PLL_EBB_4_B 0x6C038 -#define _PORT_PLL_EBB_4_C 0x6C344 -#define PORT_PLL_RECALIBRATE REG_BIT(14) -#define PORT_PLL_10BIT_CLK_ENABLE REG_BIT(13) -#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_PLL_EBB_4_B, \ - _PORT_PLL_EBB_4_C) - -#define _PORT_PLL_0_A 0x162100 -#define _PORT_PLL_0_B 0x6C100 -#define _PORT_PLL_0_C 0x6C380 -/* PORT_PLL_0_A */ -#define PORT_PLL_M2_INT_MASK REG_GENMASK(7, 0) -#define PORT_PLL_M2_INT(m2_int) REG_FIELD_PREP(PORT_PLL_M2_INT_MASK, (m2_int)) -/* PORT_PLL_1_A */ -#define PORT_PLL_N_MASK REG_GENMASK(11, 8) -#define PORT_PLL_N(n) REG_FIELD_PREP(PORT_PLL_N_MASK, (n)) -/* PORT_PLL_2_A */ -#define PORT_PLL_M2_FRAC_MASK REG_GENMASK(21, 0) -#define PORT_PLL_M2_FRAC(m2_frac) REG_FIELD_PREP(PORT_PLL_M2_FRAC_MASK, (m2_frac)) -/* PORT_PLL_3_A */ -#define PORT_PLL_M2_FRAC_ENABLE REG_BIT(16) -/* PORT_PLL_6_A */ -#define PORT_PLL_GAIN_CTL_MASK REG_GENMASK(18, 16) -#define PORT_PLL_GAIN_CTL(x) REG_FIELD_PREP(PORT_PLL_GAIN_CTL_MASK, (x)) -#define PORT_PLL_INT_COEFF_MASK REG_GENMASK(12, 8) -#define PORT_PLL_INT_COEFF(x) 
REG_FIELD_PREP(PORT_PLL_INT_COEFF_MASK, (x)) -#define PORT_PLL_PROP_COEFF_MASK REG_GENMASK(3, 0) -#define PORT_PLL_PROP_COEFF(x) REG_FIELD_PREP(PORT_PLL_PROP_COEFF_MASK, (x)) -/* PORT_PLL_8_A */ -#define PORT_PLL_TARGET_CNT_MASK REG_GENMASK(9, 0) -#define PORT_PLL_TARGET_CNT(x) REG_FIELD_PREP(PORT_PLL_TARGET_CNT_MASK, (x)) -/* PORT_PLL_9_A */ -#define PORT_PLL_LOCK_THRESHOLD_MASK REG_GENMASK(3, 1) -#define PORT_PLL_LOCK_THRESHOLD(x) REG_FIELD_PREP(PORT_PLL_LOCK_THRESHOLD_MASK, (x)) -/* PORT_PLL_10_A */ -#define PORT_PLL_DCO_AMP_OVR_EN_H REG_BIT(27) -#define PORT_PLL_DCO_AMP_MASK REG_GENMASK(13, 10) -#define PORT_PLL_DCO_AMP(x) REG_FIELD_PREP(PORT_PLL_DCO_AMP_MASK, (x)) -#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \ - _PORT_PLL_0_B, \ - _PORT_PLL_0_C) -#define BXT_PORT_PLL(phy, ch, idx) _MMIO(_PORT_PLL_BASE(phy, ch) + \ - (idx) * 4) - -/* BXT PHY common lane registers */ -#define _PORT_CL1CM_DW0_A 0x162000 -#define _PORT_CL1CM_DW0_BC 0x6C000 -#define PHY_POWER_GOOD (1 << 16) -#define PHY_RESERVED (1 << 7) -#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC) - -#define _PORT_CL1CM_DW9_A 0x162024 -#define _PORT_CL1CM_DW9_BC 0x6C024 -#define IREF0RC_OFFSET_SHIFT 8 -#define IREF0RC_OFFSET_MASK (0xFF << IREF0RC_OFFSET_SHIFT) -#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC) - -#define _PORT_CL1CM_DW10_A 0x162028 -#define _PORT_CL1CM_DW10_BC 0x6C028 -#define IREF1RC_OFFSET_SHIFT 8 -#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT) -#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC) - -#define _PORT_CL1CM_DW28_A 0x162070 -#define _PORT_CL1CM_DW28_BC 0x6C070 -#define OCL1_POWER_DOWN_EN (1 << 23) -#define DW28_OLDO_DYN_PWR_DOWN_EN (1 << 22) -#define SUS_CLK_CONFIG 0x3 -#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC) - -#define _PORT_CL1CM_DW30_A 0x162078 -#define _PORT_CL1CM_DW30_BC 0x6C078 -#define OCL2_LDOFUSE_PWR_DIS (1 << 6) -#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC) - -/* The spec defines this only for BXT PHY0, but lets assume that this - * would exist for PHY1 too if it had a second channel. 
- */ -#define _PORT_CL2CM_DW6_A 0x162358 -#define _PORT_CL2CM_DW6_BC 0x6C358 -#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC) -#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28) - -/* BXT PHY Ref registers */ -#define _PORT_REF_DW3_A 0x16218C -#define _PORT_REF_DW3_BC 0x6C18C -#define GRC_DONE (1 << 22) -#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC) - -#define _PORT_REF_DW6_A 0x162198 -#define _PORT_REF_DW6_BC 0x6C198 -#define GRC_CODE_SHIFT 24 -#define GRC_CODE_MASK (0xFF << GRC_CODE_SHIFT) -#define GRC_CODE_FAST_SHIFT 16 -#define GRC_CODE_FAST_MASK (0xFF << GRC_CODE_FAST_SHIFT) -#define GRC_CODE_SLOW_SHIFT 8 -#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT) -#define GRC_CODE_NOM_MASK 0xFF -#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC) - -#define _PORT_REF_DW8_A 0x1621A0 -#define _PORT_REF_DW8_BC 0x6C1A0 -#define GRC_DIS (1 << 15) -#define GRC_RDY_OVRD (1 << 1) -#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC) - -/* BXT PHY PCS registers */ -#define _PORT_PCS_DW10_LN01_A 0x162428 -#define _PORT_PCS_DW10_LN01_B 0x6C428 -#define _PORT_PCS_DW10_LN01_C 0x6C828 -#define _PORT_PCS_DW10_GRP_A 0x162C28 -#define _PORT_PCS_DW10_GRP_B 0x6CC28 -#define _PORT_PCS_DW10_GRP_C 0x6CE28 -#define BXT_PORT_PCS_DW10_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_PCS_DW10_LN01_B, \ - _PORT_PCS_DW10_LN01_C) -#define BXT_PORT_PCS_DW10_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_PCS_DW10_GRP_B, \ - _PORT_PCS_DW10_GRP_C) - -#define TX2_SWING_CALC_INIT (1 << 31) -#define TX1_SWING_CALC_INIT (1 << 30) - -#define _PORT_PCS_DW12_LN01_A 0x162430 -#define _PORT_PCS_DW12_LN01_B 0x6C430 -#define _PORT_PCS_DW12_LN01_C 0x6C830 -#define _PORT_PCS_DW12_LN23_A 0x162630 -#define _PORT_PCS_DW12_LN23_B 0x6C630 -#define _PORT_PCS_DW12_LN23_C 0x6CA30 -#define _PORT_PCS_DW12_GRP_A 0x162c30 -#define _PORT_PCS_DW12_GRP_B 0x6CC30 -#define _PORT_PCS_DW12_GRP_C 0x6CE30 -#define LANESTAGGER_STRAP_OVRD (1 << 6) -#define LANE_STAGGER_MASK 0x1F -#define BXT_PORT_PCS_DW12_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_PCS_DW12_LN01_B, \ - _PORT_PCS_DW12_LN01_C) -#define BXT_PORT_PCS_DW12_LN23(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_PCS_DW12_LN23_B, \ - _PORT_PCS_DW12_LN23_C) -#define BXT_PORT_PCS_DW12_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_PCS_DW12_GRP_B, \ - _PORT_PCS_DW12_GRP_C) - -/* BXT PHY TX registers */ -#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \ - ((lane) & 1) * 0x80) - -#define _PORT_TX_DW2_LN0_A 0x162508 -#define _PORT_TX_DW2_LN0_B 0x6C508 -#define _PORT_TX_DW2_LN0_C 0x6C908 -#define _PORT_TX_DW2_GRP_A 0x162D08 -#define _PORT_TX_DW2_GRP_B 0x6CD08 -#define _PORT_TX_DW2_GRP_C 0x6CF08 -#define BXT_PORT_TX_DW2_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_TX_DW2_LN0_B, \ - _PORT_TX_DW2_LN0_C) -#define BXT_PORT_TX_DW2_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_TX_DW2_GRP_B, \ - _PORT_TX_DW2_GRP_C) -#define MARGIN_000_SHIFT 16 -#define MARGIN_000 (0xFF << MARGIN_000_SHIFT) -#define UNIQ_TRANS_SCALE_SHIFT 8 -#define UNIQ_TRANS_SCALE (0xFF << UNIQ_TRANS_SCALE_SHIFT) - -#define _PORT_TX_DW3_LN0_A 0x16250C -#define _PORT_TX_DW3_LN0_B 0x6C50C -#define _PORT_TX_DW3_LN0_C 0x6C90C -#define _PORT_TX_DW3_GRP_A 0x162D0C -#define _PORT_TX_DW3_GRP_B 0x6CD0C -#define _PORT_TX_DW3_GRP_C 0x6CF0C -#define BXT_PORT_TX_DW3_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_TX_DW3_LN0_B, \ - _PORT_TX_DW3_LN0_C) -#define BXT_PORT_TX_DW3_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_TX_DW3_GRP_B, \ - _PORT_TX_DW3_GRP_C) -#define 
SCALE_DCOMP_METHOD (1 << 26) -#define UNIQUE_TRANGE_EN_METHOD (1 << 27) - -#define _PORT_TX_DW4_LN0_A 0x162510 -#define _PORT_TX_DW4_LN0_B 0x6C510 -#define _PORT_TX_DW4_LN0_C 0x6C910 -#define _PORT_TX_DW4_GRP_A 0x162D10 -#define _PORT_TX_DW4_GRP_B 0x6CD10 -#define _PORT_TX_DW4_GRP_C 0x6CF10 -#define BXT_PORT_TX_DW4_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_TX_DW4_LN0_B, \ - _PORT_TX_DW4_LN0_C) -#define BXT_PORT_TX_DW4_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_TX_DW4_GRP_B, \ - _PORT_TX_DW4_GRP_C) -#define DEEMPH_SHIFT 24 -#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT) - -#define _PORT_TX_DW5_LN0_A 0x162514 -#define _PORT_TX_DW5_LN0_B 0x6C514 -#define _PORT_TX_DW5_LN0_C 0x6C914 -#define _PORT_TX_DW5_GRP_A 0x162D14 -#define _PORT_TX_DW5_GRP_B 0x6CD14 -#define _PORT_TX_DW5_GRP_C 0x6CF14 -#define BXT_PORT_TX_DW5_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_TX_DW5_LN0_B, \ - _PORT_TX_DW5_LN0_C) -#define BXT_PORT_TX_DW5_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ - _PORT_TX_DW5_GRP_B, \ - _PORT_TX_DW5_GRP_C) -#define DCC_DELAY_RANGE_1 (1 << 9) -#define DCC_DELAY_RANGE_2 (1 << 8) - -#define _PORT_TX_DW14_LN0_A 0x162538 -#define _PORT_TX_DW14_LN0_B 0x6C538 -#define _PORT_TX_DW14_LN0_C 0x6C938 -#define LATENCY_OPTIM_SHIFT 30 -#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT) -#define BXT_PORT_TX_DW14_LN(phy, ch, lane) \ - _MMIO(_BXT_PHY_CH(phy, ch, _PORT_TX_DW14_LN0_B, \ - _PORT_TX_DW14_LN0_C) + \ - _BXT_LANE_OFFSET(lane)) - /* UAIMI scratch pad register 1 */ #define UAIMI_SPR1 _MMIO(0x4F074) /* SKL VccIO mask */ @@ -1228,22 +623,6 @@ #define I915_ASLE_INTERRUPT (1 << 0) #define I915_BSD_USER_INTERRUPT (1 << 25) -#define I915_HDMI_LPE_AUDIO_BASE (VLV_DISPLAY_BASE + 0x65000) -#define I915_HDMI_LPE_AUDIO_SIZE 0x1000 - -/* DisplayPort Audio w/ LPE */ -#define VLV_AUD_CHICKEN_BIT_REG _MMIO(VLV_DISPLAY_BASE + 0x62F38) -#define VLV_CHICKEN_BIT_DBG_ENABLE (1 << 0) - -#define _VLV_AUD_PORT_EN_B_DBG (VLV_DISPLAY_BASE + 0x62F20) -#define _VLV_AUD_PORT_EN_C_DBG (VLV_DISPLAY_BASE + 0x62F30) -#define _VLV_AUD_PORT_EN_D_DBG (VLV_DISPLAY_BASE + 0x62F34) -#define VLV_AUD_PORT_EN_DBG(port) _MMIO_PORT3((port) - PORT_B, \ - _VLV_AUD_PORT_EN_B_DBG, \ - _VLV_AUD_PORT_EN_C_DBG, \ - _VLV_AUD_PORT_EN_D_DBG) -#define VLV_AMP_MUTE (1 << 1) - #define GEN6_BSD_RNCID _MMIO(0x12198) #define GEN7_FF_THREAD_MODE _MMIO(0x20a0) @@ -1264,109 +643,6 @@ #define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1 << 4) /* Default */ #define GEN7_FF_DS_SCHED_HW (0x0 << 4) -/* - * Framebuffer compression (915+ only) - */ - -#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */ -#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */ -#define FBC_CONTROL _MMIO(0x3208) -#define FBC_CTL_EN REG_BIT(31) -#define FBC_CTL_PERIODIC REG_BIT(30) -#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16) -#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x)) -#define FBC_CTL_STOP_ON_MOD REG_BIT(15) -#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */ -#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */ -#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5) -#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x)) -#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0) -#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x)) -#define FBC_COMMAND _MMIO(0x320c) -#define FBC_CMD_COMPRESS REG_BIT(0) -#define FBC_STATUS _MMIO(0x3210) -#define FBC_STAT_COMPRESSING REG_BIT(31) -#define FBC_STAT_COMPRESSED REG_BIT(30) -#define FBC_STAT_MODIFIED REG_BIT(29) -#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0) -#define FBC_CONTROL2 
_MMIO(0x3214) /* i965gm only */ -#define FBC_CTL_FENCE_DBL REG_BIT(4) -#define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2) -#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0) -#define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1) -#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2) -#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3) -#define FBC_CTL_CPU_FENCE_EN REG_BIT(1) -#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0) -#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane)) -#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */ -#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */ -#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1) -#define FBC_MOD_NUM_VALID REG_BIT(0) -#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) /* 49 reisters */ -#define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */ -#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0) -#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1) -#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2) -#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3) - -#define FBC_LL_SIZE (1536) - -/* Framebuffer compression for GM45+ */ -#define DPFC_CB_BASE _MMIO(0x3200) -#define ILK_DPFC_CB_BASE(fbc_id) _MMIO_PIPE((fbc_id), 0x43200, 0x43240) -#define DPFC_CONTROL _MMIO(0x3208) -#define ILK_DPFC_CONTROL(fbc_id) _MMIO_PIPE((fbc_id), 0x43208, 0x43248) -#define DPFC_CTL_EN REG_BIT(31) -#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */ -#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane)) -#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */ -#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */ -#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane)) -#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */ -#define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */ -#define DPFC_CTL_PLANE_BINDING_MASK REG_GENMASK(12, 11) /* lnl+ */ -#define DPFC_CTL_PLANE_BINDING(plane_id) REG_FIELD_PREP(DPFC_CTL_PLANE_BINDING_MASK, (plane_id)) -#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */ -#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */ -#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */ -#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6) -#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0) -#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1) -#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2) -#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0) -#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence)) -#define DPFC_RECOMP_CTL _MMIO(0x320c) -#define ILK_DPFC_RECOMP_CTL(fbc_id) _MMIO_PIPE((fbc_id), 0x4320c, 0x4324c) -#define DPFC_RECOMP_STALL_EN REG_BIT(27) -#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16) -#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0) -#define DPFC_STATUS _MMIO(0x3210) -#define ILK_DPFC_STATUS(fbc_id) _MMIO_PIPE((fbc_id), 0x43210, 0x43250) -#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16) -#define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0) -#define DPFC_STATUS2 _MMIO(0x3214) -#define ILK_DPFC_STATUS2(fbc_id) _MMIO_PIPE((fbc_id), 0x43214, 0x43254) -#define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0) -#define DPFC_FENCE_YOFF _MMIO(0x3218) -#define ILK_DPFC_FENCE_YOFF(fbc_id) _MMIO_PIPE((fbc_id), 0x43218, 0x43258) -#define DPFC_CHICKEN _MMIO(0x3224) -#define ILK_DPFC_CHICKEN(fbc_id) _MMIO_PIPE((fbc_id), 0x43224, 0x43264) -#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */ -#define 
DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */ -#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */ -#define DPFC_CHICKEN_FORCE_SLB_INVALIDATION REG_BIT(13) /* icl+ */ -#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */ - -#define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268) -#define FBC_STRIDE_OVERRIDE REG_BIT(15) -#define FBC_STRIDE_MASK REG_GENMASK(14, 0) -#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x)) - -#define ILK_FBC_RT_BASE _MMIO(0x2128) -#define ILK_FBC_RT_VALID REG_BIT(0) -#define SNB_FBC_FRONT_BUFFER REG_BIT(1) - #define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000) #define ILK_FBCQ_DIS REG_BIT(22) #define ILK_PABSTRETCH_DIS REG_BIT(21) @@ -1382,37 +658,18 @@ #define IVB_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 2) #define IVB_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 3) - -/* - * Framebuffer compression for Sandybridge - * - * The following two registers are of type GTTMMADR - */ -#define SNB_DPFC_CTL_SA _MMIO(0x100100) -#define SNB_DPFC_FENCE_EN REG_BIT(29) -#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0) -#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence)) -#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104) - -/* Framebuffer compression for Ivybridge */ -#define IVB_FBC_RT_BASE _MMIO(0x7020) -#define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024) - #define IPS_CTL _MMIO(0x43408) #define IPS_ENABLE REG_BIT(31) #define IPS_FALSE_COLOR REG_BIT(4) -#define MSG_FBC_REND_STATE(fbc_id) _MMIO_PIPE((fbc_id), 0x50380, 0x50384) -#define FBC_REND_NUKE REG_BIT(2) -#define FBC_REND_CACHE_CLEAN REG_BIT(1) - /* * Clock control & power management */ -#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014) -#define _DPLL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x6018) -#define _CHV_DPLL_C (DISPLAY_MMIO_BASE(dev_priv) + 0x6030) -#define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C) +#define _DPLL_A 0x6014 +#define _DPLL_B 0x6018 +#define _CHV_DPLL_C 0x6030 +#define DPLL(pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \ + (pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C) #define VGA0 _MMIO(0x6000) #define VGA1 _MMIO(0x6004) @@ -1508,10 +765,11 @@ #define SDVO_MULTIPLIER_SHIFT_HIRES 4 #define SDVO_MULTIPLIER_SHIFT_VGA 0 -#define _DPLL_A_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x601c) -#define _DPLL_B_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x6020) -#define _CHV_DPLL_C_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x603c) -#define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD) +#define _DPLL_A_MD 0x601c +#define _DPLL_B_MD 0x6020 +#define _CHV_DPLL_C_MD 0x603c +#define DPLL_MD(pipe) _MMIO_BASE_PIPE3(DISPLAY_MMIO_BASE(dev_priv), \ + (pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD) /* * UDI pixel divider, controlling how many pixels are stuffed into a packet. 
@@ -1716,42 +974,10 @@ #define GMBUSFREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6510) -/* - * Palette regs - */ -#define _PALETTE_A 0xa000 -#define _PALETTE_B 0xa800 -#define _CHV_PALETTE_C 0xc000 -/* 8bit mode / i965+ 10.6 interpolated mode ldw/udw */ -#define PALETTE_RED_MASK REG_GENMASK(23, 16) -#define PALETTE_GREEN_MASK REG_GENMASK(15, 8) -#define PALETTE_BLUE_MASK REG_GENMASK(7, 0) -/* pre-i965 10bit interpolated mode ldw */ -#define PALETTE_10BIT_RED_LDW_MASK REG_GENMASK(23, 16) -#define PALETTE_10BIT_GREEN_LDW_MASK REG_GENMASK(15, 8) -#define PALETTE_10BIT_BLUE_LDW_MASK REG_GENMASK(7, 0) -/* pre-i965 10bit interpolated mode udw */ -#define PALETTE_10BIT_RED_EXP_MASK REG_GENMASK(23, 22) -#define PALETTE_10BIT_RED_MANT_MASK REG_GENMASK(21, 18) -#define PALETTE_10BIT_RED_UDW_MASK REG_GENMASK(17, 16) -#define PALETTE_10BIT_GREEN_EXP_MASK REG_GENMASK(15, 14) -#define PALETTE_10BIT_GREEN_MANT_MASK REG_GENMASK(13, 10) -#define PALETTE_10BIT_GREEN_UDW_MASK REG_GENMASK(9, 8) -#define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6) -#define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2) -#define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0) -#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \ - _PICK_EVEN_2RANGES(pipe, 2, \ - _PALETTE_A, _PALETTE_B, \ - _CHV_PALETTE_C, _CHV_PALETTE_C) + \ - (i) * 4) - #define PEG_BAND_GAP_DATA _MMIO(0x14d68) #define BXT_RP_STATE_CAP _MMIO(0x138170) #define GEN9_RP_STATE_LIMITS _MMIO(0x138148) -#define XEHPSDV_RP_STATE_CAP _MMIO(0x250014) -#define PVC_RP_STATE_CAP _MMIO(0x281014) #define MTL_RP_STATE_CAP _MMIO(0x138000) #define MTL_MEDIAP_STATE_CAP _MMIO(0x138020) @@ -1911,18 +1137,18 @@ #define _PIPE_CRC_RES_4_B_IVB 0x61070 #define _PIPE_CRC_RES_5_B_IVB 0x61074 -#define PIPE_CRC_CTL(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_CTL_A) -#define PIPE_CRC_RES_1_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_1_A_IVB) -#define PIPE_CRC_RES_2_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_2_A_IVB) -#define PIPE_CRC_RES_3_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_3_A_IVB) -#define PIPE_CRC_RES_4_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_4_A_IVB) -#define PIPE_CRC_RES_5_IVB(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_5_A_IVB) +#define PIPE_CRC_CTL(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_CTL_A) +#define PIPE_CRC_RES_1_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_1_A_IVB) +#define PIPE_CRC_RES_2_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_2_A_IVB) +#define PIPE_CRC_RES_3_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_3_A_IVB) +#define PIPE_CRC_RES_4_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_4_A_IVB) +#define PIPE_CRC_RES_5_IVB(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_5_A_IVB) -#define PIPE_CRC_RES_RED(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RED_A) -#define PIPE_CRC_RES_GREEN(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_GREEN_A) -#define PIPE_CRC_RES_BLUE(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_BLUE_A) -#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915) -#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X) +#define PIPE_CRC_RES_RED(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_RED_A) +#define PIPE_CRC_RES_GREEN(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_GREEN_A) +#define PIPE_CRC_RES_BLUE(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_BLUE_A) +#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_RES1_A_I915) +#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(dev_priv, pipe, _PIPE_CRC_RES_RES2_A_G4X) /* Pipe/transcoder A timing regs */ #define 
_TRANS_HTOTAL_A 0x60000 @@ -1991,23 +1217,23 @@ #define _TRANS_VSYNC_DSI1 0x6b814 #define _TRANS_VSYNCSHIFT_DSI1 0x6b828 -#define TRANS_HTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_HTOTAL_A) -#define TRANS_HBLANK(trans) _MMIO_TRANS2((trans), _TRANS_HBLANK_A) -#define TRANS_HSYNC(trans) _MMIO_TRANS2((trans), _TRANS_HSYNC_A) -#define TRANS_VTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_VTOTAL_A) -#define TRANS_VBLANK(trans) _MMIO_TRANS2((trans), _TRANS_VBLANK_A) -#define TRANS_VSYNC(trans) _MMIO_TRANS2((trans), _TRANS_VSYNC_A) -#define BCLRPAT(trans) _MMIO_TRANS2((trans), _BCLRPAT_A) -#define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2((trans), _TRANS_VSYNCSHIFT_A) -#define PIPESRC(pipe) _MMIO_TRANS2((pipe), _PIPEASRC) -#define TRANS_MULT(trans) _MMIO_TRANS2((trans), _TRANS_MULT_A) +#define TRANS_HTOTAL(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A) +#define TRANS_HBLANK(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A) +#define TRANS_HSYNC(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A) +#define TRANS_VTOTAL(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A) +#define TRANS_VBLANK(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A) +#define TRANS_VSYNC(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A) +#define BCLRPAT(trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A) +#define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A) +#define PIPESRC(pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC) +#define TRANS_MULT(trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A) /* VRR registers */ #define _TRANS_VRR_CTL_A 0x60420 #define _TRANS_VRR_CTL_B 0x61420 #define _TRANS_VRR_CTL_C 0x62420 #define _TRANS_VRR_CTL_D 0x63420 -#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A) +#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_CTL_A) #define VRR_CTL_VRR_ENABLE REG_BIT(31) #define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30) #define VRR_CTL_FLIP_LINE_EN REG_BIT(29) @@ -2021,21 +1247,21 @@ #define _TRANS_VRR_VMAX_B 0x61424 #define _TRANS_VRR_VMAX_C 0x62424 #define _TRANS_VRR_VMAX_D 0x63424 -#define TRANS_VRR_VMAX(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMAX_A) +#define TRANS_VRR_VMAX(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMAX_A) #define VRR_VMAX_MASK REG_GENMASK(19, 0) #define _TRANS_VRR_VMIN_A 0x60434 #define _TRANS_VRR_VMIN_B 0x61434 #define _TRANS_VRR_VMIN_C 0x62434 #define _TRANS_VRR_VMIN_D 0x63434 -#define TRANS_VRR_VMIN(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMIN_A) +#define TRANS_VRR_VMIN(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMIN_A) #define VRR_VMIN_MASK REG_GENMASK(15, 0) #define _TRANS_VRR_VMAXSHIFT_A 0x60428 #define _TRANS_VRR_VMAXSHIFT_B 0x61428 #define _TRANS_VRR_VMAXSHIFT_C 0x62428 #define _TRANS_VRR_VMAXSHIFT_D 0x63428 -#define TRANS_VRR_VMAXSHIFT(trans) _MMIO_TRANS2(trans, \ +#define TRANS_VRR_VMAXSHIFT(trans) _MMIO_TRANS2(dev_priv, trans, \ _TRANS_VRR_VMAXSHIFT_A) #define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16) #define VRR_VMAXSHIFT_DEC REG_BIT(16) @@ -2045,7 +1271,7 @@ #define _TRANS_VRR_STATUS_B 0x6142C #define _TRANS_VRR_STATUS_C 0x6242C #define _TRANS_VRR_STATUS_D 0x6342C -#define TRANS_VRR_STATUS(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS_A) +#define TRANS_VRR_STATUS(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS_A) #define VRR_STATUS_VMAX_REACHED REG_BIT(31) #define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30) #define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29) @@ -2065,7 +1291,7 @@ #define _TRANS_VRR_VTOTAL_PREV_B 0x61480 #define _TRANS_VRR_VTOTAL_PREV_C 0x62480 #define 
_TRANS_VRR_VTOTAL_PREV_D 0x63480 -#define TRANS_VRR_VTOTAL_PREV(trans) _MMIO_TRANS2(trans, \ +#define TRANS_VRR_VTOTAL_PREV(trans) _MMIO_TRANS2(dev_priv, trans, \ _TRANS_VRR_VTOTAL_PREV_A) #define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31) #define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30) @@ -2076,7 +1302,7 @@ #define _TRANS_VRR_FLIPLINE_B 0x61438 #define _TRANS_VRR_FLIPLINE_C 0x62438 #define _TRANS_VRR_FLIPLINE_D 0x63438 -#define TRANS_VRR_FLIPLINE(trans) _MMIO_TRANS2(trans, \ +#define TRANS_VRR_FLIPLINE(trans) _MMIO_TRANS2(dev_priv, trans, \ _TRANS_VRR_FLIPLINE_A) #define VRR_FLIPLINE_MASK REG_GENMASK(19, 0) @@ -2084,17 +1310,24 @@ #define _TRANS_VRR_STATUS2_B 0x6143C #define _TRANS_VRR_STATUS2_C 0x6243C #define _TRANS_VRR_STATUS2_D 0x6343C -#define TRANS_VRR_STATUS2(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS2_A) +#define TRANS_VRR_STATUS2(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS2_A) #define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0) #define _TRANS_PUSH_A 0x60A70 #define _TRANS_PUSH_B 0x61A70 #define _TRANS_PUSH_C 0x62A70 #define _TRANS_PUSH_D 0x63A70 -#define TRANS_PUSH(trans) _MMIO_TRANS2(trans, _TRANS_PUSH_A) +#define TRANS_PUSH(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_PUSH_A) #define TRANS_PUSH_EN REG_BIT(31) #define TRANS_PUSH_SEND REG_BIT(30) +#define _TRANS_VRR_VSYNC_A 0x60078 +#define TRANS_VRR_VSYNC(trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VSYNC_A) +#define VRR_VSYNC_END_MASK REG_GENMASK(28, 16) +#define VRR_VSYNC_END(vsync_end) REG_FIELD_PREP(VRR_VSYNC_END_MASK, (vsync_end)) +#define VRR_VSYNC_START_MASK REG_GENMASK(12, 0) +#define VRR_VSYNC_START(vsync_start) REG_FIELD_PREP(VRR_VSYNC_START_MASK, (vsync_start)) + /* VGA port control */ #define ADPA _MMIO(0x61100) #define PCH_ADPA _MMIO(0xe1100) @@ -2312,6 +1545,7 @@ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte * of the infoframe structure specified by CEA-861. 
*/ #define VIDEO_DIP_DATA_SIZE 32 +#define VIDEO_DIP_ASYNC_DATA_SIZE 36 #define VIDEO_DIP_GMP_DATA_SIZE 36 #define VIDEO_DIP_VSC_DATA_SIZE 36 #define VIDEO_DIP_PPS_DATA_SIZE 132 @@ -2350,6 +1584,8 @@ #define VIDEO_DIP_ENABLE_VS_HSW (1 << 8) #define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4) #define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) +/* ADL and later: */ +#define VIDEO_DIP_ENABLE_AS_ADL REG_BIT(23) /* Panel fitting */ #define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230) @@ -2588,6 +1824,9 @@ #define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1) #define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2) #define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3) +#define TRANSCONF_PIXEL_COUNT_SCALING_MASK REG_GENMASK(1, 0) +#define TRANSCONF_PIXEL_COUNT_SCALING_X4 1 + #define _PIPEASTAT 0x70024 #define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31) #define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30) @@ -2639,18 +1878,18 @@ #define PIPESTAT_INT_ENABLE_MASK 0x7fff0000 #define PIPESTAT_INT_STATUS_MASK 0x0000ffff -#define TRANSCONF(trans) _MMIO_PIPE2((trans), _TRANSACONF) -#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL) -#define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH) -#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL) -#define PIPESTAT(pipe) _MMIO_PIPE2(pipe, _PIPEASTAT) +#define TRANSCONF(trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF) +#define PIPEDSL(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL) +#define PIPEFRAME(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH) +#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL) +#define PIPESTAT(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT) #define _PIPEAGCMAX 0x70010 #define _PIPEBGCMAX 0x71010 -#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4) /* u1.16 */ +#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(dev_priv, pipe, _PIPEAGCMAX + (i) * 4) /* u1.16 */ #define _PIPE_ARB_CTL_A 0x70028 /* icl+ */ -#define PIPE_ARB_CTL(pipe) _MMIO_PIPE2(pipe, _PIPE_ARB_CTL_A) +#define PIPE_ARB_CTL(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A) #define PIPE_ARB_USE_PROG_SLOTS REG_BIT(13) #define _PIPE_MISC_A 0x70030 @@ -2694,7 +1933,7 @@ #define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B) #define _ICL_PIPE_A_STATUS 0x70058 -#define ICL_PIPESTATUS(pipe) _MMIO_PIPE2(pipe, _ICL_PIPE_A_STATUS) +#define ICL_PIPESTATUS(pipe) _MMIO_PIPE2(dev_priv, pipe, _ICL_PIPE_A_STATUS) #define PIPE_STATUS_UNDERRUN REG_BIT(31) #define PIPE_STATUS_SOFT_UNDERRUN_XELPD REG_BIT(28) #define PIPE_STATUS_HARD_UNDERRUN_XELPD REG_BIT(27) @@ -2969,8 +2208,8 @@ #define _WM0_PIPEA_ILK 0x45100 #define _WM0_PIPEB_ILK 0x45104 #define _WM0_PIPEC_IVB 0x45200 -#define WM0_PIPE_ILK(pipe) _MMIO_PIPE3((pipe), _WM0_PIPEA_ILK, \ - _WM0_PIPEB_ILK, _WM0_PIPEC_IVB) +#define WM0_PIPE_ILK(pipe) _MMIO_BASE_PIPE3(0, (pipe), _WM0_PIPEA_ILK, \ + _WM0_PIPEB_ILK, _WM0_PIPEC_IVB) #define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16) #define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8) #define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0) @@ -3024,8 +2263,8 @@ /* GM45+ just has to be different */ #define _PIPEA_FRMCOUNT_G4X 0x70040 #define _PIPEA_FLIPCOUNT_G4X 0x70044 -#define PIPE_FRMCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FRMCOUNT_G4X) -#define PIPE_FLIPCOUNT_G4X(pipe) _MMIO_PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X) +#define PIPE_FRMCOUNT_G4X(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X) +#define PIPE_FLIPCOUNT_G4X(pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X) /* Cursor A & B regs */ 
#define _CURACNTR 0x70080 @@ -3053,6 +2292,7 @@ #define MCURSOR_MODE_DISABLE 0x00 #define MCURSOR_MODE_128_32B_AX 0x02 #define MCURSOR_MODE_256_32B_AX 0x03 +#define MCURSOR_MODE_64_2B 0x04 #define MCURSOR_MODE_64_32B_AX 0x07 #define MCURSOR_MODE_128_ARGB_AX (0x20 | MCURSOR_MODE_128_32B_AX) #define MCURSOR_MODE_256_ARGB_AX (0x20 | MCURSOR_MODE_256_32B_AX) @@ -3085,14 +2325,14 @@ #define _CURBBASE_IVB 0x71084 #define _CURBPOS_IVB 0x71088 -#define CURCNTR(pipe) _MMIO_CURSOR2(pipe, _CURACNTR) -#define CURBASE(pipe) _MMIO_CURSOR2(pipe, _CURABASE) -#define CURPOS(pipe) _MMIO_CURSOR2(pipe, _CURAPOS) -#define CURPOS_ERLY_TPT(pipe) _MMIO_CURSOR2(pipe, _CURAPOS_ERLY_TPT) -#define CURSIZE(pipe) _MMIO_CURSOR2(pipe, _CURASIZE) -#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(pipe, _CUR_FBC_CTL_A) -#define CUR_CHICKEN(pipe) _MMIO_CURSOR2(pipe, _CUR_CHICKEN_A) -#define CURSURFLIVE(pipe) _MMIO_CURSOR2(pipe, _CURASURFLIVE) +#define CURCNTR(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURACNTR) +#define CURBASE(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURABASE) +#define CURPOS(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURAPOS) +#define CURPOS_ERLY_TPT(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURAPOS_ERLY_TPT) +#define CURSIZE(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURASIZE) +#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CUR_FBC_CTL_A) +#define CUR_CHICKEN(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CUR_CHICKEN_A) +#define CURSURFLIVE(pipe) _MMIO_CURSOR2(dev_priv, pipe, _CURASURFLIVE) /* Display A control */ #define _DSPAADDR_VLV 0x7017C /* vlv/chv */ @@ -3149,18 +2389,18 @@ #define _DSPASURFLIVE 0x701AC #define _DSPAGAMC 0x701E0 -#define DSPADDR_VLV(plane) _MMIO_PIPE2(plane, _DSPAADDR_VLV) -#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR) -#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR) -#define DSPSTRIDE(plane) _MMIO_PIPE2(plane, _DSPASTRIDE) -#define DSPPOS(plane) _MMIO_PIPE2(plane, _DSPAPOS) -#define DSPSIZE(plane) _MMIO_PIPE2(plane, _DSPASIZE) -#define DSPSURF(plane) _MMIO_PIPE2(plane, _DSPASURF) -#define DSPTILEOFF(plane) _MMIO_PIPE2(plane, _DSPATILEOFF) +#define DSPADDR_VLV(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAADDR_VLV) +#define DSPCNTR(plane) _MMIO_PIPE2(dev_priv, plane, _DSPACNTR) +#define DSPADDR(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAADDR) +#define DSPSTRIDE(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASTRIDE) +#define DSPPOS(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAPOS) +#define DSPSIZE(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASIZE) +#define DSPSURF(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASURF) +#define DSPTILEOFF(plane) _MMIO_PIPE2(dev_priv, plane, _DSPATILEOFF) #define DSPLINOFF(plane) DSPADDR(plane) -#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET) -#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE) -#define DSPGAMC(plane, i) _MMIO_PIPE2(plane, _DSPAGAMC + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */ +#define DSPOFFSET(plane) _MMIO_PIPE2(dev_priv, plane, _DSPAOFFSET) +#define DSPSURFLIVE(plane) _MMIO_PIPE2(dev_priv, plane, _DSPASURFLIVE) +#define DSPGAMC(plane, i) _MMIO_PIPE2(dev_priv, plane, _DSPAGAMC + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */ /* CHV pipe B blender and primary plane */ #define _CHV_BLEND_A 0x60a00 @@ -3187,11 +2427,11 @@ #define PRIM_CONST_ALPHA_MASK REG_GENMASK(7, 0) #define PRIM_CONST_ALPHA(alpha) REG_FIELD_PREP(PRIM_CONST_ALPHA_MASK, (alpha)) -#define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A) -#define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A) -#define PRIMPOS(plane) _MMIO_TRANS2(plane, _PRIMPOS_A) -#define PRIMSIZE(plane) _MMIO_TRANS2(plane, 
_PRIMSIZE_A) -#define PRIMCNSTALPHA(plane) _MMIO_TRANS2(plane, _PRIMCNSTALPHA_A) +#define CHV_BLEND(pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A) +#define CHV_CANVAS(pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A) +#define PRIMPOS(plane) _MMIO_TRANS2(dev_priv, plane, _PRIMPOS_A) +#define PRIMSIZE(plane) _MMIO_TRANS2(dev_priv, plane, _PRIMSIZE_A) +#define PRIMCNSTALPHA(plane) _MMIO_TRANS2(dev_priv, plane, _PRIMCNSTALPHA_A) /* Display/Sprite base address macros */ #define DISP_BASEADDR_MASK (0xfffff000) @@ -3241,346 +2481,6 @@ #define _PIPEDSI0CONF 0x7b008 #define _PIPEDSI1CONF 0x7b808 -/* Sprite A control */ -#define _DVSACNTR 0x72180 -#define DVS_ENABLE REG_BIT(31) -#define DVS_PIPE_GAMMA_ENABLE REG_BIT(30) -#define DVS_YUV_RANGE_CORRECTION_DISABLE REG_BIT(27) -#define DVS_FORMAT_MASK REG_GENMASK(26, 25) -#define DVS_FORMAT_YUV422 REG_FIELD_PREP(DVS_FORMAT_MASK, 0) -#define DVS_FORMAT_RGBX101010 REG_FIELD_PREP(DVS_FORMAT_MASK, 1) -#define DVS_FORMAT_RGBX888 REG_FIELD_PREP(DVS_FORMAT_MASK, 2) -#define DVS_FORMAT_RGBX161616 REG_FIELD_PREP(DVS_FORMAT_MASK, 3) -#define DVS_PIPE_CSC_ENABLE REG_BIT(24) -#define DVS_SOURCE_KEY REG_BIT(22) -#define DVS_RGB_ORDER_XBGR REG_BIT(20) -#define DVS_YUV_FORMAT_BT709 REG_BIT(18) -#define DVS_YUV_ORDER_MASK REG_GENMASK(17, 16) -#define DVS_YUV_ORDER_YUYV REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 0) -#define DVS_YUV_ORDER_UYVY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 1) -#define DVS_YUV_ORDER_YVYU REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 2) -#define DVS_YUV_ORDER_VYUY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 3) -#define DVS_ROTATE_180 REG_BIT(15) -#define DVS_TRICKLE_FEED_DISABLE REG_BIT(14) -#define DVS_TILED REG_BIT(10) -#define DVS_DEST_KEY REG_BIT(2) -#define _DVSALINOFF 0x72184 -#define _DVSASTRIDE 0x72188 -#define _DVSAPOS 0x7218c -#define DVS_POS_Y_MASK REG_GENMASK(31, 16) -#define DVS_POS_Y(y) REG_FIELD_PREP(DVS_POS_Y_MASK, (y)) -#define DVS_POS_X_MASK REG_GENMASK(15, 0) -#define DVS_POS_X(x) REG_FIELD_PREP(DVS_POS_X_MASK, (x)) -#define _DVSASIZE 0x72190 -#define DVS_HEIGHT_MASK REG_GENMASK(31, 16) -#define DVS_HEIGHT(h) REG_FIELD_PREP(DVS_HEIGHT_MASK, (h)) -#define DVS_WIDTH_MASK REG_GENMASK(15, 0) -#define DVS_WIDTH(w) REG_FIELD_PREP(DVS_WIDTH_MASK, (w)) -#define _DVSAKEYVAL 0x72194 -#define _DVSAKEYMSK 0x72198 -#define _DVSASURF 0x7219c -#define DVS_ADDR_MASK REG_GENMASK(31, 12) -#define _DVSAKEYMAXVAL 0x721a0 -#define _DVSATILEOFF 0x721a4 -#define DVS_OFFSET_Y_MASK REG_GENMASK(31, 16) -#define DVS_OFFSET_Y(y) REG_FIELD_PREP(DVS_OFFSET_Y_MASK, (y)) -#define DVS_OFFSET_X_MASK REG_GENMASK(15, 0) -#define DVS_OFFSET_X(x) REG_FIELD_PREP(DVS_OFFSET_X_MASK, (x)) -#define _DVSASURFLIVE 0x721ac -#define _DVSAGAMC_G4X 0x721e0 /* g4x */ -#define _DVSASCALE 0x72204 -#define DVS_SCALE_ENABLE REG_BIT(31) -#define DVS_FILTER_MASK REG_GENMASK(30, 29) -#define DVS_FILTER_MEDIUM REG_FIELD_PREP(DVS_FILTER_MASK, 0) -#define DVS_FILTER_ENHANCING REG_FIELD_PREP(DVS_FILTER_MASK, 1) -#define DVS_FILTER_SOFTENING REG_FIELD_PREP(DVS_FILTER_MASK, 2) -#define DVS_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */ -#define DVS_VERTICAL_OFFSET_ENABLE REG_BIT(27) -#define DVS_SRC_WIDTH_MASK REG_GENMASK(26, 16) -#define DVS_SRC_WIDTH(w) REG_FIELD_PREP(DVS_SRC_WIDTH_MASK, (w)) -#define DVS_SRC_HEIGHT_MASK REG_GENMASK(10, 0) -#define DVS_SRC_HEIGHT(h) REG_FIELD_PREP(DVS_SRC_HEIGHT_MASK, (h)) -#define _DVSAGAMC_ILK 0x72300 /* ilk/snb */ -#define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */ - -#define _DVSBCNTR 0x73180 -#define _DVSBLINOFF 0x73184 -#define _DVSBSTRIDE 0x73188 -#define _DVSBPOS 
0x7318c -#define _DVSBSIZE 0x73190 -#define _DVSBKEYVAL 0x73194 -#define _DVSBKEYMSK 0x73198 -#define _DVSBSURF 0x7319c -#define _DVSBKEYMAXVAL 0x731a0 -#define _DVSBTILEOFF 0x731a4 -#define _DVSBSURFLIVE 0x731ac -#define _DVSBGAMC_G4X 0x731e0 /* g4x */ -#define _DVSBSCALE 0x73204 -#define _DVSBGAMC_ILK 0x73300 /* ilk/snb */ -#define _DVSBGAMCMAX_ILK 0x73340 /* ilk/snb */ - -#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR) -#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF) -#define DVSSTRIDE(pipe) _MMIO_PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE) -#define DVSPOS(pipe) _MMIO_PIPE(pipe, _DVSAPOS, _DVSBPOS) -#define DVSSURF(pipe) _MMIO_PIPE(pipe, _DVSASURF, _DVSBSURF) -#define DVSKEYMAX(pipe) _MMIO_PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL) -#define DVSSIZE(pipe) _MMIO_PIPE(pipe, _DVSASIZE, _DVSBSIZE) -#define DVSSCALE(pipe) _MMIO_PIPE(pipe, _DVSASCALE, _DVSBSCALE) -#define DVSTILEOFF(pipe) _MMIO_PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) -#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) -#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) -#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) -#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 - (i)) * 4) /* 6 x u0.8 */ -#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */ -#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */ - -#define _SPRA_CTL 0x70280 -#define SPRITE_ENABLE REG_BIT(31) -#define SPRITE_PIPE_GAMMA_ENABLE REG_BIT(30) -#define SPRITE_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28) -#define SPRITE_FORMAT_MASK REG_GENMASK(27, 25) -#define SPRITE_FORMAT_YUV422 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 0) -#define SPRITE_FORMAT_RGBX101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 1) -#define SPRITE_FORMAT_RGBX888 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 2) -#define SPRITE_FORMAT_RGBX161616 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 3) -#define SPRITE_FORMAT_YUV444 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 4) -#define SPRITE_FORMAT_XR_BGR101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 5) /* Extended range */ -#define SPRITE_PIPE_CSC_ENABLE REG_BIT(24) -#define SPRITE_SOURCE_KEY REG_BIT(22) -#define SPRITE_RGB_ORDER_RGBX REG_BIT(20) /* only for 888 and 161616 */ -#define SPRITE_YUV_TO_RGB_CSC_DISABLE REG_BIT(19) -#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 REG_BIT(18) /* 0 is BT601 */ -#define SPRITE_YUV_ORDER_MASK REG_GENMASK(17, 16) -#define SPRITE_YUV_ORDER_YUYV REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 0) -#define SPRITE_YUV_ORDER_UYVY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 1) -#define SPRITE_YUV_ORDER_YVYU REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 2) -#define SPRITE_YUV_ORDER_VYUY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 3) -#define SPRITE_ROTATE_180 REG_BIT(15) -#define SPRITE_TRICKLE_FEED_DISABLE REG_BIT(14) -#define SPRITE_PLANE_GAMMA_DISABLE REG_BIT(13) -#define SPRITE_TILED REG_BIT(10) -#define SPRITE_DEST_KEY REG_BIT(2) -#define _SPRA_LINOFF 0x70284 -#define _SPRA_STRIDE 0x70288 -#define _SPRA_POS 0x7028c -#define SPRITE_POS_Y_MASK REG_GENMASK(31, 16) -#define SPRITE_POS_Y(y) REG_FIELD_PREP(SPRITE_POS_Y_MASK, (y)) -#define SPRITE_POS_X_MASK REG_GENMASK(15, 0) -#define SPRITE_POS_X(x) REG_FIELD_PREP(SPRITE_POS_X_MASK, (x)) -#define _SPRA_SIZE 0x70290 -#define SPRITE_HEIGHT_MASK REG_GENMASK(31, 16) -#define SPRITE_HEIGHT(h) REG_FIELD_PREP(SPRITE_HEIGHT_MASK, (h)) -#define SPRITE_WIDTH_MASK REG_GENMASK(15, 0) -#define SPRITE_WIDTH(w) REG_FIELD_PREP(SPRITE_WIDTH_MASK, (w)) 
-#define _SPRA_KEYVAL 0x70294 -#define _SPRA_KEYMSK 0x70298 -#define _SPRA_SURF 0x7029c -#define SPRITE_ADDR_MASK REG_GENMASK(31, 12) -#define _SPRA_KEYMAX 0x702a0 -#define _SPRA_TILEOFF 0x702a4 -#define SPRITE_OFFSET_Y_MASK REG_GENMASK(31, 16) -#define SPRITE_OFFSET_Y(y) REG_FIELD_PREP(SPRITE_OFFSET_Y_MASK, (y)) -#define SPRITE_OFFSET_X_MASK REG_GENMASK(15, 0) -#define SPRITE_OFFSET_X(x) REG_FIELD_PREP(SPRITE_OFFSET_X_MASK, (x)) -#define _SPRA_OFFSET 0x702a4 -#define _SPRA_SURFLIVE 0x702ac -#define _SPRA_SCALE 0x70304 -#define SPRITE_SCALE_ENABLE REG_BIT(31) -#define SPRITE_FILTER_MASK REG_GENMASK(30, 29) -#define SPRITE_FILTER_MEDIUM REG_FIELD_PREP(SPRITE_FILTER_MASK, 0) -#define SPRITE_FILTER_ENHANCING REG_FIELD_PREP(SPRITE_FILTER_MASK, 1) -#define SPRITE_FILTER_SOFTENING REG_FIELD_PREP(SPRITE_FILTER_MASK, 2) -#define SPRITE_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */ -#define SPRITE_VERTICAL_OFFSET_ENABLE REG_BIT(27) -#define SPRITE_SRC_WIDTH_MASK REG_GENMASK(26, 16) -#define SPRITE_SRC_WIDTH(w) REG_FIELD_PREP(SPRITE_SRC_WIDTH_MASK, (w)) -#define SPRITE_SRC_HEIGHT_MASK REG_GENMASK(10, 0) -#define SPRITE_SRC_HEIGHT(h) REG_FIELD_PREP(SPRITE_SRC_HEIGHT_MASK, (h)) -#define _SPRA_GAMC 0x70400 -#define _SPRA_GAMC16 0x70440 -#define _SPRA_GAMC17 0x7044c - -#define _SPRB_CTL 0x71280 -#define _SPRB_LINOFF 0x71284 -#define _SPRB_STRIDE 0x71288 -#define _SPRB_POS 0x7128c -#define _SPRB_SIZE 0x71290 -#define _SPRB_KEYVAL 0x71294 -#define _SPRB_KEYMSK 0x71298 -#define _SPRB_SURF 0x7129c -#define _SPRB_KEYMAX 0x712a0 -#define _SPRB_TILEOFF 0x712a4 -#define _SPRB_OFFSET 0x712a4 -#define _SPRB_SURFLIVE 0x712ac -#define _SPRB_SCALE 0x71304 -#define _SPRB_GAMC 0x71400 -#define _SPRB_GAMC16 0x71440 -#define _SPRB_GAMC17 0x7144c - -#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL) -#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF) -#define SPRSTRIDE(pipe) _MMIO_PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE) -#define SPRPOS(pipe) _MMIO_PIPE(pipe, _SPRA_POS, _SPRB_POS) -#define SPRSIZE(pipe) _MMIO_PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE) -#define SPRKEYVAL(pipe) _MMIO_PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL) -#define SPRKEYMSK(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK) -#define SPRSURF(pipe) _MMIO_PIPE(pipe, _SPRA_SURF, _SPRB_SURF) -#define SPRKEYMAX(pipe) _MMIO_PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) -#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) -#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) -#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) -#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */ -#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */ -#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */ -#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) - -#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) -#define SP_ENABLE REG_BIT(31) -#define SP_PIPE_GAMMA_ENABLE REG_BIT(30) -#define SP_FORMAT_MASK REG_GENMASK(29, 26) -#define SP_FORMAT_YUV422 REG_FIELD_PREP(SP_FORMAT_MASK, 0) -#define SP_FORMAT_8BPP REG_FIELD_PREP(SP_FORMAT_MASK, 2) -#define SP_FORMAT_BGR565 REG_FIELD_PREP(SP_FORMAT_MASK, 5) -#define SP_FORMAT_BGRX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 6) -#define SP_FORMAT_BGRA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 7) -#define SP_FORMAT_RGBX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 8) -#define SP_FORMAT_RGBA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 9) -#define 
SP_FORMAT_BGRX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 10) /* CHV pipe B */ -#define SP_FORMAT_BGRA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 11) /* CHV pipe B */ -#define SP_FORMAT_RGBX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 14) -#define SP_FORMAT_RGBA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 15) -#define SP_ALPHA_PREMULTIPLY REG_BIT(23) /* CHV pipe B */ -#define SP_SOURCE_KEY REG_BIT(22) -#define SP_YUV_FORMAT_BT709 REG_BIT(18) -#define SP_YUV_ORDER_MASK REG_GENMASK(17, 16) -#define SP_YUV_ORDER_YUYV REG_FIELD_PREP(SP_YUV_ORDER_MASK, 0) -#define SP_YUV_ORDER_UYVY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 1) -#define SP_YUV_ORDER_YVYU REG_FIELD_PREP(SP_YUV_ORDER_MASK, 2) -#define SP_YUV_ORDER_VYUY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 3) -#define SP_ROTATE_180 REG_BIT(15) -#define SP_TILED REG_BIT(10) -#define SP_MIRROR REG_BIT(8) /* CHV pipe B */ -#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184) -#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188) -#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c) -#define SP_POS_Y_MASK REG_GENMASK(31, 16) -#define SP_POS_Y(y) REG_FIELD_PREP(SP_POS_Y_MASK, (y)) -#define SP_POS_X_MASK REG_GENMASK(15, 0) -#define SP_POS_X(x) REG_FIELD_PREP(SP_POS_X_MASK, (x)) -#define _SPASIZE (VLV_DISPLAY_BASE + 0x72190) -#define SP_HEIGHT_MASK REG_GENMASK(31, 16) -#define SP_HEIGHT(h) REG_FIELD_PREP(SP_HEIGHT_MASK, (h)) -#define SP_WIDTH_MASK REG_GENMASK(15, 0) -#define SP_WIDTH(w) REG_FIELD_PREP(SP_WIDTH_MASK, (w)) -#define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194) -#define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198) -#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c) -#define SP_ADDR_MASK REG_GENMASK(31, 12) -#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0) -#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4) -#define SP_OFFSET_Y_MASK REG_GENMASK(31, 16) -#define SP_OFFSET_Y(y) REG_FIELD_PREP(SP_OFFSET_Y_MASK, (y)) -#define SP_OFFSET_X_MASK REG_GENMASK(15, 0) -#define SP_OFFSET_X(x) REG_FIELD_PREP(SP_OFFSET_X_MASK, (x)) -#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8) -#define SP_CONST_ALPHA_ENABLE REG_BIT(31) -#define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0) -#define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha)) -#define _SPASURFLIVE (VLV_DISPLAY_BASE + 0x721ac) -#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0) -#define SP_CONTRAST_MASK REG_GENMASK(26, 18) -#define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */ -#define SP_BRIGHTNESS_MASK REG_GENMASK(7, 0) -#define SP_BRIGHTNESS(x) REG_FIELD_PREP(SP_BRIGHTNESS_MASK, (x)) /* s8 */ -#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4) -#define SP_SH_SIN_MASK REG_GENMASK(26, 16) -#define SP_SH_SIN(x) REG_FIELD_PREP(SP_SH_SIN_MASK, (x)) /* s4.7 */ -#define SP_SH_COS_MASK REG_GENMASK(9, 0) -#define SP_SH_COS(x) REG_FIELD_PREP(SP_SH_COS_MASK, (x)) /* u3.7 */ -#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0) - -#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280) -#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284) -#define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288) -#define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c) -#define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290) -#define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294) -#define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298) -#define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c) -#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0) -#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4) -#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) -#define _SPBSURFLIVE (VLV_DISPLAY_BASE + 0x722ac) -#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0) -#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4) -#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0) - -#define 
_VLV_SPR(pipe, plane_id, reg_a, reg_b) \ - _PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b)) -#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \ - _MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b))) - -#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR) -#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF) -#define SPSTRIDE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASTRIDE, _SPBSTRIDE) -#define SPPOS(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAPOS, _SPBPOS) -#define SPSIZE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASIZE, _SPBSIZE) -#define SPKEYMINVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMINVAL, _SPBKEYMINVAL) -#define SPKEYMSK(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMSK, _SPBKEYMSK) -#define SPSURF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURF, _SPBSURF) -#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL) -#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF) -#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA) -#define SPSURFLIVE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURFLIVE, _SPBSURFLIVE) -#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0) -#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1) -#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */ - -/* - * CHV pipe B sprite CSC - * - * |cr| |c0 c1 c2| |cr + cr_ioff| |cr_ooff| - * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff| - * |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff| - */ -#define _MMIO_CHV_SPCSC(plane_id, reg) \ - _MMIO(VLV_DISPLAY_BASE + ((plane_id) - PLANE_SPRITE0) * 0x1000 + (reg)) - -#define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900) -#define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904) -#define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908) -#define SPCSC_OOFF_MASK REG_GENMASK(26, 16) -#define SPCSC_OOFF(x) REG_FIELD_PREP(SPCSC_OOFF_MASK, (x) & 0x7ff) /* s11 */ -#define SPCSC_IOFF_MASK REG_GENMASK(10, 0) -#define SPCSC_IOFF(x) REG_FIELD_PREP(SPCSC_IOFF_MASK, (x) & 0x7ff) /* s11 */ - -#define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c) -#define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910) -#define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914) -#define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918) -#define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c) -#define SPCSC_C1_MASK REG_GENMASK(30, 16) -#define SPCSC_C1(x) REG_FIELD_PREP(SPCSC_C1_MASK, (x) & 0x7fff) /* s3.12 */ -#define SPCSC_C0_MASK REG_GENMASK(14, 0) -#define SPCSC_C0(x) REG_FIELD_PREP(SPCSC_C0_MASK, (x) & 0x7fff) /* s3.12 */ - -#define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920) -#define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924) -#define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928) -#define SPCSC_IMAX_MASK REG_GENMASK(26, 16) -#define SPCSC_IMAX(x) REG_FIELD_PREP(SPCSC_IMAX_MASK, (x) & 0x7ff) /* s11 */ -#define SPCSC_IMIN_MASK REG_GENMASK(10, 0) -#define SPCSC_IMIN(x) REG_FIELD_PREP(SPCSC_IMIN_MASK, (x) & 0x7ff) /* s11 */ - -#define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c) -#define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930) -#define SPCSCCROCLAMP(plane_id) 
_MMIO_CHV_SPCSC(plane_id, 0x6d934) -#define SPCSC_OMAX_MASK REG_GENMASK(25, 16) -#define SPCSC_OMAX(x) REG_FIELD_PREP(SPCSC_OMAX_MASK, (x)) /* u10 */ -#define SPCSC_OMIN_MASK REG_GENMASK(9, 0) -#define SPCSC_OMIN(x) REG_FIELD_PREP(SPCSC_OMIN_MASK, (x)) /* u10 */ - /* Skylake plane registers */ #define _PLANE_CTL_1_A 0x70180 @@ -3990,14 +2890,14 @@ #define _PIPEB_LINK_M2 0x61048 #define _PIPEB_LINK_N2 0x6104c -#define PIPE_DATA_M1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M1) -#define PIPE_DATA_N1(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N1) -#define PIPE_DATA_M2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_M2) -#define PIPE_DATA_N2(tran) _MMIO_TRANS2(tran, _PIPEA_DATA_N2) -#define PIPE_LINK_M1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M1) -#define PIPE_LINK_N1(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N1) -#define PIPE_LINK_M2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_M2) -#define PIPE_LINK_N2(tran) _MMIO_TRANS2(tran, _PIPEA_LINK_N2) +#define PIPE_DATA_M1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1) +#define PIPE_DATA_N1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1) +#define PIPE_DATA_M2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2) +#define PIPE_DATA_N2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2) +#define PIPE_LINK_M1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1) +#define PIPE_LINK_N1(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1) +#define PIPE_LINK_M2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2) +#define PIPE_LINK_N2(tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2) /* CPU panel fitter */ /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ @@ -4555,6 +3455,11 @@ #define GLK_CL1_PWR_DOWN REG_BIT(11) #define GLK_CL0_PWR_DOWN REG_BIT(10) +#define CHICKEN_MISC_3 _MMIO(0x42088) +#define DP_MST_DPT_DPTP_ALIGN_WA(trans) REG_BIT(9 + (trans) - TRANSCODER_A) +#define DP_MST_SHORT_HBLANK_WA(trans) REG_BIT(5 + (trans) - TRANSCODER_A) +#define DP_MST_FEC_BS_JITTER_WA(trans) REG_BIT(0 + (trans) - TRANSCODER_A) + #define CHICKEN_MISC_4 _MMIO(0x4208c) #define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13) #define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0) @@ -4611,7 +3516,9 @@ #define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */ #define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */ #define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15) +#define DP_FEC_BS_JITTER_WA REG_BIT(15) #define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12) +#define DP_DSC_INSERT_SF_AT_EOL_WA REG_BIT(4) #define DISP_ARB_CTL _MMIO(0x45000) #define DISP_FBC_MEMORY_WAKE REG_BIT(31) @@ -5010,27 +3917,29 @@ #define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) /* Per-transcoder DIP controls (VLV) */ -#define _VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200) -#define _VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208) -#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210) - -#define _VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170) -#define _VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174) -#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178) - -#define _CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0) -#define _CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4) -#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8) - -#define VLV_TVIDEO_DIP_CTL(pipe) \ - _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_CTL_A, \ - _VLV_VIDEO_DIP_CTL_B, _CHV_VIDEO_DIP_CTL_C) -#define VLV_TVIDEO_DIP_DATA(pipe) \ - _MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_DATA_A, \ - _VLV_VIDEO_DIP_DATA_B, _CHV_VIDEO_DIP_DATA_C) -#define VLV_TVIDEO_DIP_GCP(pipe) \ - 
_MMIO_PIPE3((pipe), _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \ - _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, _CHV_VIDEO_DIP_GDCP_PAYLOAD_C) +#define _VLV_VIDEO_DIP_CTL_A 0x60200 +#define _VLV_VIDEO_DIP_CTL_B 0x61170 +#define _CHV_VIDEO_DIP_CTL_C 0x611f0 +#define VLV_TVIDEO_DIP_CTL(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \ + _VLV_VIDEO_DIP_CTL_A, \ + _VLV_VIDEO_DIP_CTL_B, \ + _CHV_VIDEO_DIP_CTL_C) + +#define _VLV_VIDEO_DIP_DATA_A 0x60208 +#define _VLV_VIDEO_DIP_DATA_B 0x61174 +#define _CHV_VIDEO_DIP_DATA_C 0x611f4 +#define VLV_TVIDEO_DIP_DATA(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \ + _VLV_VIDEO_DIP_DATA_A, \ + _VLV_VIDEO_DIP_DATA_B, \ + _CHV_VIDEO_DIP_DATA_C) + +#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210 +#define _VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178 +#define _CHV_VIDEO_DIP_GDCP_PAYLOAD_C 0x611f8 +#define VLV_TVIDEO_DIP_GCP(pipe) _MMIO_BASE_PIPE3(VLV_DISPLAY_BASE, (pipe), \ + _VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \ + _VLV_VIDEO_DIP_GDCP_PAYLOAD_B, \ + _CHV_VIDEO_DIP_GDCP_PAYLOAD_C) /* Haswell DIP controls */ @@ -5040,6 +3949,7 @@ #define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0 #define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0 #define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320 +#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484 #define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440 #define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240 #define _HSW_VIDEO_DIP_VS_ECC_A 0x60280 @@ -5054,6 +3964,7 @@ #define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0 #define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0 #define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320 +#define _ADL_VIDEO_DIP_AS_DATA_B 0x61484 #define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440 #define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240 #define _HSW_VIDEO_DIP_VS_ECC_B 0x61280 @@ -5073,22 +3984,25 @@ #define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4 #define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4 -#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A) -#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A) -#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4) -#define GLK_TVIDEO_DIP_DRM_DATA(trans, i) _MMIO_TRANS2(trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4) -#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4) -#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4) +#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A) +#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A) +#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4) +#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4) +#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4) +#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4) +#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4) +#define GLK_TVIDEO_DIP_DRM_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4) +#define 
ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4) +#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4) +/*ADLP and later: */ +#define ADL_TVIDEO_DIP_AS_SDP_DATA(trans, i) _MMIO_TRANS2(dev_priv, trans,\ + _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4) #define _HSW_STEREO_3D_CTL_A 0x70020 #define S3D_ENABLE (1 << 31) #define _HSW_STEREO_3D_CTL_B 0x71020 -#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A) +#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A) #define _PCH_TRANS_HTOTAL_B 0xe1000 #define _PCH_TRANS_HBLANK_B 0xe1004 @@ -5401,7 +4315,7 @@ #define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */ #define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0) #define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23 -#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* xehpsdv, pvc */ +#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* pvc */ /* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */ #define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0 #define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1 @@ -5566,15 +4480,6 @@ enum skl_power_gate { ((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1) #define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg))) -#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) -#define _ICL_AUX_ANAOVRD1_A 0x162398 -#define _ICL_AUX_ANAOVRD1_B 0x6C398 -#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \ - _ICL_AUX_ANAOVRD1_A, \ - _ICL_AUX_ANAOVRD1_B)) -#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7) -#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0) - /* Per-pipe DDI Function Control */ #define _TRANS_DDI_FUNC_CTL_A 0x60400 #define _TRANS_DDI_FUNC_CTL_B 0x61400 @@ -5583,7 +4488,7 @@ enum skl_power_gate { #define _TRANS_DDI_FUNC_CTL_EDP 0x6F400 #define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400 #define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00 -#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A) +#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL_A) #define TRANS_DDI_FUNC_ENABLE (1 << 31) /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ @@ -5638,7 +4543,7 @@ enum skl_power_gate { #define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404 #define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404 #define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04 -#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL2_A) +#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL2_A) #define PORT_SYNC_MODE_ENABLE REG_BIT(4) #define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0) #define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x)) @@ -5651,7 +4556,7 @@ enum skl_power_gate { #define _DP_TP_CTL_B 0x64140 #define _TGL_DP_TP_CTL_A 0x60540 #define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B) -#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_CTL_A) +#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A) #define DP_TP_CTL_ENABLE (1 << 31) #define DP_TP_CTL_FEC_ENABLE (1 << 30) #define DP_TP_CTL_MODE_SST (0 << 27) @@ -5677,7 +4582,7 @@ enum skl_power_gate { #define _DP_TP_STATUS_B 0x64144 #define _TGL_DP_TP_STATUS_A 0x60544 #define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B) -#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_STATUS_A) +#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A) #define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28) #define 
DP_TP_STATUS_IDLE_DONE (1 << 25) #define DP_TP_STATUS_ACT_SENT (1 << 24) @@ -5858,14 +4763,14 @@ enum skl_power_gate { #define _TRANSB_MSA_MISC 0x61410 #define _TRANSC_MSA_MISC 0x62410 #define _TRANS_EDP_MSA_MISC 0x6f410 -#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC) +#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(dev_priv, tran, _TRANSA_MSA_MISC) /* See DP_MSA_MISC_* for the bit definitions */ #define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C #define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C #define _TRANS_C_SET_CONTEXT_LATENCY 0x6207C #define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C -#define TRANS_SET_CONTEXT_LATENCY(tran) _MMIO_TRANS2(tran, _TRANS_A_SET_CONTEXT_LATENCY) +#define TRANS_SET_CONTEXT_LATENCY(tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_A_SET_CONTEXT_LATENCY) #define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0) #define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x)) @@ -5900,7 +4805,9 @@ enum skl_power_gate { #define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1) #define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2) #define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3) -#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_BIT(25) +#define MDCLK_SOURCE_SEL_MASK REG_GENMASK(25, 25) +#define MDCLK_SOURCE_SEL_CD2XCLK REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 0) +#define MDCLK_SOURCE_SEL_CDCLK_PLL REG_FIELD_PREP(MDCLK_SOURCE_SEL_MASK, 1) #define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22) #define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0) #define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1) @@ -6317,7 +5224,7 @@ enum skl_power_gate { #define _VLV_PIPE_MSA_MISC_A 0x70048 #define VLV_PIPE_MSA_MISC(pipe) \ - _MMIO_PIPE2(pipe, _VLV_PIPE_MSA_MISC_A) + _MMIO_PIPE2(dev_priv, pipe, _VLV_PIPE_MSA_MISC_A) #define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31) #define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */ @@ -6390,7 +5297,7 @@ enum skl_power_gate { #define _MTL_CLKGATE_DIS_TRANS_A 0x604E8 #define _MTL_CLKGATE_DIS_TRANS_B 0x614E8 -#define MTL_CLKGATE_DIS_TRANS(trans) _MMIO_TRANS2(trans, _MTL_CLKGATE_DIS_TRANS_A) +#define MTL_CLKGATE_DIS_TRANS(trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A) #define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7) #define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700) diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c index 0d735d5c2b..942345548b 100644 --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c @@ -126,7 +126,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, return 0; err_free_blocks: - drm_buddy_free_list(mm, &bman_res->blocks); + drm_buddy_free_list(mm, &bman_res->blocks, 0); mutex_unlock(&bman->lock); err_free_res: ttm_resource_fini(man, &bman_res->base); @@ -141,7 +141,7 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man, struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); mutex_lock(&bman->lock); - drm_buddy_free_list(&bman->mm, &bman_res->blocks); + drm_buddy_free_list(&bman->mm, &bman_res->blocks, 0); bman->visible_avail += bman_res->used_visible_size; mutex_unlock(&bman->lock); @@ -345,7 +345,7 @@ int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type) ttm_set_driver_manager(bdev, type, NULL); mutex_lock(&bman->lock); - drm_buddy_free_list(mm, &bman->reserved); + drm_buddy_free_list(mm, &bman->reserved, 0); drm_buddy_fini(mm); 
bman->visible_avail += bman->visible_reserved; WARN_ON_ONCE(bman->visible_avail != bman->visible_size); diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index b45ef05606..06ec6ceb61 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -73,20 +73,6 @@ bool i915_error_injected(void); __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \ fmt, ##__VA_ARGS__) -#if defined(GCC_VERSION) && GCC_VERSION >= 70000 -#define add_overflows_t(T, A, B) \ - __builtin_add_overflow_p((A), (B), (T)0) -#else -#define add_overflows_t(T, A, B) ({ \ - typeof(A) a = (A); \ - typeof(B) b = (B); \ - (T)(a + b) < a; \ -}) -#endif - -#define add_overflows(A, B) \ - add_overflows_t(typeof((A) + (B)), (A), (B)) - #define range_overflows(start, size, max) ({ \ typeof(start) start__ = (start); \ typeof(size) size__ = (size); \ diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index b70715b141..d2f064d252 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1776,8 +1776,6 @@ static void release_references(struct i915_vma *vma, struct intel_gt *gt, if (vm_ddestroy) i915_vm_resv_put(vma->vm); - /* Wait for async active retire */ - i915_active_wait(&vma->active); i915_active_fini(&vma->active); GEM_WARN_ON(vma->resource); i915_vma_free(vma); diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c index 9c21ce69bd..1dc5281b2a 100644 --- a/drivers/gpu/drm/i915/intel_clock_gating.c +++ b/drivers/gpu/drm/i915/intel_clock_gating.c @@ -28,6 +28,7 @@ #include "display/intel_de.h" #include "display/intel_display.h" #include "display/intel_display_trace.h" +#include "display/intel_fbc_regs.h" #include "display/skl_watermark.h" #include "gt/intel_engine_regs.h" @@ -105,12 +106,6 @@ static void bxt_init_clock_gating(struct drm_i915_private *i915) * Display WA #0562: bxt */ intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS); - - /* - * WaFbcHighMemBwCorruptionAvoidance:bxt - * Display WA #0883: bxt - */ - intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0); } static void glk_init_clock_gating(struct drm_i915_private *i915) @@ -349,13 +344,6 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *i915, intel_uncore_write(&i915->uncore, GEN7_MISCCPCTL, misccpctl); } -static void xehpsdv_init_clock_gating(struct drm_i915_private *i915) -{ - /* Wa_22010146351:xehpsdv */ - if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) - intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS); -} - static void dg2_init_clock_gating(struct drm_i915_private *i915) { /* Wa_22010954014:dg2 */ @@ -363,17 +351,6 @@ static void dg2_init_clock_gating(struct drm_i915_private *i915) SGSI_SIDECLK_DIS); } -static void pvc_init_clock_gating(struct drm_i915_private *i915) -{ - /* Wa_14012385139:pvc */ - if (IS_PVC_BD_STEP(i915, STEP_A0, STEP_B0)) - intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS); - - /* Wa_22010954014:pvc */ - if (IS_PVC_BD_STEP(i915, STEP_A0, STEP_B0)) - intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS); -} - static void cnp_init_clock_gating(struct drm_i915_private *i915) { if (!HAS_PCH_CNP(i915)) @@ -396,13 +373,6 @@ static void cfl_init_clock_gating(struct drm_i915_private *i915) * Display WA #0562: cfl */ intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS); - - /* - * WaFbcNukeOnHostModify:cfl - * Display WA #0873: cfl - */ - 
intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), - 0, DPFC_NUKE_ON_ANY_MODIFICATION); } static void kbl_init_clock_gating(struct drm_i915_private *i915) @@ -427,13 +397,6 @@ static void kbl_init_clock_gating(struct drm_i915_private *i915) * Display WA #0562: kbl */ intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS); - - /* - * WaFbcNukeOnHostModify:kbl - * Display WA #0873: kbl - */ - intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), - 0, DPFC_NUKE_ON_ANY_MODIFICATION); } static void skl_init_clock_gating(struct drm_i915_private *i915) @@ -452,19 +415,6 @@ static void skl_init_clock_gating(struct drm_i915_private *i915) * Display WA #0562: skl */ intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS); - - /* - * WaFbcNukeOnHostModify:skl - * Display WA #0873: skl - */ - intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), - 0, DPFC_NUKE_ON_ANY_MODIFICATION); - - /* - * WaFbcHighMemBwCorruptionAvoidance:skl - * Display WA #0883: skl - */ - intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0); } static void bdw_init_clock_gating(struct drm_i915_private *i915) @@ -762,9 +712,7 @@ static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = .init_clock_gating = platform##_init_clock_gating, \ } -CG_FUNCS(pvc); CG_FUNCS(dg2); -CG_FUNCS(xehpsdv); CG_FUNCS(cfl); CG_FUNCS(skl); CG_FUNCS(kbl); @@ -797,12 +745,8 @@ CG_FUNCS(nop); */ void intel_clock_gating_hooks_init(struct drm_i915_private *i915) { - if (IS_PONTEVECCHIO(i915)) - i915->clock_gating_funcs = &pvc_clock_gating_funcs; - else if (IS_DG2(i915)) + if (IS_DG2(i915)) i915->clock_gating_funcs = &dg2_clock_gating_funcs; - else if (IS_XEHPSDV(i915)) - i915->clock_gating_funcs = &xehpsdv_clock_gating_funcs; else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) i915->clock_gating_funcs = &cfl_clock_gating_funcs; else if (IS_SKYLAKE(i915)) diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 59bea1398c..a0a43ea07f 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -70,9 +70,7 @@ static const char * const platform_names[] = { PLATFORM_NAME(DG1), PLATFORM_NAME(ALDERLAKE_S), PLATFORM_NAME(ALDERLAKE_P), - PLATFORM_NAME(XEHPSDV), PLATFORM_NAME(DG2), - PLATFORM_NAME(PONTEVECCHIO), PLATFORM_NAME(METEORLAKE), }; #undef PLATFORM_NAME diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index eba2f0b919..d1a2abc7e5 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -87,9 +87,7 @@ enum intel_platform { INTEL_DG1, INTEL_ALDERLAKE_S, INTEL_ALDERLAKE_P, - INTEL_XEHPSDV, INTEL_DG2, - INTEL_PONTEVECCHIO, INTEL_METEORLAKE, INTEL_MAX_PLATFORMS }; diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 9b6d87c8b5..5a01d60e51 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -28,6 +28,7 @@ #include "gt/intel_context.h" #include "gt/intel_ring.h" #include "gt/shmem_utils.h" +#include /** * DOC: Intel GVT-g host support diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c index 87ecc5104f..e1a35f70b5 100644 --- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -3,6 +3,7 @@ * Copyright © 2020 Intel Corporation */ +#include "display/bxt_dpio_phy_regs.h" #include "display/intel_audio_regs.h" 
#include "display/intel_backlight_regs.h" #include "display/intel_color_regs.h" @@ -10,9 +11,11 @@ #include "display/intel_dmc_regs.h" #include "display/intel_dp_aux_regs.h" #include "display/intel_dpio_phy.h" +#include "display/intel_fbc_regs.h" #include "display/intel_fdi_regs.h" #include "display/intel_lvds_regs.h" #include "display/intel_psr_regs.h" +#include "display/intel_sprite_regs.h" #include "display/skl_watermark_regs.h" #include "display/vlv_dsi_pll_regs.h" #include "gt/intel_engine_regs.h" @@ -1155,11 +1158,11 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0)); MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0)); MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0)); - MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_LN(DPIO_PHY0, DPIO_CH0, 0)); MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0)); - MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH0, 0)); MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0)); - MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW4_LN(DPIO_PHY0, DPIO_CH0, 0)); MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0)); MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0)); MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1)); @@ -1180,11 +1183,11 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1)); MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1)); MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1)); - MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW2_LN(DPIO_PHY0, DPIO_CH1, 0)); MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1)); - MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH1, 0)); MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1)); - MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW4_LN(DPIO_PHY0, DPIO_CH1, 0)); MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1)); MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0)); MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1)); @@ -1205,11 +1208,11 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0)); MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0)); MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0)); - MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_LN(DPIO_PHY1, DPIO_CH0, 0)); MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0)); - MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW3_LN(DPIO_PHY1, DPIO_CH0, 0)); MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0)); - MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW4_LN(DPIO_PHY1, DPIO_CH0, 0)); MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0)); MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0)); MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1)); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index d4e8441288..2d0647aca9 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -272,15 +272,11 @@ intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm) * intel_runtime_pm_get_noresume - grab a runtime pm reference * @rpm: the intel_runtime_pm structure * - * This function grabs a device-level runtime pm reference (mostly used for GEM - * code to ensure the GTT or GT is on). 
+ * This function grabs a device-level runtime pm reference. * - * It will _not_ power up the device but instead only check that it's powered - * on. Therefore it is only valid to call this functions from contexts where - * the device is known to be powered up and where trying to power it up would - * result in hilarity and deadlocks. That pretty much means only the system - * suspend/resume code where this is used to grab runtime pm references for - * delayed setup down in work items. + * It will _not_ resume the device but instead only get an extra wakeref. + * Therefore it is only valid to call this functions from contexts where + * the device is known to be active and with another wakeref previously hold. * * Any runtime pm reference obtained by this function must have a symmetric * call to intel_runtime_pm_put() to release the reference again. @@ -289,7 +285,7 @@ intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm) */ intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm) { - assert_rpm_wakelock_held(rpm); + assert_rpm_raw_wakeref_held(rpm); pm_runtime_get_noresume(rpm->kdev); intel_runtime_pm_acquire(rpm, true); diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c index b4162f1be7..a5adfb5d8f 100644 --- a/drivers/gpu/drm/i915/intel_step.c +++ b/drivers/gpu/drm/i915/intel_step.c @@ -102,13 +102,6 @@ static const struct intel_step_info adlp_revids[] = { [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 }, }; -static const struct intel_step_info xehpsdv_revids[] = { - [0x0] = { COMMON_GT_MEDIA_STEP(A0) }, - [0x1] = { COMMON_GT_MEDIA_STEP(A1) }, - [0x4] = { COMMON_GT_MEDIA_STEP(B0) }, - [0x8] = { COMMON_GT_MEDIA_STEP(C0) }, -}; - static const struct intel_step_info dg2_g10_revid_step_tbl[] = { [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_A0 }, @@ -153,8 +146,6 @@ static u8 gmd_to_intel_step(struct drm_i915_private *i915, return step; } -static void pvc_step_init(struct drm_i915_private *i915, int pci_revid); - void intel_step_init(struct drm_i915_private *i915) { const struct intel_step_info *revids = NULL; @@ -178,10 +169,7 @@ void intel_step_init(struct drm_i915_private *i915) return; } - if (IS_PONTEVECCHIO(i915)) { - pvc_step_init(i915, revid); - return; - } else if (IS_DG2_G10(i915)) { + if (IS_DG2_G10(i915)) { revids = dg2_g10_revid_step_tbl; size = ARRAY_SIZE(dg2_g10_revid_step_tbl); } else if (IS_DG2_G11(i915)) { @@ -190,9 +178,6 @@ void intel_step_init(struct drm_i915_private *i915) } else if (IS_DG2_G12(i915)) { revids = dg2_g12_revid_step_tbl; size = ARRAY_SIZE(dg2_g12_revid_step_tbl); - } else if (IS_XEHPSDV(i915)) { - revids = xehpsdv_revids; - size = ARRAY_SIZE(xehpsdv_revids); } else if (IS_ALDERLAKE_P_N(i915)) { revids = adlp_n_revids; size = ARRAY_SIZE(adlp_n_revids); @@ -277,69 +262,6 @@ void intel_step_init(struct drm_i915_private *i915) RUNTIME_INFO(i915)->step = step; } -#define PVC_BD_REVID GENMASK(5, 3) -#define PVC_CT_REVID GENMASK(2, 0) - -static const int pvc_bd_subids[] = { - [0x0] = STEP_A0, - [0x3] = STEP_B0, - [0x4] = STEP_B1, - [0x5] = STEP_B3, -}; - -static const int pvc_ct_subids[] = { - [0x3] = STEP_A0, - [0x5] = STEP_B0, - [0x6] = STEP_B1, - [0x7] = STEP_C0, -}; - -static int -pvc_step_lookup(struct drm_i915_private *i915, const char *type, - const int *table, int size, int subid) -{ - if (subid < size && table[subid] != STEP_NONE) - return table[subid]; - - drm_warn(&i915->drm, "Unknown %s id 
0x%02x\n", type, subid); - - /* - * As on other platforms, try to use the next higher ID if we land on a - * gap in the table. - */ - while (subid < size && table[subid] == STEP_NONE) - subid++; - - if (subid < size) { - drm_dbg(&i915->drm, "Using steppings for %s id 0x%02x\n", - type, subid); - return table[subid]; - } - - drm_dbg(&i915->drm, "Using future steppings\n"); - return STEP_FUTURE; -} - -/* - * PVC needs special handling since we don't lookup the - * revid in a table, but rather specific bitfields within - * the revid for various components. - */ -static void pvc_step_init(struct drm_i915_private *i915, int pci_revid) -{ - int ct_subid, bd_subid; - - bd_subid = FIELD_GET(PVC_BD_REVID, pci_revid); - ct_subid = FIELD_GET(PVC_CT_REVID, pci_revid); - - RUNTIME_INFO(i915)->step.basedie_step = - pvc_step_lookup(i915, "Base Die", pvc_bd_subids, - ARRAY_SIZE(pvc_bd_subids), bd_subid); - RUNTIME_INFO(i915)->step.graphics_step = - pvc_step_lookup(i915, "Compute Tile", pvc_ct_subids, - ARRAY_SIZE(pvc_ct_subids), ct_subid); -} - #define STEP_NAME_CASE(name) \ case STEP_##name: \ return #name; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 76400e9c40..729409a4ba 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1106,45 +1106,6 @@ static const struct i915_range dg2_shadowed_regs[] = { { .start = 0x1F8510, .end = 0x1F8550 }, }; -static const struct i915_range pvc_shadowed_regs[] = { - { .start = 0x2030, .end = 0x2030 }, - { .start = 0x2510, .end = 0x2550 }, - { .start = 0xA008, .end = 0xA00C }, - { .start = 0xA188, .end = 0xA188 }, - { .start = 0xA278, .end = 0xA278 }, - { .start = 0xA540, .end = 0xA56C }, - { .start = 0xC4C8, .end = 0xC4C8 }, - { .start = 0xC4E0, .end = 0xC4E0 }, - { .start = 0xC600, .end = 0xC600 }, - { .start = 0xC658, .end = 0xC658 }, - { .start = 0x22030, .end = 0x22030 }, - { .start = 0x22510, .end = 0x22550 }, - { .start = 0x1C0030, .end = 0x1C0030 }, - { .start = 0x1C0510, .end = 0x1C0550 }, - { .start = 0x1C4030, .end = 0x1C4030 }, - { .start = 0x1C4510, .end = 0x1C4550 }, - { .start = 0x1C8030, .end = 0x1C8030 }, - { .start = 0x1C8510, .end = 0x1C8550 }, - { .start = 0x1D0030, .end = 0x1D0030 }, - { .start = 0x1D0510, .end = 0x1D0550 }, - { .start = 0x1D4030, .end = 0x1D4030 }, - { .start = 0x1D4510, .end = 0x1D4550 }, - { .start = 0x1D8030, .end = 0x1D8030 }, - { .start = 0x1D8510, .end = 0x1D8550 }, - { .start = 0x1E0030, .end = 0x1E0030 }, - { .start = 0x1E0510, .end = 0x1E0550 }, - { .start = 0x1E4030, .end = 0x1E4030 }, - { .start = 0x1E4510, .end = 0x1E4550 }, - { .start = 0x1E8030, .end = 0x1E8030 }, - { .start = 0x1E8510, .end = 0x1E8550 }, - { .start = 0x1F0030, .end = 0x1F0030 }, - { .start = 0x1F0510, .end = 0x1F0550 }, - { .start = 0x1F4030, .end = 0x1F4030 }, - { .start = 0x1F4510, .end = 0x1F4550 }, - { .start = 0x1F8030, .end = 0x1F8030 }, - { .start = 0x1F8510, .end = 0x1F8550 }, -}; - static const struct i915_range mtl_shadowed_regs[] = { { .start = 0x2030, .end = 0x2030 }, { .start = 0x2510, .end = 0x2550 }, @@ -1471,195 +1432,31 @@ static const struct intel_forcewake_range __gen12_fw_ranges[] = { 0x1d3f00 - 0x1d3fff: VD2 */ }; -/* - * Graphics IP version 12.55 brings a slight change to the 0xd800 range, - * switching it from the GT domain to the render domain. 
- */ -#define XEHP_FWRANGES(FW_RANGE_D800) \ - GEN_FW_RANGE(0x0, 0x1fff, 0), /* \ - 0x0 - 0xaff: reserved \ - 0xb00 - 0x1fff: always on */ \ - GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), \ - GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), \ - GEN_FW_RANGE(0x4b00, 0x51ff, 0), /* \ - 0x4b00 - 0x4fff: reserved \ - 0x5000 - 0x51ff: always on */ \ - GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), \ - GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), \ - GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), \ - GEN_FW_RANGE(0x8160, 0x81ff, 0), /* \ - 0x8160 - 0x817f: reserved \ - 0x8180 - 0x81ff: always on */ \ - GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), \ - GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), \ - GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /* \ - 0x8500 - 0x87ff: gt \ - 0x8800 - 0x8c7f: reserved \ - 0x8c80 - 0x8cff: gt (DG2 only) */ \ - GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /* \ - 0x8d00 - 0x8dff: render (DG2 only) \ - 0x8e00 - 0x8fff: reserved */ \ - GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /* \ - 0x9000 - 0x947f: gt \ - 0x9480 - 0x94cf: reserved */ \ - GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), \ - GEN_FW_RANGE(0x9560, 0x967f, 0), /* \ - 0x9560 - 0x95ff: always on \ - 0x9600 - 0x967f: reserved */ \ - GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /* \ - 0x9680 - 0x96ff: render (DG2 only) \ - 0x9700 - 0x97ff: reserved */ \ - GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /* \ - 0x9800 - 0xb4ff: gt \ - 0xb500 - 0xbfff: reserved \ - 0xc000 - 0xcfff: gt */ \ - GEN_FW_RANGE(0xd000, 0xd7ff, 0), \ - GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800), \ - GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), \ - GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), \ - GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /* \ - 0xdd00 - 0xddff: gt \ - 0xde00 - 0xde7f: reserved */ \ - GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /* \ - 0xde80 - 0xdfff: render \ - 0xe000 - 0xe0ff: reserved \ - 0xe100 - 0xe8ff: render */ \ - GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /* \ - 0xe900 - 0xe9ff: gt \ - 0xea00 - 0xefff: reserved \ - 0xf000 - 0xffff: gt */ \ - GEN_FW_RANGE(0x10000, 0x12fff, 0), /* \ - 0x10000 - 0x11fff: reserved \ - 0x12000 - 0x127ff: always on \ - 0x12800 - 0x12fff: reserved */ \ - GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */ \ - GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /* \ - 0x13200 - 0x133ff: VD2 (DG2 only) \ - 0x13400 - 0x13fff: reserved */ \ - GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */ \ - GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */ \ - GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */ \ - GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */ \ - GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \ - GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* \ - 0x15000 - 0x15fff: gt (DG2 only) \ - 0x16000 - 0x16dff: reserved */ \ - GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \ - GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /* \ - 0x20000 - 0x20fff: VD0 (XEHPSDV only) \ - 0x21000 - 0x21fff: reserved */ \ - GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \ - GEN_FW_RANGE(0x24000, 0x2417f, 0), /* \ - 0x24000 - 0x2407f: always on \ - 0x24080 - 0x2417f: reserved */ \ - GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* \ - 0x24180 - 0x241ff: gt \ - 0x24200 - 0x249ff: reserved */ \ - GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /* \ - 0x24a00 - 0x24a7f: render \ - 0x24a80 - 0x251ff: reserved */ \ - GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /* \ 
- 0x25200 - 0x252ff: gt \ - 0x25300 - 0x25fff: reserved */ \ - GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /* \ - 0x26000 - 0x27fff: render \ - 0x28000 - 0x29fff: reserved \ - 0x2a000 - 0x2ffff: undocumented */ \ - GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), \ - GEN_FW_RANGE(0x40000, 0x1bffff, 0), \ - GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* \ - 0x1c0000 - 0x1c2bff: VD0 \ - 0x1c2c00 - 0x1c2cff: reserved \ - 0x1c2d00 - 0x1c2dff: VD0 \ - 0x1c2e00 - 0x1c3eff: VD0 (DG2 only) \ - 0x1c3f00 - 0x1c3fff: VD0 */ \ - GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /* \ - 0x1c4000 - 0x1c6bff: VD1 \ - 0x1c6c00 - 0x1c6cff: reserved \ - 0x1c6d00 - 0x1c6dff: VD1 \ - 0x1c6e00 - 0x1c7fff: reserved */ \ - GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* \ - 0x1c8000 - 0x1ca0ff: VE0 \ - 0x1ca100 - 0x1cbfff: reserved */ \ - GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), \ - GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), \ - GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), \ - GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), \ - GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* \ - 0x1d0000 - 0x1d2bff: VD2 \ - 0x1d2c00 - 0x1d2cff: reserved \ - 0x1d2d00 - 0x1d2dff: VD2 \ - 0x1d2e00 - 0x1d3dff: VD2 (DG2 only) \ - 0x1d3e00 - 0x1d3eff: reserved \ - 0x1d3f00 - 0x1d3fff: VD2 */ \ - GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /* \ - 0x1d4000 - 0x1d6bff: VD3 \ - 0x1d6c00 - 0x1d6cff: reserved \ - 0x1d6d00 - 0x1d6dff: VD3 \ - 0x1d6e00 - 0x1d7fff: reserved */ \ - GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /* \ - 0x1d8000 - 0x1da0ff: VE1 \ - 0x1da100 - 0x1dffff: reserved */ \ - GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /* \ - 0x1e0000 - 0x1e2bff: VD4 \ - 0x1e2c00 - 0x1e2cff: reserved \ - 0x1e2d00 - 0x1e2dff: VD4 \ - 0x1e2e00 - 0x1e3eff: reserved \ - 0x1e3f00 - 0x1e3fff: VD4 */ \ - GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /* \ - 0x1e4000 - 0x1e6bff: VD5 \ - 0x1e6c00 - 0x1e6cff: reserved \ - 0x1e6d00 - 0x1e6dff: VD5 \ - 0x1e6e00 - 0x1e7fff: reserved */ \ - GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /* \ - 0x1e8000 - 0x1ea0ff: VE2 \ - 0x1ea100 - 0x1effff: reserved */ \ - GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /* \ - 0x1f0000 - 0x1f2bff: VD6 \ - 0x1f2c00 - 0x1f2cff: reserved \ - 0x1f2d00 - 0x1f2dff: VD6 \ - 0x1f2e00 - 0x1f3eff: reserved \ - 0x1f3f00 - 0x1f3fff: VD6 */ \ - GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /* \ - 0x1f4000 - 0x1f6bff: VD7 \ - 0x1f6c00 - 0x1f6cff: reserved \ - 0x1f6d00 - 0x1f6dff: VD7 \ - 0x1f6e00 - 0x1f7fff: reserved */ \ - GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3), - -static const struct intel_forcewake_range __xehp_fw_ranges[] = { - XEHP_FWRANGES(FORCEWAKE_GT) -}; - static const struct intel_forcewake_range __dg2_fw_ranges[] = { - XEHP_FWRANGES(FORCEWAKE_RENDER) -}; - -static const struct intel_forcewake_range __pvc_fw_ranges[] = { - GEN_FW_RANGE(0x0, 0xaff, 0), - GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT), - GEN_FW_RANGE(0xc00, 0xfff, 0), - GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT), + GEN_FW_RANGE(0x0, 0x1fff, 0), /* + 0x0 - 0xaff: reserved + 0xb00 - 0x1fff: always on */ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT), - GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x4000, 0x813f, FORCEWAKE_GT), /* - 0x4000 - 0x4aff: gt + GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), + GEN_FW_RANGE(0x4b00, 0x51ff, 0), /* 0x4b00 - 
0x4fff: reserved - 0x5000 - 0x51ff: gt - 0x5200 - 0x52ff: reserved - 0x5300 - 0x53ff: gt - 0x5400 - 0x7fff: reserved - 0x8000 - 0x813f: gt */ - GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8180, 0x81ff, 0), - GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /* - 0x8200 - 0x82ff: gt - 0x8300 - 0x84ff: reserved - 0x8500 - 0x887f: gt - 0x8880 - 0x8a7f: reserved - 0x8a80 - 0x8aff: gt - 0x8b00 - 0x8fff: reserved + 0x5000 - 0x51ff: always on */ + GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), + GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x8160, 0x81ff, 0), /* + 0x8160 - 0x817f: reserved + 0x8180 - 0x81ff: always on */ + GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), + GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /* + 0x8500 - 0x87ff: gt + 0x8800 - 0x8c7f: reserved + 0x8c80 - 0x8cff: gt (DG2 only) */ + GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /* + 0x8d00 - 0x8dff: render (DG2 only) + 0x8e00 - 0x8fff: reserved */ + GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /* 0x9000 - 0x947f: gt 0x9480 - 0x94cf: reserved */ GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), @@ -1673,65 +1470,114 @@ static const struct intel_forcewake_range __pvc_fw_ranges[] = { 0x9800 - 0xb4ff: gt 0xb500 - 0xbfff: reserved 0xc000 - 0xcfff: gt */ - GEN_FW_RANGE(0xd000, 0xd3ff, 0), - GEN_FW_RANGE(0xd400, 0xdbff, FORCEWAKE_GT), + GEN_FW_RANGE(0xd000, 0xd7ff, 0), + GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER), + GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /* 0xdd00 - 0xddff: gt 0xde00 - 0xde7f: reserved */ GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /* - 0xde80 - 0xdeff: render - 0xdf00 - 0xe1ff: reserved - 0xe200 - 0xe7ff: render - 0xe800 - 0xe8ff: reserved */ - GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT), /* - 0xe900 - 0xe9ff: gt - 0xea00 - 0xebff: reserved - 0xec00 - 0xffff: gt - 0x10000 - 0x11fff: reserved */ - GEN_FW_RANGE(0x12000, 0x12fff, 0), /* + 0xde80 - 0xdfff: render + 0xe000 - 0xe0ff: reserved + 0xe100 - 0xe8ff: render */ + GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /* + 0xe900 - 0xe9ff: gt + 0xea00 - 0xefff: reserved + 0xf000 - 0xffff: gt */ + GEN_FW_RANGE(0x10000, 0x12fff, 0), /* + 0x10000 - 0x11fff: reserved 0x12000 - 0x127ff: always on 0x12800 - 0x12fff: reserved */ - GEN_FW_RANGE(0x13000, 0x19fff, FORCEWAKE_GT), /* - 0x13000 - 0x135ff: gt - 0x13600 - 0x147ff: reserved - 0x14800 - 0x153ff: gt - 0x15400 - 0x19fff: reserved */ - GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /* - 0x1a000 - 0x1ffff: render + GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), + GEN_FW_RANGE(0x13200, 0x147ff, FORCEWAKE_MEDIA_VDBOX2), /* + 0x13200 - 0x133ff: VD2 (DG2 only) + 0x13400 - 0x147ff: reserved */ + GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* + 0x15000 - 0x15fff: gt (DG2 only) + 0x16000 - 0x16dff: reserved */ + GEN_FW_RANGE(0x16e00, 0x21fff, FORCEWAKE_RENDER), /* + 0x16e00 - 0x1ffff: render 0x20000 - 0x21fff: reserved */ GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), GEN_FW_RANGE(0x24000, 0x2417f, 0), /* - 24000 - 0x2407f: always on - 24080 - 0x2417f: reserved */ - GEN_FW_RANGE(0x24180, 0x25fff, FORCEWAKE_GT), /* + 0x24000 - 0x2407f: always on + 0x24080 - 0x2417f: reserved */ + GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* 0x24180 - 0x241ff: gt - 0x24200 - 0x251ff: reserved + 0x24200 - 0x249ff: reserved */ + GEN_FW_RANGE(0x24a00, 0x251ff, 
FORCEWAKE_RENDER), /* + 0x24a00 - 0x24a7f: render + 0x24a80 - 0x251ff: reserved */ + GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /* 0x25200 - 0x252ff: gt 0x25300 - 0x25fff: reserved */ GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /* 0x26000 - 0x27fff: render - 0x28000 - 0x2ffff: reserved */ + 0x28000 - 0x29fff: reserved + 0x2a000 - 0x2ffff: undocumented */ GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), GEN_FW_RANGE(0x40000, 0x1bffff, 0), GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* 0x1c0000 - 0x1c2bff: VD0 0x1c2c00 - 0x1c2cff: reserved 0x1c2d00 - 0x1c2dff: VD0 - 0x1c2e00 - 0x1c3eff: reserved + 0x1c2e00 - 0x1c3eff: VD0 0x1c3f00 - 0x1c3fff: VD0 */ - GEN_FW_RANGE(0x1c4000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX1), /* - 0x1c4000 - 0x1c6aff: VD1 - 0x1c6b00 - 0x1c7eff: reserved - 0x1c7f00 - 0x1c7fff: VD1 - 0x1c8000 - 0x1cffff: reserved */ - GEN_FW_RANGE(0x1d0000, 0x23ffff, FORCEWAKE_MEDIA_VDBOX2), /* - 0x1d0000 - 0x1d2aff: VD2 - 0x1d2b00 - 0x1d3eff: reserved - 0x1d3f00 - 0x1d3fff: VD2 - 0x1d4000 - 0x23ffff: reserved */ - GEN_FW_RANGE(0x240000, 0x3dffff, 0), - GEN_FW_RANGE(0x3e0000, 0x3effff, FORCEWAKE_GT), + GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /* + 0x1c4000 - 0x1c6bff: VD1 + 0x1c6c00 - 0x1c6cff: reserved + 0x1c6d00 - 0x1c6dff: VD1 + 0x1c6e00 - 0x1c7fff: reserved */ + GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* + 0x1c8000 - 0x1ca0ff: VE0 + 0x1ca100 - 0x1cbfff: reserved */ + GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), + GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), + GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), + GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), + GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* + 0x1d0000 - 0x1d2bff: VD2 + 0x1d2c00 - 0x1d2cff: reserved + 0x1d2d00 - 0x1d2dff: VD2 + 0x1d2e00 - 0x1d3dff: VD2 + 0x1d3e00 - 0x1d3eff: reserved + 0x1d3f00 - 0x1d3fff: VD2 */ + GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /* + 0x1d4000 - 0x1d6bff: VD3 + 0x1d6c00 - 0x1d6cff: reserved + 0x1d6d00 - 0x1d6dff: VD3 + 0x1d6e00 - 0x1d7fff: reserved */ + GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /* + 0x1d8000 - 0x1da0ff: VE1 + 0x1da100 - 0x1dffff: reserved */ + GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /* + 0x1e0000 - 0x1e2bff: VD4 + 0x1e2c00 - 0x1e2cff: reserved + 0x1e2d00 - 0x1e2dff: VD4 + 0x1e2e00 - 0x1e3eff: reserved + 0x1e3f00 - 0x1e3fff: VD4 */ + GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /* + 0x1e4000 - 0x1e6bff: VD5 + 0x1e6c00 - 0x1e6cff: reserved + 0x1e6d00 - 0x1e6dff: VD5 + 0x1e6e00 - 0x1e7fff: reserved */ + GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /* + 0x1e8000 - 0x1ea0ff: VE2 + 0x1ea100 - 0x1effff: reserved */ + GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /* + 0x1f0000 - 0x1f2bff: VD6 + 0x1f2c00 - 0x1f2cff: reserved + 0x1f2d00 - 0x1f2dff: VD6 + 0x1f2e00 - 0x1f3eff: reserved + 0x1f3f00 - 0x1f3fff: VD6 */ + GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /* + 0x1f4000 - 0x1f6bff: VD7 + 0x1f6c00 - 0x1f6cff: reserved + 0x1f6d00 - 0x1f6dff: VD7 + 0x1f6e00 - 0x1f7fff: reserved */ + GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3), }; static const struct intel_forcewake_range __mtl_fw_ranges[] = { @@ -2576,18 +2422,10 @@ static int uncore_forcewake_init(struct intel_uncore *uncore) ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges); ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs); ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); - } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) 
{ - ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges); - ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs); - ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) { ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges); ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs); ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); - } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { - ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges); - ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs); - ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); } else if (GRAPHICS_VER(i915) >= 12) { ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges); ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs); @@ -2734,7 +2572,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, * the forcewake domain if any of the other engines * in the same media slice are present. */ - if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) { + if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 55) && i % 2 == 0) { if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1))) continue; diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index ee79e0809a..fee76c1d2f 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -154,6 +154,30 @@ __wait_gsc_proxy_completed(struct drm_i915_private *i915) pr_warn(DRIVER_NAME "Timed out waiting for gsc_proxy_completion!\n"); } +static void +__wait_gsc_huc_load_completed(struct drm_i915_private *i915) +{ + /* this only applies to DG2, so we only care about GT0 */ + struct intel_huc *huc = &to_gt(i915)->uc.huc; + bool need_to_wait = (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && + intel_huc_wait_required(huc)); + /* + * The GSC and PXP mei bringup depends on the kernel boot ordering, so + * to account for the worst case scenario the HuC code waits for up to + * 10s for the GSC driver to load and then another 5s for the PXP + * component to bind before giving up, even though those steps normally + * complete in less than a second from the i915 load. We match that + * timeout here, but we expect to bail early due to the fence being + * signalled even in a failure case, as it is extremely unlikely that + * both components will use their full timeout. 
+ */ + unsigned long timeout_ms = 15000; + + if (need_to_wait && + wait_for(i915_sw_fence_done(&huc->delayed_load.fence), timeout_ms)) + pr_warn(DRIVER_NAME "Timed out waiting for huc load via GSC!\n"); +} + static int __run_selftests(const char *name, struct selftest *st, unsigned int count, @@ -228,14 +252,16 @@ int i915_mock_selftests(void) int i915_live_selftests(struct pci_dev *pdev) { + struct drm_i915_private *i915 = pdev_to_i915(pdev); int err; if (!i915_selftest.live) return 0; - __wait_gsc_proxy_completed(pdev_to_i915(pdev)); + __wait_gsc_proxy_completed(i915); + __wait_gsc_huc_load_completed(i915); - err = run_selftests(live, pdev_to_i915(pdev)); + err = run_selftests(live, i915); if (err) { i915_selftest.live = err; return err; @@ -251,14 +277,16 @@ int i915_live_selftests(struct pci_dev *pdev) int i915_perf_selftests(struct pci_dev *pdev) { + struct drm_i915_private *i915 = pdev_to_i915(pdev); int err; if (!i915_selftest.perf) return 0; - __wait_gsc_proxy_completed(pdev_to_i915(pdev)); + __wait_gsc_proxy_completed(i915); + __wait_gsc_huc_load_completed(i915); - err = run_selftests(perf, pdev_to_i915(pdev)); + err = run_selftests(perf, i915); if (err) { i915_selftest.perf = err; return err; diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 4f98aa8a86..41eaa9b7f6 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -71,7 +71,6 @@ static int intel_shadow_table_check(void) { gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) }, { gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) }, { dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) }, - { pvc_shadowed_regs, ARRAY_SIZE(pvc_shadowed_regs) }, { mtl_shadowed_regs, ARRAY_SIZE(mtl_shadowed_regs) }, { xelpmp_shadowed_regs, ARRAY_SIZE(xelpmp_shadowed_regs) }, }; @@ -119,8 +118,6 @@ int intel_uncore_mock_selftests(void) { __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true }, { __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true }, { __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true }, - { __xehp_fw_ranges, ARRAY_SIZE(__xehp_fw_ranges), true }, - { __pvc_fw_ranges, ARRAY_SIZE(__pvc_fw_ranges), true }, { __mtl_fw_ranges, ARRAY_SIZE(__mtl_fw_ranges), true }, { __xelpmp_fw_ranges, ARRAY_SIZE(__xelpmp_fw_ranges), true }, }; diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c index 15492b69f6..e3287f1de7 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.c +++ b/drivers/gpu/drm/i915/soc/intel_dram.c @@ -681,6 +681,8 @@ void intel_dram_detect(struct drm_i915_private *i915) if (ret) return; + drm_dbg_kms(&i915->drm, "Num qgv points %u\n", dram_info->num_qgv_points); + drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels); drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n", diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c index ffa195560d..68291412f4 100644 --- a/drivers/gpu/drm/i915/vlv_sideband.c +++ b/drivers/gpu/drm/i915/vlv_sideband.c @@ -9,7 +9,6 @@ #include "vlv_sideband.h" #include "display/intel_dpio_phy.h" -#include "display/intel_display_types.h" /* * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile index ec6db8e9b4..9bc6a3884c 100644 --- a/drivers/gpu/drm/imagination/Makefile +++ b/drivers/gpu/drm/imagination/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only OR MIT # Copyright (c) 2023 
Imagination Technologies Ltd. -subdir-ccflags-y := -I$(srctree)/$(src) +subdir-ccflags-y := -I$(src) powervr-y := \ pvr_ccb.o \ diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c index 31199e45b7..73707daa4e 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_trace.c +++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c @@ -12,6 +12,7 @@ #include #include +#include #include #include diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c index 4f99b4af87..94af854547 100644 --- a/drivers/gpu/drm/imagination/pvr_vm_mips.c +++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c @@ -14,6 +14,7 @@ #include #include #include +#include /** * pvr_vm_mips_init() - Initialise MIPS FW pagetable diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c index dade8b59fe..704c549750 100644 --- a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c @@ -773,6 +773,13 @@ static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = { .atomic_update = ipu_plane_atomic_update, }; +static const struct drm_plane_helper_funcs ipu_primary_plane_helper_funcs = { + .atomic_check = ipu_plane_atomic_check, + .atomic_disable = ipu_plane_atomic_disable, + .atomic_update = ipu_plane_atomic_update, + .get_scanout_buffer = drm_fb_dma_get_scanout_buffer, +}; + bool ipu_plane_atomic_update_pending(struct drm_plane *plane) { struct ipu_plane *ipu_plane = to_ipu_plane(plane); @@ -916,7 +923,10 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, ipu_plane->dma = dma; ipu_plane->dp_flow = dp; - drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs); + if (type == DRM_PLANE_TYPE_PRIMARY) + drm_plane_helper_add(&ipu_plane->base, &ipu_primary_plane_helper_funcs); + else + drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs); if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG) ret = drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0, diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index 10fd9154cc..739c865b55 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -371,6 +371,7 @@ static int lima_pdev_probe(struct platform_device *pdev) { struct lima_device *ldev; struct drm_device *ddev; + const struct lima_compatible *comp; int err; err = lima_sched_slab_init(); @@ -384,7 +385,13 @@ static int lima_pdev_probe(struct platform_device *pdev) } ldev->dev = &pdev->dev; - ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev); + comp = of_device_get_match_data(&pdev->dev); + if (!comp) { + err = -ENODEV; + goto err_out0; + } + + ldev->id = comp->id; platform_set_drvdata(pdev, ldev); @@ -459,9 +466,17 @@ static void lima_pdev_remove(struct platform_device *pdev) lima_sched_slab_fini(); } +static const struct lima_compatible lima_mali400_data = { + .id = lima_gpu_mali400, +}; + +static const struct lima_compatible lima_mali450_data = { + .id = lima_gpu_mali450, +}; + static const struct of_device_id dt_match[] = { - { .compatible = "arm,mali-400", .data = (void *)lima_gpu_mali400 }, - { .compatible = "arm,mali-450", .data = (void *)lima_gpu_mali450 }, + { .compatible = "arm,mali-400", .data = &lima_mali400_data }, + { .compatible = "arm,mali-450", .data = &lima_mali450_data }, {} }; MODULE_DEVICE_TABLE(of, dt_match); diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h index c738d28854..6706c19b16 100644 --- 
a/drivers/gpu/drm/lima/lima_drv.h +++ b/drivers/gpu/drm/lima/lima_drv.h @@ -7,6 +7,7 @@ #include #include "lima_ctx.h" +#include "lima_device.h" extern int lima_sched_timeout_ms; extern uint lima_heap_init_nr_pages; @@ -39,6 +40,10 @@ struct lima_submit { struct lima_sched_task *task; }; +struct lima_compatible { + enum lima_gpu_id id; +}; + static inline struct lima_drm_priv * to_lima_drm_priv(struct drm_file *file) { diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 7ea244d876..9bb997dbb4 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -185,7 +185,7 @@ static int lima_gem_pin(struct drm_gem_object *obj) if (bo->heap_size) return -EINVAL; - return drm_gem_shmem_pin(&bo->base); + return drm_gem_shmem_pin_locked(&bo->base); } static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h index 494b9790b1..3a349d1030 100644 --- a/drivers/gpu/drm/lima/lima_trace.h +++ b/drivers/gpu/drm/lima/lima_trace.h @@ -24,7 +24,7 @@ DECLARE_EVENT_CLASS(lima_task, __entry->task_id = task->base.id; __entry->context = task->base.s_fence->finished.context; __entry->seqno = task->base.s_fence->finished.seqno; - __assign_str(pipe, task->base.sched->name); + __assign_str(pipe); ), TP_printk("task=%llu, context=%u seqno=%u pipe=%s", diff --git a/drivers/gpu/drm/loongson/lsdc_crtc.c b/drivers/gpu/drm/loongson/lsdc_crtc.c index 827acab580..03958b79f2 100644 --- a/drivers/gpu/drm/loongson/lsdc_crtc.c +++ b/drivers/gpu/drm/loongson/lsdc_crtc.c @@ -3,6 +3,7 @@ * Copyright (C) 2023 Loongson Technology Corporation Limited */ +#include #include #include diff --git a/drivers/gpu/drm/loongson/lsdc_gem.c b/drivers/gpu/drm/loongson/lsdc_gem.c index 04293df2f0..a720d8f532 100644 --- a/drivers/gpu/drm/loongson/lsdc_gem.c +++ b/drivers/gpu/drm/loongson/lsdc_gem.c @@ -19,33 +19,24 @@ static int lsdc_gem_prime_pin(struct drm_gem_object *obj) struct lsdc_bo *lbo = gem_to_lsdc_bo(obj); int ret; - ret = lsdc_bo_reserve(lbo); - if (unlikely(ret)) - return ret; + dma_resv_assert_held(obj->resv); ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_GTT, NULL); if (likely(ret == 0)) lbo->sharing_count++; - lsdc_bo_unreserve(lbo); - return ret; } static void lsdc_gem_prime_unpin(struct drm_gem_object *obj) { struct lsdc_bo *lbo = gem_to_lsdc_bo(obj); - int ret; - ret = lsdc_bo_reserve(lbo); - if (unlikely(ret)) - return; + dma_resv_assert_held(obj->resv); lsdc_bo_unpin(lbo); if (lbo->sharing_count) lbo->sharing_count--; - - lsdc_bo_unreserve(lbo); } static struct sg_table *lsdc_gem_prime_get_sg_table(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index 76cab28e01..96cbe020f4 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -26,7 +26,7 @@ config DRM_MEDIATEK_DP select PHY_MTK_DP select DRM_DISPLAY_HELPER select DRM_DISPLAY_DP_HELPER - select DRM_DP_AUX_BUS + select DRM_DISPLAY_DP_AUX_BUS help DRM/KMS Display Port driver for MediaTek SoCs. 
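The lsdc_gem and lima_gem hunks above share one theme: the GEM object pin/unpin (and PRIME pin) callbacks are now entered with the object's reservation lock already held, so a driver asserts the lock instead of reserving the BO itself. A minimal sketch of that shape, using hypothetical foo_* names that are not part of this patch:

#include <linux/container_of.h>
#include <linux/dma-resv.h>
#include <drm/drm_gem.h>

struct foo_bo {			/* hypothetical driver BO wrapper */
	struct drm_gem_object base;
	unsigned int pin_count;
};

/* Called via drm_gem_object_funcs.pin with obj->resv already locked. */
static int foo_gem_prime_pin(struct drm_gem_object *obj)
{
	struct foo_bo *bo = container_of(obj, struct foo_bo, base);

	dma_resv_assert_held(obj->resv);	/* the caller took the lock */

	bo->pin_count++;			/* no reserve/unreserve pair here */
	return 0;
}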
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile index 5e4436403b..32a2ed6c0c 100644 --- a/drivers/gpu/drm/mediatek/Makefile +++ b/drivers/gpu/drm/mediatek/Makefile @@ -1,6 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 -mediatek-drm-y := mtk_disp_aal.o \ +mediatek-drm-y := mtk_crtc.o \ + mtk_ddp_comp.o \ + mtk_disp_aal.o \ mtk_disp_ccorr.o \ mtk_disp_color.o \ mtk_disp_gamma.o \ @@ -8,16 +10,14 @@ mediatek-drm-y := mtk_disp_aal.o \ mtk_disp_ovl.o \ mtk_disp_ovl_adaptor.o \ mtk_disp_rdma.o \ - mtk_drm_crtc.o \ - mtk_drm_ddp_comp.o \ mtk_drm_drv.o \ - mtk_drm_gem.o \ - mtk_drm_plane.o \ mtk_dsi.o \ mtk_dpi.o \ mtk_ethdr.o \ + mtk_gem.o \ mtk_mdp_rdma.o \ - mtk_padding.o + mtk_padding.o \ + mtk_plane.o obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c new file mode 100644 index 0000000000..6f34f573e1 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -0,0 +1,1138 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_drm_drv.h" +#include "mtk_gem.h" +#include "mtk_plane.h" + +/* + * struct mtk_crtc - MediaTek specific crtc structure. + * @base: crtc object. + * @enabled: records whether crtc_enable succeeded + * @planes: array of 4 drm_plane structures, one for each overlay plane + * @pending_planes: whether any plane has pending changes to be applied + * @mmsys_dev: pointer to the mmsys device for configuration registers + * @mutex: handle to one of the ten disp_mutex streams + * @ddp_comp_nr: number of components in ddp_comp + * @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc + * + * TODO: Needs update: this header is missing a bunch of member descriptions. 
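+ * @pending_needs_vblank: a vblank event from the last commit still has to be sent
+ * @event: vblank event handed over in atomic_begin and completed from the vblank irq path
+ * @layer_nr: number of planes in @planes
+ * @pending_async_planes: whether any plane has a pending asynchronous update
+ * @cmdq_client: mailbox client used to send register updates through CMDQ
+ * @cmdq_handle: preallocated CMDQ packet reused for every flush
+ * @cmdq_event: hardware event the CMDQ packet waits for before writing registers
+ * @cmdq_vblank_cnt: vblank countdown used to detect a CMDQ packet that did not execute in time
+ * @cb_blocking_queue: waitqueue woken from the CMDQ callback, waited on when disabling the crtc
+ * @dma_dev: device representing this crtc's DMA path, returned by mtk_crtc_dma_dev_get()
+ * @num_conn_routes: number of entries in @conn_routes
+ * @conn_routes: optional connector routes; the selected route's component is placed at the end of @ddp_comp
+ * @config_updating: set while mtk_crtc_update_config() runs so the vblank irq defers the event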
+ */ +struct mtk_crtc { + struct drm_crtc base; + bool enabled; + + bool pending_needs_vblank; + struct drm_pending_vblank_event *event; + + struct drm_plane *planes; + unsigned int layer_nr; + bool pending_planes; + bool pending_async_planes; + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct cmdq_client cmdq_client; + struct cmdq_pkt cmdq_handle; + u32 cmdq_event; + u32 cmdq_vblank_cnt; + wait_queue_head_t cb_blocking_queue; +#endif + + struct device *mmsys_dev; + struct device *dma_dev; + struct mtk_mutex *mutex; + unsigned int ddp_comp_nr; + struct mtk_ddp_comp **ddp_comp; + unsigned int num_conn_routes; + const struct mtk_drm_route *conn_routes; + + /* lock for display hardware access */ + struct mutex hw_lock; + bool config_updating; +}; + +struct mtk_crtc_state { + struct drm_crtc_state base; + + bool pending_config; + unsigned int pending_width; + unsigned int pending_height; + unsigned int pending_vrefresh; +}; + +static inline struct mtk_crtc *to_mtk_crtc(struct drm_crtc *c) +{ + return container_of(c, struct mtk_crtc, base); +} + +static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s) +{ + return container_of(s, struct mtk_crtc_state, base); +} + +static void mtk_crtc_finish_page_flip(struct mtk_crtc *mtk_crtc) +{ + struct drm_crtc *crtc = &mtk_crtc->base; + unsigned long flags; + + if (mtk_crtc->event) { + spin_lock_irqsave(&crtc->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, mtk_crtc->event); + drm_crtc_vblank_put(crtc); + mtk_crtc->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } +} + +static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc) +{ + drm_crtc_handle_vblank(&mtk_crtc->base); + if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) { + mtk_crtc_finish_page_flip(mtk_crtc); + mtk_crtc->pending_needs_vblank = false; + } +} + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) +static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, + size_t size) +{ + struct device *dev; + dma_addr_t dma_addr; + + pkt->va_base = kzalloc(size, GFP_KERNEL); + if (!pkt->va_base) + return -ENOMEM; + + pkt->buf_size = size; + pkt->cl = (void *)client; + + dev = client->chan->mbox->dev; + dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size); + kfree(pkt->va_base); + return -ENOMEM; + } + + pkt->pa_base = dma_addr; + + return 0; +} + +static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt) +{ + struct cmdq_client *client = (struct cmdq_client *)pkt->cl; + + dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size, + DMA_TO_DEVICE); + kfree(pkt->va_base); +} +#endif + +static void mtk_crtc_destroy(struct drm_crtc *crtc) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + int i; + + mtk_mutex_put(mtk_crtc->mutex); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle); + + if (mtk_crtc->cmdq_client.chan) { + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; + } +#endif + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + struct mtk_ddp_comp *comp; + + comp = mtk_crtc->ddp_comp[i]; + mtk_ddp_comp_unregister_vblank_cb(comp); + } + + drm_crtc_cleanup(crtc); +} + +static void mtk_crtc_reset(struct drm_crtc *crtc) +{ + struct mtk_crtc_state *state; + + if (crtc->state) + __drm_atomic_helper_crtc_destroy_state(crtc->state); + + kfree(to_mtk_crtc_state(crtc->state)); + crtc->state = NULL; + + state = 
kzalloc(sizeof(*state), GFP_KERNEL); + if (state) + __drm_atomic_helper_crtc_reset(crtc, &state->base); +} + +static struct drm_crtc_state *mtk_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct mtk_crtc_state *state; + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); + + WARN_ON(state->base.crtc != crtc); + state->base.crtc = crtc; + state->pending_config = false; + + return &state->base; +} + +static void mtk_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + __drm_atomic_helper_crtc_destroy_state(state); + kfree(to_mtk_crtc_state(state)); +} + +static enum drm_mode_status +mtk_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + enum drm_mode_status status = MODE_OK; + int i; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + status = mtk_ddp_comp_mode_valid(mtk_crtc->ddp_comp[i], mode); + if (status != MODE_OK) + break; + } + return status; +} + +static bool mtk_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + /* Nothing to do here, but this callback is mandatory. */ + return true; +} + +static void mtk_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state); + + state->pending_width = crtc->mode.hdisplay; + state->pending_height = crtc->mode.vdisplay; + state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode); + wmb(); /* Make sure the above parameters are set before update */ + state->pending_config = true; +} + +static int mtk_crtc_ddp_clk_enable(struct mtk_crtc *mtk_crtc) +{ + int ret; + int i; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]); + if (ret) { + DRM_ERROR("Failed to enable clock %d: %d\n", i, ret); + goto err; + } + } + + return 0; +err: + while (--i >= 0) + mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]); + return ret; +} + +static void mtk_crtc_ddp_clk_disable(struct mtk_crtc *mtk_crtc) +{ + int i; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]); +} + +static +struct mtk_ddp_comp *mtk_ddp_comp_for_plane(struct drm_crtc *crtc, + struct drm_plane *plane, + unsigned int *local_layer) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_ddp_comp *comp; + int i, count = 0; + unsigned int local_index = plane - mtk_crtc->planes; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + comp = mtk_crtc->ddp_comp[i]; + if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) { + *local_layer = local_index - count; + return comp; + } + count += mtk_ddp_comp_layer_nr(comp); + } + + WARN(1, "Failed to find component for plane %d\n", plane->index); + return NULL; +} + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) +static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) +{ + struct cmdq_cb_data *data = mssg; + struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client); + struct mtk_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_crtc, cmdq_client); + struct mtk_crtc_state *state; + unsigned int i; + + if (data->sta < 0) + return; + + state = to_mtk_crtc_state(mtk_crtc->base.state); + + state->pending_config = false; + + if (mtk_crtc->pending_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); 
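+ /* the CMDQ packet carrying this plane's update has executed; mark the queued config as applied */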
+ + plane_state->pending.config = false; + } + mtk_crtc->pending_planes = false; + } + + if (mtk_crtc->pending_async_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + plane_state->pending.async_config = false; + } + mtk_crtc->pending_async_planes = false; + } + + mtk_crtc->cmdq_vblank_cnt = 0; + wake_up(&mtk_crtc->cb_blocking_queue); +} +#endif + +static int mtk_crtc_ddp_hw_init(struct mtk_crtc *mtk_crtc) +{ + struct drm_crtc *crtc = &mtk_crtc->base; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct drm_connector_list_iter conn_iter; + unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC; + int ret; + int i; + + if (WARN_ON(!crtc->state)) + return -EINVAL; + + width = crtc->state->adjusted_mode.hdisplay; + height = crtc->state->adjusted_mode.vdisplay; + vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode); + + drm_for_each_encoder(encoder, crtc->dev) { + if (encoder->crtc != crtc) + continue; + + drm_connector_list_iter_begin(crtc->dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->encoder != encoder) + continue; + if (connector->display_info.bpc != 0 && + bpc > connector->display_info.bpc) + bpc = connector->display_info.bpc; + } + drm_connector_list_iter_end(&conn_iter); + } + + ret = pm_runtime_resume_and_get(crtc->dev->dev); + if (ret < 0) { + DRM_ERROR("Failed to enable power domain: %d\n", ret); + return ret; + } + + ret = mtk_mutex_prepare(mtk_crtc->mutex); + if (ret < 0) { + DRM_ERROR("Failed to enable mutex clock: %d\n", ret); + goto err_pm_runtime_put; + } + + ret = mtk_crtc_ddp_clk_enable(mtk_crtc); + if (ret < 0) { + DRM_ERROR("Failed to enable component clocks: %d\n", ret); + goto err_mutex_unprepare; + } + + for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) { + if (!mtk_ddp_comp_connect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev, + mtk_crtc->ddp_comp[i + 1]->id)) + mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev, + mtk_crtc->ddp_comp[i]->id, + mtk_crtc->ddp_comp[i + 1]->id); + if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) + mtk_mutex_add_comp(mtk_crtc->mutex, + mtk_crtc->ddp_comp[i]->id); + } + if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) + mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id); + mtk_mutex_enable(mtk_crtc->mutex); + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i]; + + if (i == 1) + mtk_ddp_comp_bgclr_in_on(comp); + + mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL); + mtk_ddp_comp_start(comp); + } + + /* Initially configure all planes */ + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + struct mtk_ddp_comp *comp; + unsigned int local_layer; + + plane_state = to_mtk_plane_state(plane->state); + + /* should not enable layer before crtc enabled */ + plane_state->pending.enable = false; + comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer); + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, NULL); + } + + return 0; + +err_mutex_unprepare: + mtk_mutex_unprepare(mtk_crtc->mutex); +err_pm_runtime_put: + pm_runtime_put(crtc->dev->dev); + return ret; +} + +static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc) +{ + struct drm_device *drm = mtk_crtc->base.dev; + struct drm_crtc *crtc = &mtk_crtc->base; + int i; + + for (i = 0; i < 
mtk_crtc->ddp_comp_nr; i++) { + mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]); + if (i == 1) + mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]); + } + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) + mtk_mutex_remove_comp(mtk_crtc->mutex, + mtk_crtc->ddp_comp[i]->id); + mtk_mutex_disable(mtk_crtc->mutex); + for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) { + if (!mtk_ddp_comp_disconnect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev, + mtk_crtc->ddp_comp[i + 1]->id)) + mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev, + mtk_crtc->ddp_comp[i]->id, + mtk_crtc->ddp_comp[i + 1]->id); + if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) + mtk_mutex_remove_comp(mtk_crtc->mutex, + mtk_crtc->ddp_comp[i]->id); + } + if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) + mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id); + mtk_crtc_ddp_clk_disable(mtk_crtc); + mtk_mutex_unprepare(mtk_crtc->mutex); + + pm_runtime_put(drm->dev); + + if (crtc->state->event && !crtc->state->active) { + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irq(&crtc->dev->event_lock); + } +} + +static void mtk_crtc_ddp_config(struct drm_crtc *crtc, + struct cmdq_pkt *cmdq_handle) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; + unsigned int i; + unsigned int local_layer; + + /* + * TODO: instead of updating the registers here, we should prepare + * working registers in atomic_commit and let the hardware command + * queue update module registers on vblank. + */ + if (state->pending_config) { + mtk_ddp_comp_config(comp, state->pending_width, + state->pending_height, + state->pending_vrefresh, 0, + cmdq_handle); + + if (!cmdq_handle) + state->pending_config = false; + } + + if (mtk_crtc->pending_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + if (!plane_state->pending.config) + continue; + + comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer); + + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, + cmdq_handle); + if (!cmdq_handle) + plane_state->pending.config = false; + } + + if (!cmdq_handle) + mtk_crtc->pending_planes = false; + } + + if (mtk_crtc->pending_async_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + if (!plane_state->pending.async_config) + continue; + + comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer); + + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, + cmdq_handle); + if (!cmdq_handle) + plane_state->pending.async_config = false; + } + + if (!cmdq_handle) + mtk_crtc->pending_async_planes = false; + } +} + +static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle; +#endif + struct drm_crtc *crtc = &mtk_crtc->base; + struct mtk_drm_private *priv = crtc->dev->dev_private; + unsigned int pending_planes = 0, pending_async_planes = 0; + int i; + + mutex_lock(&mtk_crtc->hw_lock); + mtk_crtc->config_updating = true; + if 
(needs_vblank) + mtk_crtc->pending_needs_vblank = true; + + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + if (plane_state->pending.dirty) { + plane_state->pending.config = true; + plane_state->pending.dirty = false; + pending_planes |= BIT(i); + } else if (plane_state->pending.async_dirty) { + plane_state->pending.async_config = true; + plane_state->pending.async_dirty = false; + pending_async_planes |= BIT(i); + } + } + if (pending_planes) + mtk_crtc->pending_planes = true; + if (pending_async_planes) + mtk_crtc->pending_async_planes = true; + + if (priv->data->shadow_register) { + mtk_mutex_acquire(mtk_crtc->mutex); + mtk_crtc_ddp_config(crtc, NULL); + mtk_mutex_release(mtk_crtc->mutex); + } +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (mtk_crtc->cmdq_client.chan) { + mbox_flush(mtk_crtc->cmdq_client.chan, 2000); + cmdq_handle->cmd_buf_size = 0; + cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); + cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false); + mtk_crtc_ddp_config(crtc, cmdq_handle); + cmdq_pkt_finalize(cmdq_handle); + dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev, + cmdq_handle->pa_base, + cmdq_handle->cmd_buf_size, + DMA_TO_DEVICE); + /* + * CMDQ command should execute in next 3 vblank. + * One vblank interrupt before send message (occasionally) + * and one vblank interrupt after cmdq done, + * so it's timeout after 3 vblank interrupt. + * If it fail to execute in next 3 vblank, timeout happen. + */ + mtk_crtc->cmdq_vblank_cnt = 3; + + mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle); + mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0); + } +#endif + mtk_crtc->config_updating = false; + mutex_unlock(&mtk_crtc->hw_lock); +} + +static void mtk_crtc_ddp_irq(void *data) +{ + struct drm_crtc *crtc = data; + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_drm_private *priv = crtc->dev->dev_private; + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan) + mtk_crtc_ddp_config(crtc, NULL); + else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0) + DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n", + drm_crtc_index(&mtk_crtc->base)); +#else + if (!priv->data->shadow_register) + mtk_crtc_ddp_config(crtc, NULL); +#endif + mtk_drm_finish_page_flip(mtk_crtc); +} + +static int mtk_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; + + mtk_ddp_comp_enable_vblank(comp); + + return 0; +} + +static void mtk_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; + + mtk_ddp_comp_disable_vblank(comp); +} + +static void mtk_crtc_update_output(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + int crtc_index = drm_crtc_index(crtc); + int i; + struct device *dev; + struct drm_crtc_state *crtc_state = state->crtcs[crtc_index].new_state; + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_drm_private *priv; + unsigned int encoder_mask = crtc_state->encoder_mask; + + if (!crtc_state->connectors_changed) + return; + + if (!mtk_crtc->num_conn_routes) + return; + + priv = ((struct mtk_drm_private *)crtc->dev->dev_private)->all_drm_private[crtc_index]; + dev = priv->dev; + + dev_dbg(dev, "connector change:%d, encoder mask:0x%x for crtc:%d\n", + 
crtc_state->connectors_changed, encoder_mask, crtc_index); + + for (i = 0; i < mtk_crtc->num_conn_routes; i++) { + unsigned int comp_id = mtk_crtc->conn_routes[i].route_ddp; + struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id]; + + if (comp->encoder_index >= 0 && + (encoder_mask & BIT(comp->encoder_index))) { + mtk_crtc->ddp_comp[mtk_crtc->ddp_comp_nr - 1] = comp; + dev_dbg(dev, "Add comp_id: %d at path index %d\n", + comp->id, mtk_crtc->ddp_comp_nr - 1); + break; + } + } +} + +int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, + struct mtk_plane_state *state) +{ + unsigned int local_layer; + struct mtk_ddp_comp *comp; + + comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer); + if (comp) + return mtk_ddp_comp_layer_check(comp, local_layer, state); + return 0; +} + +void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + + if (!mtk_crtc->enabled) + return; + + mtk_crtc_update_config(mtk_crtc, false); +} + +static void mtk_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; + int ret; + + DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); + + ret = mtk_ddp_comp_power_on(comp); + if (ret < 0) { + DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret); + return; + } + + mtk_crtc_update_output(crtc, state); + + ret = mtk_crtc_ddp_hw_init(mtk_crtc); + if (ret) { + mtk_ddp_comp_power_off(comp); + return; + } + + drm_crtc_vblank_on(crtc); + mtk_crtc->enabled = true; +} + +static void mtk_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; + int i; + + DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); + if (!mtk_crtc->enabled) + return; + + /* Set all pending plane state to disabled */ + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + plane_state->pending.enable = false; + plane_state->pending.config = true; + } + mtk_crtc->pending_planes = true; + + mtk_crtc_update_config(mtk_crtc, false); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + /* Wait for planes to be disabled by cmdq */ + if (mtk_crtc->cmdq_client.chan) + wait_event_timeout(mtk_crtc->cb_blocking_queue, + mtk_crtc->cmdq_vblank_cnt == 0, + msecs_to_jiffies(500)); +#endif + /* Wait for planes to be disabled */ + drm_crtc_wait_one_vblank(crtc); + + drm_crtc_vblank_off(crtc); + mtk_crtc_ddp_hw_fini(mtk_crtc); + mtk_ddp_comp_power_off(comp); + + mtk_crtc->enabled = false; +} + +static void mtk_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state); + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + unsigned long flags; + + if (mtk_crtc->event && mtk_crtc_state->base.event) + DRM_ERROR("new event while there is still a pending event\n"); + + if (mtk_crtc_state->base.event) { + mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc); + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + mtk_crtc->event = mtk_crtc_state->base.event; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + 
mtk_crtc_state->base.event = NULL; + } +} + +static void mtk_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + int i; + + if (crtc->state->color_mgmt_changed) + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state); + mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state); + } + mtk_crtc_update_config(mtk_crtc, !!mtk_crtc->event); +} + +static const struct drm_crtc_funcs mtk_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .destroy = mtk_crtc_destroy, + .reset = mtk_crtc_reset, + .atomic_duplicate_state = mtk_crtc_duplicate_state, + .atomic_destroy_state = mtk_crtc_destroy_state, + .enable_vblank = mtk_crtc_enable_vblank, + .disable_vblank = mtk_crtc_disable_vblank, +}; + +static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = { + .mode_fixup = mtk_crtc_mode_fixup, + .mode_set_nofb = mtk_crtc_mode_set_nofb, + .mode_valid = mtk_crtc_mode_valid, + .atomic_begin = mtk_crtc_atomic_begin, + .atomic_flush = mtk_crtc_atomic_flush, + .atomic_enable = mtk_crtc_atomic_enable, + .atomic_disable = mtk_crtc_atomic_disable, +}; + +static int mtk_crtc_init(struct drm_device *drm, struct mtk_crtc *mtk_crtc, + unsigned int pipe) +{ + struct drm_plane *primary = NULL; + struct drm_plane *cursor = NULL; + int i, ret; + + for (i = 0; i < mtk_crtc->layer_nr; i++) { + if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY) + primary = &mtk_crtc->planes[i]; + else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR) + cursor = &mtk_crtc->planes[i]; + } + + ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor, + &mtk_crtc_funcs, NULL); + if (ret) + goto err_cleanup_crtc; + + drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs); + + return 0; + +err_cleanup_crtc: + drm_crtc_cleanup(&mtk_crtc->base); + return ret; +} + +static int mtk_crtc_num_comp_planes(struct mtk_crtc *mtk_crtc, int comp_idx) +{ + struct mtk_ddp_comp *comp; + + if (comp_idx > 1) + return 0; + + comp = mtk_crtc->ddp_comp[comp_idx]; + if (!comp->funcs) + return 0; + + if (comp_idx == 1 && !comp->funcs->bgclr_in_on) + return 0; + + return mtk_ddp_comp_layer_nr(comp); +} + +static inline +enum drm_plane_type mtk_crtc_plane_type(unsigned int plane_idx, + unsigned int num_planes) +{ + if (plane_idx == 0) + return DRM_PLANE_TYPE_PRIMARY; + else if (plane_idx == (num_planes - 1)) + return DRM_PLANE_TYPE_CURSOR; + else + return DRM_PLANE_TYPE_OVERLAY; + +} + +static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev, + struct mtk_crtc *mtk_crtc, + int comp_idx, int pipe) +{ + int num_planes = mtk_crtc_num_comp_planes(mtk_crtc, comp_idx); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx]; + int i, ret; + + for (i = 0; i < num_planes; i++) { + ret = mtk_plane_init(drm_dev, + &mtk_crtc->planes[mtk_crtc->layer_nr], + BIT(pipe), + mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes), + mtk_ddp_comp_supported_rotations(comp), + mtk_ddp_comp_get_formats(comp), + mtk_ddp_comp_get_num_formats(comp)); + if (ret) + return ret; + + mtk_crtc->layer_nr++; + } + return 0; +} + +struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc) +{ + struct mtk_crtc *mtk_crtc = NULL; + + if (!crtc) + return NULL; + + mtk_crtc = to_mtk_crtc(crtc); + if (!mtk_crtc) + return NULL; + + return mtk_crtc->dma_dev; +} + +int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path, + unsigned int path_len, int priv_data_index, + const struct 
mtk_drm_route *conn_routes, + unsigned int num_conn_routes) +{ + struct mtk_drm_private *priv = drm_dev->dev_private; + struct device *dev = drm_dev->dev; + struct mtk_crtc *mtk_crtc; + unsigned int num_comp_planes = 0; + int ret; + int i; + bool has_ctm = false; + uint gamma_lut_size = 0; + struct drm_crtc *tmp; + int crtc_i = 0; + + if (!path) + return 0; + + priv = priv->all_drm_private[priv_data_index]; + + drm_for_each_crtc(tmp, drm_dev) + crtc_i++; + + for (i = 0; i < path_len; i++) { + enum mtk_ddp_comp_id comp_id = path[i]; + struct device_node *node; + struct mtk_ddp_comp *comp; + + node = priv->comp_node[comp_id]; + comp = &priv->ddp_comp[comp_id]; + + /* Not all drm components have a DTS device node, such as ovl_adaptor, + * which is the drm bring up sub driver + */ + if (!node && comp_id != DDP_COMPONENT_DRM_OVL_ADAPTOR) { + dev_info(dev, + "Not creating crtc %d because component %d is disabled or missing\n", + crtc_i, comp_id); + return 0; + } + + if (!comp->dev) { + dev_err(dev, "Component %pOF not initialized\n", node); + return -ENODEV; + } + } + + mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL); + if (!mtk_crtc) + return -ENOMEM; + + mtk_crtc->mmsys_dev = priv->mmsys_dev; + mtk_crtc->ddp_comp_nr = path_len; + mtk_crtc->ddp_comp = devm_kcalloc(dev, + mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0), + sizeof(*mtk_crtc->ddp_comp), + GFP_KERNEL); + if (!mtk_crtc->ddp_comp) + return -ENOMEM; + + mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev); + if (IS_ERR(mtk_crtc->mutex)) { + ret = PTR_ERR(mtk_crtc->mutex); + dev_err(dev, "Failed to get mutex: %d\n", ret); + return ret; + } + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + unsigned int comp_id = path[i]; + struct mtk_ddp_comp *comp; + + comp = &priv->ddp_comp[comp_id]; + mtk_crtc->ddp_comp[i] = comp; + + if (comp->funcs) { + if (comp->funcs->gamma_set && comp->funcs->gamma_get_lut_size) { + unsigned int lut_sz = mtk_ddp_gamma_get_lut_size(comp); + + if (lut_sz) + gamma_lut_size = lut_sz; + } + + if (comp->funcs->ctm_set) + has_ctm = true; + } + + mtk_ddp_comp_register_vblank_cb(comp, mtk_crtc_ddp_irq, + &mtk_crtc->base); + } + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + num_comp_planes += mtk_crtc_num_comp_planes(mtk_crtc, i); + + mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes, + sizeof(struct drm_plane), GFP_KERNEL); + if (!mtk_crtc->planes) + return -ENOMEM; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + ret = mtk_crtc_init_comp_planes(drm_dev, mtk_crtc, i, crtc_i); + if (ret) + return ret; + } + + /* + * Default to use the first component as the dma dev. + * In the case of ovl_adaptor sub driver, it needs to use the + * dma_dev_get function to get representative dma dev. 
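 *
 * mtk_ddp_comp_dma_dev_get() falls back to comp->dev when a component
 * does not provide a dma_dev_get() callback, so for plain OVL/RDMA
 * paths this ends up being the device of the first component in the
 * pipeline.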
+ */ + mtk_crtc->dma_dev = mtk_ddp_comp_dma_dev_get(&priv->ddp_comp[path[0]]); + + ret = mtk_crtc_init(drm_dev, mtk_crtc, crtc_i); + if (ret < 0) + return ret; + + if (gamma_lut_size) + drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size); + drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size); + mutex_init(&mtk_crtc->hw_lock); + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + i = priv->mbox_index++; + mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev; + mtk_crtc->cmdq_client.client.tx_block = false; + mtk_crtc->cmdq_client.client.knows_txdone = true; + mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb; + mtk_crtc->cmdq_client.chan = + mbox_request_channel(&mtk_crtc->cmdq_client.client, i); + if (IS_ERR(mtk_crtc->cmdq_client.chan)) { + dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", + drm_crtc_index(&mtk_crtc->base)); + mtk_crtc->cmdq_client.chan = NULL; + } + + if (mtk_crtc->cmdq_client.chan) { + ret = of_property_read_u32_index(priv->mutex_node, + "mediatek,gce-events", + i, + &mtk_crtc->cmdq_event); + if (ret) { + dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n", + drm_crtc_index(&mtk_crtc->base)); + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; + } else { + ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client, + &mtk_crtc->cmdq_handle, + PAGE_SIZE); + if (ret) { + dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n", + drm_crtc_index(&mtk_crtc->base)); + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; + } + } + + /* for sending blocking cmd in crtc disable */ + init_waitqueue_head(&mtk_crtc->cb_blocking_queue); + } +#endif + + if (conn_routes) { + for (i = 0; i < num_conn_routes; i++) { + unsigned int comp_id = conn_routes[i].route_ddp; + struct device_node *node = priv->comp_node[comp_id]; + struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id]; + + if (!comp->dev) { + dev_dbg(dev, "comp_id:%d, Component %pOF not initialized\n", + comp_id, node); + /* mark encoder_index to -1, if route comp device is not enabled */ + comp->encoder_index = -1; + continue; + } + + mtk_ddp_comp_encoder_index_set(&priv->ddp_comp[comp_id]); + } + + mtk_crtc->num_conn_routes = num_conn_routes; + mtk_crtc->conn_routes = conn_routes; + + /* increase ddp_comp_nr at the end of mtk_crtc_create */ + mtk_crtc->ddp_comp_nr++; + } + + return 0; +} diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.h b/drivers/gpu/drm/mediatek/mtk_crtc.h new file mode 100644 index 0000000000..388e900b6f --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_crtc.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek Inc. 
+ */ + +#ifndef MTK_CRTC_H +#define MTK_CRTC_H + +#include +#include "mtk_ddp_comp.h" +#include "mtk_drm_drv.h" +#include "mtk_plane.h" + +#define MTK_MAX_BPC 10 +#define MTK_MIN_BPC 3 + +void mtk_crtc_commit(struct drm_crtc *crtc); +int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path, + unsigned int path_len, int priv_data_index, + const struct mtk_drm_route *conn_routes, + unsigned int num_conn_routes); +int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, + struct mtk_plane_state *state); +void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, + struct drm_atomic_state *plane_state); +struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc); + +#endif /* MTK_CRTC_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c new file mode 100644 index 0000000000..be66d94be3 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c @@ -0,0 +1,684 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. + * Authors: + * YT Shen + * CK Hu + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" +#include "mtk_drm_drv.h" +#include "mtk_plane.h" + + +#define DISP_REG_DITHER_EN 0x0000 +#define DITHER_EN BIT(0) +#define DISP_REG_DITHER_CFG 0x0020 +#define DITHER_RELAY_MODE BIT(0) +#define DITHER_ENGINE_EN BIT(1) +#define DISP_DITHERING BIT(2) +#define DISP_REG_DITHER_SIZE 0x0030 +#define DISP_REG_DITHER_5 0x0114 +#define DISP_REG_DITHER_7 0x011c +#define DISP_REG_DITHER_15 0x013c +#define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28) +#define DITHER_ADD_LSHIFT_R(x) (((x) & 0x7) << 20) +#define DITHER_NEW_BIT_MODE BIT(0) +#define DISP_REG_DITHER_16 0x0140 +#define DITHER_LSB_ERR_SHIFT_B(x) (((x) & 0x7) << 28) +#define DITHER_ADD_LSHIFT_B(x) (((x) & 0x7) << 20) +#define DITHER_LSB_ERR_SHIFT_G(x) (((x) & 0x7) << 12) +#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) + +#define DISP_REG_DSC_CON 0x0000 +#define DSC_EN BIT(0) +#define DSC_DUAL_INOUT BIT(2) +#define DSC_BYPASS BIT(4) +#define DSC_UFOE_SEL BIT(16) + +#define DISP_REG_OD_EN 0x0000 +#define DISP_REG_OD_CFG 0x0020 +#define OD_RELAYMODE BIT(0) +#define DISP_REG_OD_SIZE 0x0030 + +#define DISP_REG_POSTMASK_EN 0x0000 +#define POSTMASK_EN BIT(0) +#define DISP_REG_POSTMASK_CFG 0x0020 +#define POSTMASK_RELAY_MODE BIT(0) +#define DISP_REG_POSTMASK_SIZE 0x0030 + +#define DISP_REG_UFO_START 0x0000 +#define UFO_BYPASS BIT(2) + +struct mtk_ddp_comp_dev { + struct clk *clk; + void __iomem *regs; + struct cmdq_client_reg cmdq_reg; +}; + +void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct cmdq_client_reg *cmdq_reg, void __iomem *regs, + unsigned int offset) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) + cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys, + cmdq_reg->offset + offset, value); + else +#endif + writel(value, regs + offset); +} + +void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct cmdq_client_reg *cmdq_reg, void __iomem *regs, + unsigned int offset) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) + cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys, + cmdq_reg->offset + offset, value); + else +#endif + writel_relaxed(value, regs + offset); +} + +void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct cmdq_client_reg *cmdq_reg, void __iomem *regs, + unsigned int offset, unsigned int mask) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if 
(cmdq_pkt) { + cmdq_pkt_write_mask(cmdq_pkt, cmdq_reg->subsys, + cmdq_reg->offset + offset, value, mask); + } else { +#endif + u32 tmp = readl(regs + offset); + + tmp = (tmp & ~mask) | (value & mask); + writel(tmp, regs + offset); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + } +#endif +} + +static int mtk_ddp_clk_enable(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + return clk_prepare_enable(priv->clk); +} + +static void mtk_ddp_clk_disable(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + clk_disable_unprepare(priv->clk); +} + +void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg, + unsigned int bpc, unsigned int cfg, + unsigned int dither_en, struct cmdq_pkt *cmdq_pkt) +{ + /* If bpc equal to 0, the dithering function didn't be enabled */ + if (bpc == 0) + return; + + if (bpc >= MTK_MIN_BPC) { + mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_5); + mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_7); + mtk_ddp_write(cmdq_pkt, + DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | + DITHER_NEW_BIT_MODE, + cmdq_reg, regs, DISP_REG_DITHER_15); + mtk_ddp_write(cmdq_pkt, + DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | + DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), + cmdq_reg, regs, DISP_REG_DITHER_16); + mtk_ddp_write(cmdq_pkt, dither_en, cmdq_reg, regs, cfg); + } +} + +static void mtk_dither_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE); + mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, + DISP_REG_DITHER_CFG); + mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG, + DITHER_ENGINE_EN, cmdq_pkt); +} + +static void mtk_dither_start(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel(DITHER_EN, priv->regs + DISP_REG_DITHER_EN); +} + +static void mtk_dither_stop(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel_relaxed(0x0, priv->regs + DISP_REG_DITHER_EN); +} + +static void mtk_dither_set(struct device *dev, unsigned int bpc, + unsigned int cfg, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, cfg, + DISP_DITHERING, cmdq_pkt); +} + +static void mtk_dsc_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + /* dsc bypass mode */ + mtk_ddp_write_mask(cmdq_pkt, DSC_BYPASS, &priv->cmdq_reg, priv->regs, + DISP_REG_DSC_CON, DSC_BYPASS); + mtk_ddp_write_mask(cmdq_pkt, DSC_UFOE_SEL, &priv->cmdq_reg, priv->regs, + DISP_REG_DSC_CON, DSC_UFOE_SEL); + mtk_ddp_write_mask(cmdq_pkt, DSC_DUAL_INOUT, &priv->cmdq_reg, priv->regs, + DISP_REG_DSC_CON, DSC_DUAL_INOUT); +} + +static void mtk_dsc_start(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + /* write with mask to reserve the value set in mtk_dsc_config */ + mtk_ddp_write_mask(NULL, DSC_EN, &priv->cmdq_reg, priv->regs, DISP_REG_DSC_CON, DSC_EN); +} + +static void mtk_dsc_stop(struct device *dev) +{ + struct 
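/*
 * Worked example for the dither packing above, assuming a panel bpc of
 * 8 and MTK_MAX_BPC = 10, i.e. a shift argument of 2:
 *   DISP_REG_DITHER_15 = DITHER_LSB_ERR_SHIFT_R(2) | DITHER_ADD_LSHIFT_R(2) |
 *                        DITHER_NEW_BIT_MODE = 0x20200001
 *   DISP_REG_DITHER_16 = 0x20202020 (the same field replicated for B and G
 *                        at bit offsets 28, 20, 12 and 4)
 */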
mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel_relaxed(0x0, priv->regs + DISP_REG_DSC_CON); +} + +static void mtk_od_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_OD_SIZE); + mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_REG_OD_CFG); + mtk_dither_set(dev, bpc, DISP_REG_OD_CFG, cmdq_pkt); +} + +static void mtk_od_start(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel(1, priv->regs + DISP_REG_OD_EN); +} + +static void mtk_postmask_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, + DISP_REG_POSTMASK_SIZE); + mtk_ddp_write(cmdq_pkt, POSTMASK_RELAY_MODE, &priv->cmdq_reg, + priv->regs, DISP_REG_POSTMASK_CFG); +} + +static void mtk_postmask_start(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel(POSTMASK_EN, priv->regs + DISP_REG_POSTMASK_EN); +} + +static void mtk_postmask_stop(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel_relaxed(0x0, priv->regs + DISP_REG_POSTMASK_EN); +} + +static void mtk_ufoe_start(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START); +} + +static const struct mtk_ddp_comp_funcs ddp_aal = { + .clk_enable = mtk_aal_clk_enable, + .clk_disable = mtk_aal_clk_disable, + .gamma_get_lut_size = mtk_aal_gamma_get_lut_size, + .gamma_set = mtk_aal_gamma_set, + .config = mtk_aal_config, + .start = mtk_aal_start, + .stop = mtk_aal_stop, +}; + +static const struct mtk_ddp_comp_funcs ddp_ccorr = { + .clk_enable = mtk_ccorr_clk_enable, + .clk_disable = mtk_ccorr_clk_disable, + .config = mtk_ccorr_config, + .start = mtk_ccorr_start, + .stop = mtk_ccorr_stop, + .ctm_set = mtk_ccorr_ctm_set, +}; + +static const struct mtk_ddp_comp_funcs ddp_color = { + .clk_enable = mtk_color_clk_enable, + .clk_disable = mtk_color_clk_disable, + .config = mtk_color_config, + .start = mtk_color_start, +}; + +static const struct mtk_ddp_comp_funcs ddp_dither = { + .clk_enable = mtk_ddp_clk_enable, + .clk_disable = mtk_ddp_clk_disable, + .config = mtk_dither_config, + .start = mtk_dither_start, + .stop = mtk_dither_stop, +}; + +static const struct mtk_ddp_comp_funcs ddp_dpi = { + .start = mtk_dpi_start, + .stop = mtk_dpi_stop, + .encoder_index = mtk_dpi_encoder_index, +}; + +static const struct mtk_ddp_comp_funcs ddp_dsc = { + .clk_enable = mtk_ddp_clk_enable, + .clk_disable = mtk_ddp_clk_disable, + .config = mtk_dsc_config, + .start = mtk_dsc_start, + .stop = mtk_dsc_stop, +}; + +static const struct mtk_ddp_comp_funcs ddp_dsi = { + .start = mtk_dsi_ddp_start, + .stop = mtk_dsi_ddp_stop, + .encoder_index = mtk_dsi_encoder_index, +}; + +static const struct mtk_ddp_comp_funcs ddp_gamma = { + .clk_enable = mtk_gamma_clk_enable, + .clk_disable = mtk_gamma_clk_disable, + .gamma_get_lut_size = mtk_gamma_get_lut_size, + .gamma_set = mtk_gamma_set, + .config = mtk_gamma_config, + .start = mtk_gamma_start, + .stop = mtk_gamma_stop, +}; + +static const struct mtk_ddp_comp_funcs ddp_merge = { + .clk_enable = mtk_merge_clk_enable, + .clk_disable = 
mtk_merge_clk_disable, + .start = mtk_merge_start, + .stop = mtk_merge_stop, + .config = mtk_merge_config, +}; + +static const struct mtk_ddp_comp_funcs ddp_od = { + .clk_enable = mtk_ddp_clk_enable, + .clk_disable = mtk_ddp_clk_disable, + .config = mtk_od_config, + .start = mtk_od_start, +}; + +static const struct mtk_ddp_comp_funcs ddp_ovl = { + .clk_enable = mtk_ovl_clk_enable, + .clk_disable = mtk_ovl_clk_disable, + .config = mtk_ovl_config, + .start = mtk_ovl_start, + .stop = mtk_ovl_stop, + .register_vblank_cb = mtk_ovl_register_vblank_cb, + .unregister_vblank_cb = mtk_ovl_unregister_vblank_cb, + .enable_vblank = mtk_ovl_enable_vblank, + .disable_vblank = mtk_ovl_disable_vblank, + .supported_rotations = mtk_ovl_supported_rotations, + .layer_nr = mtk_ovl_layer_nr, + .layer_check = mtk_ovl_layer_check, + .layer_config = mtk_ovl_layer_config, + .bgclr_in_on = mtk_ovl_bgclr_in_on, + .bgclr_in_off = mtk_ovl_bgclr_in_off, + .get_formats = mtk_ovl_get_formats, + .get_num_formats = mtk_ovl_get_num_formats, +}; + +static const struct mtk_ddp_comp_funcs ddp_postmask = { + .clk_enable = mtk_ddp_clk_enable, + .clk_disable = mtk_ddp_clk_disable, + .config = mtk_postmask_config, + .start = mtk_postmask_start, + .stop = mtk_postmask_stop, +}; + +static const struct mtk_ddp_comp_funcs ddp_rdma = { + .clk_enable = mtk_rdma_clk_enable, + .clk_disable = mtk_rdma_clk_disable, + .config = mtk_rdma_config, + .start = mtk_rdma_start, + .stop = mtk_rdma_stop, + .register_vblank_cb = mtk_rdma_register_vblank_cb, + .unregister_vblank_cb = mtk_rdma_unregister_vblank_cb, + .enable_vblank = mtk_rdma_enable_vblank, + .disable_vblank = mtk_rdma_disable_vblank, + .layer_nr = mtk_rdma_layer_nr, + .layer_config = mtk_rdma_layer_config, + .get_formats = mtk_rdma_get_formats, + .get_num_formats = mtk_rdma_get_num_formats, +}; + +static const struct mtk_ddp_comp_funcs ddp_ufoe = { + .clk_enable = mtk_ddp_clk_enable, + .clk_disable = mtk_ddp_clk_disable, + .start = mtk_ufoe_start, +}; + +static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = { + .power_on = mtk_ovl_adaptor_power_on, + .power_off = mtk_ovl_adaptor_power_off, + .clk_enable = mtk_ovl_adaptor_clk_enable, + .clk_disable = mtk_ovl_adaptor_clk_disable, + .config = mtk_ovl_adaptor_config, + .start = mtk_ovl_adaptor_start, + .stop = mtk_ovl_adaptor_stop, + .layer_nr = mtk_ovl_adaptor_layer_nr, + .layer_config = mtk_ovl_adaptor_layer_config, + .register_vblank_cb = mtk_ovl_adaptor_register_vblank_cb, + .unregister_vblank_cb = mtk_ovl_adaptor_unregister_vblank_cb, + .enable_vblank = mtk_ovl_adaptor_enable_vblank, + .disable_vblank = mtk_ovl_adaptor_disable_vblank, + .dma_dev_get = mtk_ovl_adaptor_dma_dev_get, + .connect = mtk_ovl_adaptor_connect, + .disconnect = mtk_ovl_adaptor_disconnect, + .add = mtk_ovl_adaptor_add_comp, + .remove = mtk_ovl_adaptor_remove_comp, + .get_formats = mtk_ovl_adaptor_get_formats, + .get_num_formats = mtk_ovl_adaptor_get_num_formats, + .mode_valid = mtk_ovl_adaptor_mode_valid, +}; + +static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = { + [MTK_DISP_AAL] = "aal", + [MTK_DISP_BLS] = "bls", + [MTK_DISP_CCORR] = "ccorr", + [MTK_DISP_COLOR] = "color", + [MTK_DISP_DITHER] = "dither", + [MTK_DISP_DSC] = "dsc", + [MTK_DISP_GAMMA] = "gamma", + [MTK_DISP_MERGE] = "merge", + [MTK_DISP_MUTEX] = "mutex", + [MTK_DISP_OD] = "od", + [MTK_DISP_OVL] = "ovl", + [MTK_DISP_OVL_2L] = "ovl-2l", + [MTK_DISP_OVL_ADAPTOR] = "ovl_adaptor", + [MTK_DISP_POSTMASK] = "postmask", + [MTK_DISP_PWM] = "pwm", + [MTK_DISP_RDMA] = "rdma", + 
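/*
 * These stems are matched against devicetree aliases via
 * of_alias_get_id() in mtk_ddp_comp_get_id() below; e.g. an "ovl1"
 * alias resolves to the DDP_COMPONENT_OVL1 entry of mtk_ddp_matches[].
 */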
[MTK_DISP_UFOE] = "ufoe", + [MTK_DISP_WDMA] = "wdma", + [MTK_DP_INTF] = "dp-intf", + [MTK_DPI] = "dpi", + [MTK_DSI] = "dsi", +}; + +struct mtk_ddp_comp_match { + enum mtk_ddp_comp_type type; + int alias_id; + const struct mtk_ddp_comp_funcs *funcs; +}; + +static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_DRM_ID_MAX] = { + [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal }, + [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal }, + [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, + [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr }, + [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, + [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, + [DDP_COMPONENT_DITHER0] = { MTK_DISP_DITHER, 0, &ddp_dither }, + [DDP_COMPONENT_DP_INTF0] = { MTK_DP_INTF, 0, &ddp_dpi }, + [DDP_COMPONENT_DP_INTF1] = { MTK_DP_INTF, 1, &ddp_dpi }, + [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi }, + [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi }, + [DDP_COMPONENT_DRM_OVL_ADAPTOR] = { MTK_DISP_OVL_ADAPTOR, 0, &ddp_ovl_adaptor }, + [DDP_COMPONENT_DSC0] = { MTK_DISP_DSC, 0, &ddp_dsc }, + [DDP_COMPONENT_DSC1] = { MTK_DISP_DSC, 1, &ddp_dsc }, + [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi }, + [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi }, + [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi }, + [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi }, + [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma }, + [DDP_COMPONENT_MERGE0] = { MTK_DISP_MERGE, 0, &ddp_merge }, + [DDP_COMPONENT_MERGE1] = { MTK_DISP_MERGE, 1, &ddp_merge }, + [DDP_COMPONENT_MERGE2] = { MTK_DISP_MERGE, 2, &ddp_merge }, + [DDP_COMPONENT_MERGE3] = { MTK_DISP_MERGE, 3, &ddp_merge }, + [DDP_COMPONENT_MERGE4] = { MTK_DISP_MERGE, 4, &ddp_merge }, + [DDP_COMPONENT_MERGE5] = { MTK_DISP_MERGE, 5, &ddp_merge }, + [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od }, + [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od }, + [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl }, + [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L2] = { MTK_DISP_OVL_2L, 2, &ddp_ovl }, + [DDP_COMPONENT_POSTMASK0] = { MTK_DISP_POSTMASK, 0, &ddp_postmask }, + [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL }, + [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL }, + [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL }, + [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma }, + [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma }, + [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma }, + [DDP_COMPONENT_RDMA4] = { MTK_DISP_RDMA, 4, &ddp_rdma }, + [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe }, + [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL }, + [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, +}; + +static bool mtk_ddp_comp_find(struct device *dev, + const unsigned int *path, + unsigned int path_len, + struct mtk_ddp_comp *ddp_comp) +{ + unsigned int i; + + if (path == NULL) + return false; + + for (i = 0U; i < path_len; i++) + if (dev == ddp_comp[path[i]].dev) + return true; + + return false; +} + +static int mtk_ddp_comp_find_in_route(struct device *dev, + const struct mtk_drm_route *routes, + unsigned int num_routes, + struct mtk_ddp_comp *ddp_comp) +{ + unsigned int i; + + if (!routes) + return -EINVAL; + + for (i = 0; i < num_routes; i++) + if (dev == ddp_comp[routes[i].route_ddp].dev) + return BIT(routes[i].crtc_id); + + return -ENODEV; +} + +static bool mtk_ddp_path_available(const 
unsigned int *path, + unsigned int path_len, + struct device_node **comp_node) +{ + unsigned int i; + + if (!path || !path_len) + return false; + + for (i = 0U; i < path_len; i++) { + /* OVL_ADAPTOR doesn't have a device node */ + if (path[i] == DDP_COMPONENT_DRM_OVL_ADAPTOR) + continue; + + if (!comp_node[path[i]]) + return false; + } + + return true; +} + +int mtk_ddp_comp_get_id(struct device_node *node, + enum mtk_ddp_comp_type comp_type) +{ + int id = of_alias_get_id(node, mtk_ddp_comp_stem[comp_type]); + int i; + + for (i = 0; i < ARRAY_SIZE(mtk_ddp_matches); i++) { + if (comp_type == mtk_ddp_matches[i].type && + (id < 0 || id == mtk_ddp_matches[i].alias_id)) + return i; + } + + return -EINVAL; +} + +int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev) +{ + struct mtk_drm_private *private = drm->dev_private; + const struct mtk_mmsys_driver_data *data; + struct mtk_drm_private *priv_n; + int i = 0, j; + int ret; + + for (j = 0; j < private->data->mmsys_dev_num; j++) { + priv_n = private->all_drm_private[j]; + data = priv_n->data; + + if (mtk_ddp_path_available(data->main_path, data->main_len, + priv_n->comp_node)) { + if (mtk_ddp_comp_find(dev, data->main_path, + data->main_len, + priv_n->ddp_comp)) + return BIT(i); + i++; + } + + if (mtk_ddp_path_available(data->ext_path, data->ext_len, + priv_n->comp_node)) { + if (mtk_ddp_comp_find(dev, data->ext_path, + data->ext_len, + priv_n->ddp_comp)) + return BIT(i); + i++; + } + + if (mtk_ddp_path_available(data->third_path, data->third_len, + priv_n->comp_node)) { + if (mtk_ddp_comp_find(dev, data->third_path, + data->third_len, + priv_n->ddp_comp)) + return BIT(i); + i++; + } + } + + ret = mtk_ddp_comp_find_in_route(dev, + private->data->conn_routes, + private->data->num_conn_routes, + private->ddp_comp); + + if (ret < 0) + DRM_INFO("Failed to find comp in ddp table, ret = %d\n", ret); + + return ret; +} + +int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp, + unsigned int comp_id) +{ + struct platform_device *comp_pdev; + enum mtk_ddp_comp_type type; + struct mtk_ddp_comp_dev *priv; +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + int ret; +#endif + + if (comp_id >= DDP_COMPONENT_DRM_ID_MAX) + return -EINVAL; + + type = mtk_ddp_matches[comp_id].type; + + comp->id = comp_id; + comp->funcs = mtk_ddp_matches[comp_id].funcs; + /* Not all drm components have a DTS device node, such as ovl_adaptor, + * which is the drm bring up sub driver + */ + if (!node) + return 0; + + comp_pdev = of_find_device_by_node(node); + if (!comp_pdev) { + DRM_INFO("Waiting for device %s\n", node->full_name); + return -EPROBE_DEFER; + } + comp->dev = &comp_pdev->dev; + + if (type == MTK_DISP_AAL || + type == MTK_DISP_BLS || + type == MTK_DISP_CCORR || + type == MTK_DISP_COLOR || + type == MTK_DISP_GAMMA || + type == MTK_DISP_MERGE || + type == MTK_DISP_OVL || + type == MTK_DISP_OVL_2L || + type == MTK_DISP_PWM || + type == MTK_DISP_RDMA || + type == MTK_DPI || + type == MTK_DP_INTF || + type == MTK_DSI) + return 0; + + priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->regs = of_iomap(node, 0); + priv->clk = of_clk_get(node, 0); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(comp->dev, &priv->cmdq_reg, 0); + if (ret) + dev_dbg(comp->dev, "get mediatek,gce-client-reg fail!\n"); +#endif + + platform_set_drvdata(comp_pdev, priv); + + return 0; +} diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h 
b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h new file mode 100644 index 0000000000..ecf6dc283c --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h @@ -0,0 +1,346 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek Inc. + */ + +#ifndef MTK_DDP_COMP_H +#define MTK_DDP_COMP_H + +#include +#include +#include +#include +#include + +#include + +struct device; +struct device_node; +struct drm_crtc; +struct drm_device; +struct mtk_plane_state; +struct drm_crtc_state; + +enum mtk_ddp_comp_type { + MTK_DISP_AAL, + MTK_DISP_BLS, + MTK_DISP_CCORR, + MTK_DISP_COLOR, + MTK_DISP_DITHER, + MTK_DISP_DSC, + MTK_DISP_GAMMA, + MTK_DISP_MERGE, + MTK_DISP_MUTEX, + MTK_DISP_OD, + MTK_DISP_OVL, + MTK_DISP_OVL_2L, + MTK_DISP_OVL_ADAPTOR, + MTK_DISP_POSTMASK, + MTK_DISP_PWM, + MTK_DISP_RDMA, + MTK_DISP_UFOE, + MTK_DISP_WDMA, + MTK_DPI, + MTK_DP_INTF, + MTK_DSI, + MTK_DDP_COMP_TYPE_MAX, +}; + +struct mtk_ddp_comp; +struct cmdq_pkt; +struct mtk_ddp_comp_funcs { + int (*power_on)(struct device *dev); + void (*power_off)(struct device *dev); + int (*clk_enable)(struct device *dev); + void (*clk_disable)(struct device *dev); + void (*config)(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); + void (*start)(struct device *dev); + void (*stop)(struct device *dev); + void (*register_vblank_cb)(struct device *dev, + void (*vblank_cb)(void *), + void *vblank_cb_data); + void (*unregister_vblank_cb)(struct device *dev); + void (*enable_vblank)(struct device *dev); + void (*disable_vblank)(struct device *dev); + unsigned int (*supported_rotations)(struct device *dev); + unsigned int (*layer_nr)(struct device *dev); + int (*layer_check)(struct device *dev, + unsigned int idx, + struct mtk_plane_state *state); + void (*layer_config)(struct device *dev, unsigned int idx, + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt); + unsigned int (*gamma_get_lut_size)(struct device *dev); + void (*gamma_set)(struct device *dev, + struct drm_crtc_state *state); + void (*bgclr_in_on)(struct device *dev); + void (*bgclr_in_off)(struct device *dev); + void (*ctm_set)(struct device *dev, + struct drm_crtc_state *state); + struct device * (*dma_dev_get)(struct device *dev); + const u32 *(*get_formats)(struct device *dev); + size_t (*get_num_formats)(struct device *dev); + void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next); + void (*disconnect)(struct device *dev, struct device *mmsys_dev, unsigned int next); + void (*add)(struct device *dev, struct mtk_mutex *mutex); + void (*remove)(struct device *dev, struct mtk_mutex *mutex); + unsigned int (*encoder_index)(struct device *dev); + enum drm_mode_status (*mode_valid)(struct device *dev, const struct drm_display_mode *mode); +}; + +struct mtk_ddp_comp { + struct device *dev; + int irq; + unsigned int id; + int encoder_index; + const struct mtk_ddp_comp_funcs *funcs; +}; + +static inline int mtk_ddp_comp_power_on(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->power_on) + return comp->funcs->power_on(comp->dev); + else + return pm_runtime_resume_and_get(comp->dev); + return 0; +} + +static inline void mtk_ddp_comp_power_off(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->power_off) + comp->funcs->power_off(comp->dev); + else + pm_runtime_put(comp->dev); +} + +static inline int mtk_ddp_comp_clk_enable(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->clk_enable) + return 
comp->funcs->clk_enable(comp->dev); + + return 0; +} + +static inline void mtk_ddp_comp_clk_disable(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->clk_disable) + comp->funcs->clk_disable(comp->dev); +} + +static inline +enum drm_mode_status mtk_ddp_comp_mode_valid(struct mtk_ddp_comp *comp, + const struct drm_display_mode *mode) +{ + if (comp && comp->funcs && comp->funcs->mode_valid) + return comp->funcs->mode_valid(comp->dev, mode); + return MODE_OK; +} + +static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp, + unsigned int w, unsigned int h, + unsigned int vrefresh, unsigned int bpc, + struct cmdq_pkt *cmdq_pkt) +{ + if (comp->funcs && comp->funcs->config) + comp->funcs->config(comp->dev, w, h, vrefresh, bpc, cmdq_pkt); +} + +static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->start) + comp->funcs->start(comp->dev); +} + +static inline void mtk_ddp_comp_stop(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->stop) + comp->funcs->stop(comp->dev); +} + +static inline void mtk_ddp_comp_register_vblank_cb(struct mtk_ddp_comp *comp, + void (*vblank_cb)(void *), + void *vblank_cb_data) +{ + if (comp->funcs && comp->funcs->register_vblank_cb) + comp->funcs->register_vblank_cb(comp->dev, vblank_cb, + vblank_cb_data); +} + +static inline void mtk_ddp_comp_unregister_vblank_cb(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->unregister_vblank_cb) + comp->funcs->unregister_vblank_cb(comp->dev); +} + +static inline void mtk_ddp_comp_enable_vblank(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->enable_vblank) + comp->funcs->enable_vblank(comp->dev); +} + +static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->disable_vblank) + comp->funcs->disable_vblank(comp->dev); +} + +static inline +unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->supported_rotations) + return comp->funcs->supported_rotations(comp->dev); + + /* + * In order to pass IGT tests, DRM_MODE_ROTATE_0 is required when + * rotation is not supported. 
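 *
 * This value is what mtk_crtc_init_comp_planes() passes to
 * mtk_plane_init(), so every plane advertises at least DRM_MODE_ROTATE_0
 * even when the underlying component has no rotation support.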
+ */ + return DRM_MODE_ROTATE_0; +} + +static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->layer_nr) + return comp->funcs->layer_nr(comp->dev); + + return 0; +} + +static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp, + unsigned int idx, + struct mtk_plane_state *state) +{ + if (comp->funcs && comp->funcs->layer_check) + return comp->funcs->layer_check(comp->dev, idx, state); + return 0; +} + +static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp, + unsigned int idx, + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) +{ + if (comp->funcs && comp->funcs->layer_config) + comp->funcs->layer_config(comp->dev, idx, state, cmdq_pkt); +} + +static inline unsigned int mtk_ddp_gamma_get_lut_size(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->gamma_get_lut_size) + return comp->funcs->gamma_get_lut_size(comp->dev); + + return 0; +} + +static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state) +{ + if (comp->funcs && comp->funcs->gamma_set) + comp->funcs->gamma_set(comp->dev, state); +} + +static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->bgclr_in_on) + comp->funcs->bgclr_in_on(comp->dev); +} + +static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->bgclr_in_off) + comp->funcs->bgclr_in_off(comp->dev); +} + +static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state) +{ + if (comp->funcs && comp->funcs->ctm_set) + comp->funcs->ctm_set(comp->dev, state); +} + +static inline struct device *mtk_ddp_comp_dma_dev_get(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->dma_dev_get) + return comp->funcs->dma_dev_get(comp->dev); + return comp->dev; +} + +static inline +const u32 *mtk_ddp_comp_get_formats(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->get_formats) + return comp->funcs->get_formats(comp->dev); + + return NULL; +} + +static inline +size_t mtk_ddp_comp_get_num_formats(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->get_num_formats) + return comp->funcs->get_num_formats(comp->dev); + + return 0; +} + +static inline bool mtk_ddp_comp_add(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex) +{ + if (comp->funcs && comp->funcs->add) { + comp->funcs->add(comp->dev, mutex); + return true; + } + return false; +} + +static inline bool mtk_ddp_comp_remove(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex) +{ + if (comp->funcs && comp->funcs->remove) { + comp->funcs->remove(comp->dev, mutex); + return true; + } + return false; +} + +static inline bool mtk_ddp_comp_connect(struct mtk_ddp_comp *comp, struct device *mmsys_dev, + unsigned int next) +{ + if (comp->funcs && comp->funcs->connect) { + comp->funcs->connect(comp->dev, mmsys_dev, next); + return true; + } + return false; +} + +static inline bool mtk_ddp_comp_disconnect(struct mtk_ddp_comp *comp, struct device *mmsys_dev, + unsigned int next) +{ + if (comp->funcs && comp->funcs->disconnect) { + comp->funcs->disconnect(comp->dev, mmsys_dev, next); + return true; + } + return false; +} + +static inline void mtk_ddp_comp_encoder_index_set(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->encoder_index) + comp->encoder_index = (int)comp->funcs->encoder_index(comp->dev); +} + +int mtk_ddp_comp_get_id(struct device_node *node, + enum mtk_ddp_comp_type comp_type); +int mtk_find_possible_crtcs(struct 
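/*
 * The bool return of the add/remove and connect/disconnect wrappers
 * above tells the caller whether the component handled the operation
 * itself; when they return false, the CRTC pipeline code falls back to
 * the mtk_mmsys_ddp_connect()/mtk_mutex_add_comp() style helpers, as
 * seen in the hw_init/hw_fini paths.
 */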
drm_device *drm, struct device *dev); +int mtk_ddp_comp_init(struct device_node *comp_node, struct mtk_ddp_comp *comp, + unsigned int comp_id); +enum mtk_ddp_comp_type mtk_ddp_comp_get_type(unsigned int comp_id); +void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct cmdq_client_reg *cmdq_reg, void __iomem *regs, + unsigned int offset); +void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct cmdq_client_reg *cmdq_reg, void __iomem *regs, + unsigned int offset); +void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct cmdq_client_reg *cmdq_reg, void __iomem *regs, + unsigned int offset, unsigned int mask); +#endif /* MTK_DDP_COMP_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c index 40fe403086..3ce8f32b06 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_aal.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c @@ -11,9 +11,9 @@ #include #include +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" #include "mtk_disp_drv.h" -#include "mtk_drm_crtc.h" -#include "mtk_drm_ddp_comp.h" #include "mtk_drm_drv.h" #define DISP_AAL_EN 0x0000 @@ -223,7 +223,6 @@ struct platform_driver mtk_disp_aal_driver = { .remove_new = mtk_disp_aal_remove, .driver = { .name = "mediatek-disp-aal", - .owner = THIS_MODULE, .of_match_table = mtk_disp_aal_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c index 465cddce0d..df35e90dd2 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c @@ -10,9 +10,9 @@ #include #include +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" #include "mtk_disp_drv.h" -#include "mtk_drm_crtc.h" -#include "mtk_drm_ddp_comp.h" #include "mtk_drm_drv.h" #define DISP_CCORR_EN 0x0000 @@ -214,7 +214,6 @@ struct platform_driver mtk_disp_ccorr_driver = { .remove_new = mtk_disp_ccorr_remove, .driver = { .name = "mediatek-disp-ccorr", - .owner = THIS_MODULE, .of_match_table = mtk_disp_ccorr_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c index 78ea99f144..7f0085be56 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_color.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c @@ -10,9 +10,9 @@ #include #include +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" #include "mtk_disp_drv.h" -#include "mtk_drm_crtc.h" -#include "mtk_drm_ddp_comp.h" #include "mtk_drm_drv.h" #define DISP_COLOR_CFG_MAIN 0x0400 @@ -164,7 +164,6 @@ struct platform_driver mtk_disp_color_driver = { .remove_new = mtk_disp_color_remove, .driver = { .name = "mediatek-disp-color", - .owner = THIS_MODULE, .of_match_table = mtk_disp_color_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h index 90e64467ea..082ac18fe0 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h @@ -9,8 +9,8 @@ #include #include #include -#include "mtk_drm_plane.h" #include "mtk_mdp_rdma.h" +#include "mtk_plane.h" int mtk_aal_clk_enable(struct device *dev); void mtk_aal_clk_disable(struct device *dev); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c index c1bc8b00d9..ca8d1f3aca 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c @@ -11,9 +11,9 @@ #include #include +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" #include "mtk_disp_drv.h" -#include 
"mtk_drm_crtc.h" -#include "mtk_drm_ddp_comp.h" #include "mtk_drm_drv.h" #define DISP_GAMMA_EN 0x0000 @@ -334,7 +334,6 @@ struct platform_driver mtk_disp_gamma_driver = { .remove_new = mtk_disp_gamma_remove, .driver = { .name = "mediatek-disp-gamma", - .owner = THIS_MODULE, .of_match_table = mtk_disp_gamma_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c index 32a29924bd..77c057e0e6 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c @@ -10,7 +10,7 @@ #include #include -#include "mtk_drm_ddp_comp.h" +#include "mtk_ddp_comp.h" #include "mtk_drm_drv.h" #include "mtk_disp_drv.h" @@ -376,7 +376,6 @@ struct platform_driver mtk_disp_merge_driver = { .remove_new = mtk_disp_merge_remove, .driver = { .name = "mediatek-disp-merge", - .owner = THIS_MODULE, .of_match_table = mtk_disp_merge_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 2bffe42454..26b598b9f7 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -15,9 +15,9 @@ #include #include +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" #include "mtk_disp_drv.h" -#include "mtk_drm_crtc.h" -#include "mtk_drm_ddp_comp.h" #include "mtk_drm_drv.h" #define DISP_REG_OVL_INTEN 0x0004 @@ -38,6 +38,7 @@ #define DISP_REG_OVL_PITCH_MSB(n) (0x0040 + 0x20 * (n)) #define OVL_PITCH_MSB_2ND_SUBBUF BIT(16) #define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n)) +#define OVL_CONST_BLEND BIT(28) #define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n)) #define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n)) #define DISP_REG_OVL_ADDR_MT2701 0x0040 @@ -71,6 +72,8 @@ #define OVL_CON_VIRT_FLIP BIT(9) #define OVL_CON_HORZ_FLIP BIT(10) +#define OVL_COLOR_ALPHA GENMASK(31, 24) + static const u32 mt8173_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, @@ -273,7 +276,13 @@ void mtk_ovl_config(struct device *dev, unsigned int w, if (w != 0 && h != 0) mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_ROI_SIZE); - mtk_ddp_write_relaxed(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_ROI_BGCLR); + + /* + * The background color must be opaque black (ARGB), + * otherwise the alpha blending will have no effect + */ + mtk_ddp_write_relaxed(cmdq_pkt, OVL_COLOR_ALPHA, &ovl->cmdq_reg, + ovl->regs, DISP_REG_OVL_ROI_BGCLR); mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST); mtk_ddp_write(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST); @@ -296,27 +305,20 @@ int mtk_ovl_layer_check(struct device *dev, unsigned int idx, struct mtk_plane_state *mtk_state) { struct drm_plane_state *state = &mtk_state->base; - unsigned int rotation = 0; - rotation = drm_rotation_simplify(state->rotation, - DRM_MODE_ROTATE_0 | - DRM_MODE_REFLECT_X | - DRM_MODE_REFLECT_Y); - rotation &= ~DRM_MODE_ROTATE_0; - - /* We can only do reflection, not rotation */ - if ((rotation & DRM_MODE_ROTATE_MASK) != 0) + /* check if any unsupported rotation is set */ + if (state->rotation & ~mtk_ovl_supported_rotations(dev)) return -EINVAL; /* * TODO: Rotating/reflecting YUV buffers is not supported at this time. * Only RGB[AX] variants are supported. + * Since DRM_MODE_ROTATE_0 means "no rotation", we should not + * reject layers with this property. 
*/ - if (state->fb->format->is_yuv && rotation != 0) + if (state->fb->format->is_yuv && (state->rotation & ~DRM_MODE_ROTATE_0)) return -EINVAL; - state->rotation = rotation; - return 0; } @@ -407,6 +409,7 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx, unsigned int fmt = pending->format; unsigned int offset = (pending->y << 16) | pending->x; unsigned int src_size = (pending->height << 16) | pending->width; + unsigned int ignore_pixel_alpha = 0; unsigned int con; bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR; union overlay_pitch { @@ -428,6 +431,14 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx, if (state->base.fb && state->base.fb->format->has_alpha) con |= OVL_CON_AEN | OVL_CON_ALPHA; + /* CONST_BLD must be enabled for XRGB formats although the alpha channel + * can be ignored, or OVL will still read the value from memory. + * For RGB888 related formats, whether CONST_BLD is enabled or not won't + * affect the result. Therefore we use !has_alpha as the condition. + */ + if (state->base.fb && !state->base.fb->format->has_alpha) + ignore_pixel_alpha = OVL_CONST_BLEND; + if (pending->rotation & DRM_MODE_REFLECT_Y) { con |= OVL_CON_VIRT_FLIP; addr += (pending->height - 1) * pending->pitch; @@ -443,8 +454,8 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx, mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_CON(idx)); - mtk_ddp_write_relaxed(cmdq_pkt, overlay_pitch.split_pitch.lsb, &ovl->cmdq_reg, ovl->regs, - DISP_REG_OVL_PITCH(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, overlay_pitch.split_pitch.lsb | ignore_pixel_alpha, + &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH(idx)); mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_SRC_SIZE(idx)); mtk_ddp_write_relaxed(cmdq_pkt, offset, &ovl->cmdq_reg, ovl->regs, @@ -659,7 +670,6 @@ struct platform_driver mtk_disp_ovl_driver = { .remove_new = mtk_disp_ovl_remove, .driver = { .name = "mediatek-disp-ovl", - .owner = THIS_MODULE, .of_match_table = mtk_disp_ovl_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c index 034d31824d..2b62d64759 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c @@ -17,9 +17,9 @@ #include #include +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" #include "mtk_disp_drv.h" -#include "mtk_drm_crtc.h" -#include "mtk_drm_ddp_comp.h" #include "mtk_drm_drv.h" #include "mtk_ethdr.h" @@ -158,7 +158,7 @@ void mtk_ovl_adaptor_layer_config(struct device *dev, unsigned int idx, merge = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MERGE0 + idx]; ethdr = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]; - if (!pending->enable) { + if (!pending->enable || !pending->width || !pending->height) { mtk_merge_stop_cmdq(merge, cmdq_pkt); mtk_mdp_rdma_stop(rdma_l, cmdq_pkt); mtk_mdp_rdma_stop(rdma_r, cmdq_pkt); @@ -629,6 +629,5 @@ struct platform_driver mtk_disp_ovl_adaptor_driver = { .remove_new = mtk_disp_ovl_adaptor_remove, .driver = { .name = "mediatek-disp-ovl-adaptor", - .owner = THIS_MODULE, }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index faa907f2f4..7b1a6e6312 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -13,9 +13,9 @@ #include #include +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" #include "mtk_disp_drv.h" -#include "mtk_drm_crtc.h" -#include 
"mtk_drm_ddp_comp.h" #include "mtk_drm_drv.h" #define DISP_REG_RDMA_INT_ENABLE 0x0000 @@ -428,7 +428,6 @@ struct platform_driver mtk_disp_rdma_driver = { .remove_new = mtk_disp_rdma_remove, .driver = { .name = "mediatek-disp-rdma", - .owner = THIS_MODULE, .of_match_table = mtk_disp_rdma_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index 5363669564..ada12927bb 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -2073,9 +2073,15 @@ static const struct drm_edid *mtk_dp_edid_read(struct drm_bridge *bridge, */ const struct edid *edid = drm_edid_raw(drm_edid); struct cea_sad *sads; + int ret; - audio_caps->sad_count = drm_edid_to_sad(edid, &sads); - kfree(sads); + ret = drm_edid_to_sad(edid, &sads); + /* Ignore any errors */ + if (ret < 0) + ret = 0; + if (ret) + kfree(sads); + audio_caps->sad_count = ret; /* * FIXME: This should use connector->display_info.has_audio from diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index beb7d9d08e..a08d206549 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -26,9 +26,9 @@ #include #include +#include "mtk_ddp_comp.h" #include "mtk_disp_drv.h" #include "mtk_dpi_regs.h" -#include "mtk_drm_ddp_comp.h" #include "mtk_drm_drv.h" enum mtk_dpi_out_bit_num { @@ -805,7 +805,10 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data) return ret; } - dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->dev); + ret = mtk_find_possible_crtcs(drm_dev, dpi->dev); + if (ret < 0) + goto err_cleanup; + dpi->encoder.possible_crtcs = ret; ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c deleted file mode 100644 index 29207b2756..0000000000 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ /dev/null @@ -1,1146 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2015 MediaTek Inc. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - -#include "mtk_drm_drv.h" -#include "mtk_drm_crtc.h" -#include "mtk_drm_ddp_comp.h" -#include "mtk_drm_gem.h" -#include "mtk_drm_plane.h" - -/* - * struct mtk_drm_crtc - MediaTek specific crtc structure. - * @base: crtc object. - * @enabled: records whether crtc_enable succeeded - * @planes: array of 4 drm_plane structures, one for each overlay plane - * @pending_planes: whether any plane has pending changes to be applied - * @mmsys_dev: pointer to the mmsys device for configuration registers - * @mutex: handle to one of the ten disp_mutex streams - * @ddp_comp_nr: number of components in ddp_comp - * @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc - * - * TODO: Needs update: this header is missing a bunch of member descriptions. 
- */ -struct mtk_drm_crtc { - struct drm_crtc base; - bool enabled; - - bool pending_needs_vblank; - struct drm_pending_vblank_event *event; - - struct drm_plane *planes; - unsigned int layer_nr; - bool pending_planes; - bool pending_async_planes; - -#if IS_REACHABLE(CONFIG_MTK_CMDQ) - struct cmdq_client cmdq_client; - struct cmdq_pkt cmdq_handle; - u32 cmdq_event; - u32 cmdq_vblank_cnt; - wait_queue_head_t cb_blocking_queue; -#endif - - struct device *mmsys_dev; - struct device *dma_dev; - struct mtk_mutex *mutex; - unsigned int ddp_comp_nr; - struct mtk_ddp_comp **ddp_comp; - unsigned int num_conn_routes; - const struct mtk_drm_route *conn_routes; - - /* lock for display hardware access */ - struct mutex hw_lock; - bool config_updating; -}; - -struct mtk_crtc_state { - struct drm_crtc_state base; - - bool pending_config; - unsigned int pending_width; - unsigned int pending_height; - unsigned int pending_vrefresh; -}; - -static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c) -{ - return container_of(c, struct mtk_drm_crtc, base); -} - -static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s) -{ - return container_of(s, struct mtk_crtc_state, base); -} - -static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc) -{ - struct drm_crtc *crtc = &mtk_crtc->base; - unsigned long flags; - - if (mtk_crtc->event) { - spin_lock_irqsave(&crtc->dev->event_lock, flags); - drm_crtc_send_vblank_event(crtc, mtk_crtc->event); - drm_crtc_vblank_put(crtc); - mtk_crtc->event = NULL; - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - } -} - -static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc) -{ - drm_crtc_handle_vblank(&mtk_crtc->base); - if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) { - mtk_drm_crtc_finish_page_flip(mtk_crtc); - mtk_crtc->pending_needs_vblank = false; - } -} - -#if IS_REACHABLE(CONFIG_MTK_CMDQ) -static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, - size_t size) -{ - struct device *dev; - dma_addr_t dma_addr; - - pkt->va_base = kzalloc(size, GFP_KERNEL); - if (!pkt->va_base) - return -ENOMEM; - - pkt->buf_size = size; - pkt->cl = (void *)client; - - dev = client->chan->mbox->dev; - dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_addr)) { - dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size); - kfree(pkt->va_base); - return -ENOMEM; - } - - pkt->pa_base = dma_addr; - - return 0; -} - -static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt) -{ - struct cmdq_client *client = (struct cmdq_client *)pkt->cl; - - dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size, - DMA_TO_DEVICE); - kfree(pkt->va_base); -} -#endif - -static void mtk_drm_crtc_destroy(struct drm_crtc *crtc) -{ - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - int i; - - mtk_mutex_put(mtk_crtc->mutex); -#if IS_REACHABLE(CONFIG_MTK_CMDQ) - mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle); - - if (mtk_crtc->cmdq_client.chan) { - mbox_free_channel(mtk_crtc->cmdq_client.chan); - mtk_crtc->cmdq_client.chan = NULL; - } -#endif - - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { - struct mtk_ddp_comp *comp; - - comp = mtk_crtc->ddp_comp[i]; - mtk_ddp_comp_unregister_vblank_cb(comp); - } - - drm_crtc_cleanup(crtc); -} - -static void mtk_drm_crtc_reset(struct drm_crtc *crtc) -{ - struct mtk_crtc_state *state; - - if (crtc->state) - __drm_atomic_helper_crtc_destroy_state(crtc->state); - - kfree(to_mtk_crtc_state(crtc->state)); 
- crtc->state = NULL; - - state = kzalloc(sizeof(*state), GFP_KERNEL); - if (state) - __drm_atomic_helper_crtc_reset(crtc, &state->base); -} - -static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc) -{ - struct mtk_crtc_state *state; - - state = kmalloc(sizeof(*state), GFP_KERNEL); - if (!state) - return NULL; - - __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); - - WARN_ON(state->base.crtc != crtc); - state->base.crtc = crtc; - state->pending_config = false; - - return &state->base; -} - -static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ - __drm_atomic_helper_crtc_destroy_state(state); - kfree(to_mtk_crtc_state(state)); -} - -static enum drm_mode_status -mtk_drm_crtc_mode_valid(struct drm_crtc *crtc, - const struct drm_display_mode *mode) -{ - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - enum drm_mode_status status = MODE_OK; - int i; - - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { - status = mtk_ddp_comp_mode_valid(mtk_crtc->ddp_comp[i], mode); - if (status != MODE_OK) - break; - } - return status; -} - -static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - /* Nothing to do here, but this callback is mandatory. */ - return true; -} - -static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) -{ - struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state); - - state->pending_width = crtc->mode.hdisplay; - state->pending_height = crtc->mode.vdisplay; - state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode); - wmb(); /* Make sure the above parameters are set before update */ - state->pending_config = true; -} - -static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) -{ - int ret; - int i; - - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { - ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]); - if (ret) { - DRM_ERROR("Failed to enable clock %d: %d\n", i, ret); - goto err; - } - } - - return 0; -err: - while (--i >= 0) - mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]); - return ret; -} - -static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc) -{ - int i; - - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) - mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]); -} - -static -struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, - struct drm_plane *plane, - unsigned int *local_layer) -{ - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *comp; - int i, count = 0; - unsigned int local_index = plane - mtk_crtc->planes; - - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { - comp = mtk_crtc->ddp_comp[i]; - if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) { - *local_layer = local_index - count; - return comp; - } - count += mtk_ddp_comp_layer_nr(comp); - } - - WARN(1, "Failed to find component for plane %d\n", plane->index); - return NULL; -} - -#if IS_REACHABLE(CONFIG_MTK_CMDQ) -static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) -{ - struct cmdq_cb_data *data = mssg; - struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client); - struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client); - struct mtk_crtc_state *state; - unsigned int i; - - if (data->sta < 0) - return; - - state = to_mtk_crtc_state(mtk_crtc->base.state); - - state->pending_config = false; - - if (mtk_crtc->pending_planes) { - for (i = 0; i < mtk_crtc->layer_nr; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i]; - 
struct mtk_plane_state *plane_state; - - plane_state = to_mtk_plane_state(plane->state); - - plane_state->pending.config = false; - } - mtk_crtc->pending_planes = false; - } - - if (mtk_crtc->pending_async_planes) { - for (i = 0; i < mtk_crtc->layer_nr; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i]; - struct mtk_plane_state *plane_state; - - plane_state = to_mtk_plane_state(plane->state); - - plane_state->pending.async_config = false; - } - mtk_crtc->pending_async_planes = false; - } - - mtk_crtc->cmdq_vblank_cnt = 0; - wake_up(&mtk_crtc->cb_blocking_queue); -} -#endif - -static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) -{ - struct drm_crtc *crtc = &mtk_crtc->base; - struct drm_connector *connector; - struct drm_encoder *encoder; - struct drm_connector_list_iter conn_iter; - unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC; - int ret; - int i; - - if (WARN_ON(!crtc->state)) - return -EINVAL; - - width = crtc->state->adjusted_mode.hdisplay; - height = crtc->state->adjusted_mode.vdisplay; - vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode); - - drm_for_each_encoder(encoder, crtc->dev) { - if (encoder->crtc != crtc) - continue; - - drm_connector_list_iter_begin(crtc->dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - if (connector->encoder != encoder) - continue; - if (connector->display_info.bpc != 0 && - bpc > connector->display_info.bpc) - bpc = connector->display_info.bpc; - } - drm_connector_list_iter_end(&conn_iter); - } - - ret = pm_runtime_resume_and_get(crtc->dev->dev); - if (ret < 0) { - DRM_ERROR("Failed to enable power domain: %d\n", ret); - return ret; - } - - ret = mtk_mutex_prepare(mtk_crtc->mutex); - if (ret < 0) { - DRM_ERROR("Failed to enable mutex clock: %d\n", ret); - goto err_pm_runtime_put; - } - - ret = mtk_crtc_ddp_clk_enable(mtk_crtc); - if (ret < 0) { - DRM_ERROR("Failed to enable component clocks: %d\n", ret); - goto err_mutex_unprepare; - } - - for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) { - if (!mtk_ddp_comp_connect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev, - mtk_crtc->ddp_comp[i + 1]->id)) - mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev, - mtk_crtc->ddp_comp[i]->id, - mtk_crtc->ddp_comp[i + 1]->id); - if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) - mtk_mutex_add_comp(mtk_crtc->mutex, - mtk_crtc->ddp_comp[i]->id); - } - if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) - mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id); - mtk_mutex_enable(mtk_crtc->mutex); - - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { - struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i]; - - if (i == 1) - mtk_ddp_comp_bgclr_in_on(comp); - - mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL); - mtk_ddp_comp_start(comp); - } - - /* Initially configure all planes */ - for (i = 0; i < mtk_crtc->layer_nr; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i]; - struct mtk_plane_state *plane_state; - struct mtk_ddp_comp *comp; - unsigned int local_layer; - - plane_state = to_mtk_plane_state(plane->state); - - /* should not enable layer before crtc enabled */ - plane_state->pending.enable = false; - comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - if (comp) - mtk_ddp_comp_layer_config(comp, local_layer, - plane_state, NULL); - } - - return 0; - -err_mutex_unprepare: - mtk_mutex_unprepare(mtk_crtc->mutex); -err_pm_runtime_put: - pm_runtime_put(crtc->dev->dev); - return ret; -} - -static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) -{ - struct drm_device 
*drm = mtk_crtc->base.dev; - struct drm_crtc *crtc = &mtk_crtc->base; - int i; - - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { - mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]); - if (i == 1) - mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]); - } - - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) - if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) - mtk_mutex_remove_comp(mtk_crtc->mutex, - mtk_crtc->ddp_comp[i]->id); - mtk_mutex_disable(mtk_crtc->mutex); - for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) { - if (!mtk_ddp_comp_disconnect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev, - mtk_crtc->ddp_comp[i + 1]->id)) - mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev, - mtk_crtc->ddp_comp[i]->id, - mtk_crtc->ddp_comp[i + 1]->id); - if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) - mtk_mutex_remove_comp(mtk_crtc->mutex, - mtk_crtc->ddp_comp[i]->id); - } - if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) - mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id); - mtk_crtc_ddp_clk_disable(mtk_crtc); - mtk_mutex_unprepare(mtk_crtc->mutex); - - pm_runtime_put(drm->dev); - - if (crtc->state->event && !crtc->state->active) { - spin_lock_irq(&crtc->dev->event_lock); - drm_crtc_send_vblank_event(crtc, crtc->state->event); - crtc->state->event = NULL; - spin_unlock_irq(&crtc->dev->event_lock); - } -} - -static void mtk_crtc_ddp_config(struct drm_crtc *crtc, - struct cmdq_pkt *cmdq_handle) -{ - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); - struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; - unsigned int i; - unsigned int local_layer; - - /* - * TODO: instead of updating the registers here, we should prepare - * working registers in atomic_commit and let the hardware command - * queue update module registers on vblank. 
- */ - if (state->pending_config) { - mtk_ddp_comp_config(comp, state->pending_width, - state->pending_height, - state->pending_vrefresh, 0, - cmdq_handle); - - if (!cmdq_handle) - state->pending_config = false; - } - - if (mtk_crtc->pending_planes) { - for (i = 0; i < mtk_crtc->layer_nr; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i]; - struct mtk_plane_state *plane_state; - - plane_state = to_mtk_plane_state(plane->state); - - if (!plane_state->pending.config) - continue; - - comp = mtk_drm_ddp_comp_for_plane(crtc, plane, - &local_layer); - - if (comp) - mtk_ddp_comp_layer_config(comp, local_layer, - plane_state, - cmdq_handle); - if (!cmdq_handle) - plane_state->pending.config = false; - } - - if (!cmdq_handle) - mtk_crtc->pending_planes = false; - } - - if (mtk_crtc->pending_async_planes) { - for (i = 0; i < mtk_crtc->layer_nr; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i]; - struct mtk_plane_state *plane_state; - - plane_state = to_mtk_plane_state(plane->state); - - if (!plane_state->pending.async_config) - continue; - - comp = mtk_drm_ddp_comp_for_plane(crtc, plane, - &local_layer); - - if (comp) - mtk_ddp_comp_layer_config(comp, local_layer, - plane_state, - cmdq_handle); - if (!cmdq_handle) - plane_state->pending.async_config = false; - } - - if (!cmdq_handle) - mtk_crtc->pending_async_planes = false; - } -} - -static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc, - bool needs_vblank) -{ -#if IS_REACHABLE(CONFIG_MTK_CMDQ) - struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle; -#endif - struct drm_crtc *crtc = &mtk_crtc->base; - struct mtk_drm_private *priv = crtc->dev->dev_private; - unsigned int pending_planes = 0, pending_async_planes = 0; - int i; - - mutex_lock(&mtk_crtc->hw_lock); - mtk_crtc->config_updating = true; - if (needs_vblank) - mtk_crtc->pending_needs_vblank = true; - - for (i = 0; i < mtk_crtc->layer_nr; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i]; - struct mtk_plane_state *plane_state; - - plane_state = to_mtk_plane_state(plane->state); - if (plane_state->pending.dirty) { - plane_state->pending.config = true; - plane_state->pending.dirty = false; - pending_planes |= BIT(i); - } else if (plane_state->pending.async_dirty) { - plane_state->pending.async_config = true; - plane_state->pending.async_dirty = false; - pending_async_planes |= BIT(i); - } - } - if (pending_planes) - mtk_crtc->pending_planes = true; - if (pending_async_planes) - mtk_crtc->pending_async_planes = true; - - if (priv->data->shadow_register) { - mtk_mutex_acquire(mtk_crtc->mutex); - mtk_crtc_ddp_config(crtc, NULL); - mtk_mutex_release(mtk_crtc->mutex); - } -#if IS_REACHABLE(CONFIG_MTK_CMDQ) - if (mtk_crtc->cmdq_client.chan) { - mbox_flush(mtk_crtc->cmdq_client.chan, 2000); - cmdq_handle->cmd_buf_size = 0; - cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); - cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false); - mtk_crtc_ddp_config(crtc, cmdq_handle); - cmdq_pkt_finalize(cmdq_handle); - dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev, - cmdq_handle->pa_base, - cmdq_handle->cmd_buf_size, - DMA_TO_DEVICE); - /* - * CMDQ command should execute in next 3 vblank. - * One vblank interrupt before send message (occasionally) - * and one vblank interrupt after cmdq done, - * so it's timeout after 3 vblank interrupt. - * If it fail to execute in next 3 vblank, timeout happen. 
- */ - mtk_crtc->cmdq_vblank_cnt = 3; - - mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle); - mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0); - } -#endif - mtk_crtc->config_updating = false; - mutex_unlock(&mtk_crtc->hw_lock); -} - -static void mtk_crtc_ddp_irq(void *data) -{ - struct drm_crtc *crtc = data; - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_drm_private *priv = crtc->dev->dev_private; - -#if IS_REACHABLE(CONFIG_MTK_CMDQ) - if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan) - mtk_crtc_ddp_config(crtc, NULL); - else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0) - DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n", - drm_crtc_index(&mtk_crtc->base)); -#else - if (!priv->data->shadow_register) - mtk_crtc_ddp_config(crtc, NULL); -#endif - mtk_drm_finish_page_flip(mtk_crtc); -} - -static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) -{ - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; - - mtk_ddp_comp_enable_vblank(comp); - - return 0; -} - -static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) -{ - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; - - mtk_ddp_comp_disable_vblank(comp); -} - -static void mtk_drm_crtc_update_output(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ - int crtc_index = drm_crtc_index(crtc); - int i; - struct device *dev; - struct drm_crtc_state *crtc_state = state->crtcs[crtc_index].new_state; - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_drm_private *priv; - unsigned int encoder_mask = crtc_state->encoder_mask; - - if (!crtc_state->connectors_changed) - return; - - if (!mtk_crtc->num_conn_routes) - return; - - priv = ((struct mtk_drm_private *)crtc->dev->dev_private)->all_drm_private[crtc_index]; - dev = priv->dev; - - dev_dbg(dev, "connector change:%d, encoder mask:0x%x for crtc:%d\n", - crtc_state->connectors_changed, encoder_mask, crtc_index); - - for (i = 0; i < mtk_crtc->num_conn_routes; i++) { - unsigned int comp_id = mtk_crtc->conn_routes[i].route_ddp; - struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id]; - - if (comp->encoder_index >= 0 && - (encoder_mask & BIT(comp->encoder_index))) { - mtk_crtc->ddp_comp[mtk_crtc->ddp_comp_nr - 1] = comp; - dev_dbg(dev, "Add comp_id: %d at path index %d\n", - comp->id, mtk_crtc->ddp_comp_nr - 1); - break; - } - } -} - -int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, - struct mtk_plane_state *state) -{ - unsigned int local_layer; - struct mtk_ddp_comp *comp; - - comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - if (comp) - return mtk_ddp_comp_layer_check(comp, local_layer, state); - return 0; -} - -void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - - if (!mtk_crtc->enabled) - return; - - mtk_drm_crtc_update_config(mtk_crtc, false); -} - -static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; - int ret; - - DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); - - ret = mtk_ddp_comp_power_on(comp); - if (ret < 0) { - DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret); - return; - } - - mtk_drm_crtc_update_output(crtc, state); - - ret = 
mtk_crtc_ddp_hw_init(mtk_crtc); - if (ret) { - mtk_ddp_comp_power_off(comp); - return; - } - - drm_crtc_vblank_on(crtc); - mtk_crtc->enabled = true; -} - -static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; - int i; - - DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); - if (!mtk_crtc->enabled) - return; - - /* Set all pending plane state to disabled */ - for (i = 0; i < mtk_crtc->layer_nr; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i]; - struct mtk_plane_state *plane_state; - - plane_state = to_mtk_plane_state(plane->state); - plane_state->pending.enable = false; - plane_state->pending.config = true; - } - mtk_crtc->pending_planes = true; - - mtk_drm_crtc_update_config(mtk_crtc, false); -#if IS_REACHABLE(CONFIG_MTK_CMDQ) - /* Wait for planes to be disabled by cmdq */ - if (mtk_crtc->cmdq_client.chan) - wait_event_timeout(mtk_crtc->cb_blocking_queue, - mtk_crtc->cmdq_vblank_cnt == 0, - msecs_to_jiffies(500)); -#endif - /* Wait for planes to be disabled */ - drm_crtc_wait_one_vblank(crtc); - - drm_crtc_vblank_off(crtc); - mtk_crtc_ddp_hw_fini(mtk_crtc); - mtk_ddp_comp_power_off(comp); - - mtk_crtc->enabled = false; -} - -static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ - struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, - crtc); - struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state); - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - unsigned long flags; - - if (mtk_crtc->event && mtk_crtc_state->base.event) - DRM_ERROR("new event while there is still a pending event\n"); - - if (mtk_crtc_state->base.event) { - mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc); - WARN_ON(drm_crtc_vblank_get(crtc) != 0); - - spin_lock_irqsave(&crtc->dev->event_lock, flags); - mtk_crtc->event = mtk_crtc_state->base.event; - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - - mtk_crtc_state->base.event = NULL; - } -} - -static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - int i; - - if (crtc->state->color_mgmt_changed) - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { - mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state); - mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state); - } - mtk_drm_crtc_update_config(mtk_crtc, !!mtk_crtc->event); -} - -static const struct drm_crtc_funcs mtk_crtc_funcs = { - .set_config = drm_atomic_helper_set_config, - .page_flip = drm_atomic_helper_page_flip, - .destroy = mtk_drm_crtc_destroy, - .reset = mtk_drm_crtc_reset, - .atomic_duplicate_state = mtk_drm_crtc_duplicate_state, - .atomic_destroy_state = mtk_drm_crtc_destroy_state, - .enable_vblank = mtk_drm_crtc_enable_vblank, - .disable_vblank = mtk_drm_crtc_disable_vblank, -}; - -static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = { - .mode_fixup = mtk_drm_crtc_mode_fixup, - .mode_set_nofb = mtk_drm_crtc_mode_set_nofb, - .mode_valid = mtk_drm_crtc_mode_valid, - .atomic_begin = mtk_drm_crtc_atomic_begin, - .atomic_flush = mtk_drm_crtc_atomic_flush, - .atomic_enable = mtk_drm_crtc_atomic_enable, - .atomic_disable = mtk_drm_crtc_atomic_disable, -}; - -static int mtk_drm_crtc_init(struct drm_device *drm, - struct mtk_drm_crtc *mtk_crtc, - unsigned int pipe) -{ - struct drm_plane *primary = NULL; - struct drm_plane *cursor = NULL; - int i, ret; - - for 
(i = 0; i < mtk_crtc->layer_nr; i++) { - if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY) - primary = &mtk_crtc->planes[i]; - else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR) - cursor = &mtk_crtc->planes[i]; - } - - ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor, - &mtk_crtc_funcs, NULL); - if (ret) - goto err_cleanup_crtc; - - drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs); - - return 0; - -err_cleanup_crtc: - drm_crtc_cleanup(&mtk_crtc->base); - return ret; -} - -static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc, - int comp_idx) -{ - struct mtk_ddp_comp *comp; - - if (comp_idx > 1) - return 0; - - comp = mtk_crtc->ddp_comp[comp_idx]; - if (!comp->funcs) - return 0; - - if (comp_idx == 1 && !comp->funcs->bgclr_in_on) - return 0; - - return mtk_ddp_comp_layer_nr(comp); -} - -static inline -enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx, - unsigned int num_planes) -{ - if (plane_idx == 0) - return DRM_PLANE_TYPE_PRIMARY; - else if (plane_idx == (num_planes - 1)) - return DRM_PLANE_TYPE_CURSOR; - else - return DRM_PLANE_TYPE_OVERLAY; - -} - -static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev, - struct mtk_drm_crtc *mtk_crtc, - int comp_idx, int pipe) -{ - int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx); - struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx]; - int i, ret; - - for (i = 0; i < num_planes; i++) { - ret = mtk_plane_init(drm_dev, - &mtk_crtc->planes[mtk_crtc->layer_nr], - BIT(pipe), - mtk_drm_crtc_plane_type(mtk_crtc->layer_nr, - num_planes), - mtk_ddp_comp_supported_rotations(comp), - mtk_ddp_comp_get_formats(comp), - mtk_ddp_comp_get_num_formats(comp)); - if (ret) - return ret; - - mtk_crtc->layer_nr++; - } - return 0; -} - -struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc) -{ - struct mtk_drm_crtc *mtk_crtc = NULL; - - if (!crtc) - return NULL; - - mtk_crtc = to_mtk_crtc(crtc); - if (!mtk_crtc) - return NULL; - - return mtk_crtc->dma_dev; -} - -int mtk_drm_crtc_create(struct drm_device *drm_dev, - const unsigned int *path, unsigned int path_len, - int priv_data_index, const struct mtk_drm_route *conn_routes, - unsigned int num_conn_routes) -{ - struct mtk_drm_private *priv = drm_dev->dev_private; - struct device *dev = drm_dev->dev; - struct mtk_drm_crtc *mtk_crtc; - unsigned int num_comp_planes = 0; - int ret; - int i; - bool has_ctm = false; - uint gamma_lut_size = 0; - struct drm_crtc *tmp; - int crtc_i = 0; - - if (!path) - return 0; - - priv = priv->all_drm_private[priv_data_index]; - - drm_for_each_crtc(tmp, drm_dev) - crtc_i++; - - for (i = 0; i < path_len; i++) { - enum mtk_ddp_comp_id comp_id = path[i]; - struct device_node *node; - struct mtk_ddp_comp *comp; - - node = priv->comp_node[comp_id]; - comp = &priv->ddp_comp[comp_id]; - - /* Not all drm components have a DTS device node, such as ovl_adaptor, - * which is the drm bring up sub driver - */ - if (!node && comp_id != DDP_COMPONENT_DRM_OVL_ADAPTOR) { - dev_info(dev, - "Not creating crtc %d because component %d is disabled or missing\n", - crtc_i, comp_id); - return 0; - } - - if (!comp->dev) { - dev_err(dev, "Component %pOF not initialized\n", node); - return -ENODEV; - } - } - - mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL); - if (!mtk_crtc) - return -ENOMEM; - - mtk_crtc->mmsys_dev = priv->mmsys_dev; - mtk_crtc->ddp_comp_nr = path_len; - mtk_crtc->ddp_comp = devm_kcalloc(dev, - mtk_crtc->ddp_comp_nr + (conn_routes ? 
1 : 0), - sizeof(*mtk_crtc->ddp_comp), - GFP_KERNEL); - if (!mtk_crtc->ddp_comp) - return -ENOMEM; - - mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev); - if (IS_ERR(mtk_crtc->mutex)) { - ret = PTR_ERR(mtk_crtc->mutex); - dev_err(dev, "Failed to get mutex: %d\n", ret); - return ret; - } - - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { - unsigned int comp_id = path[i]; - struct mtk_ddp_comp *comp; - - comp = &priv->ddp_comp[comp_id]; - mtk_crtc->ddp_comp[i] = comp; - - if (comp->funcs) { - if (comp->funcs->gamma_set && comp->funcs->gamma_get_lut_size) { - unsigned int lut_sz = mtk_ddp_gamma_get_lut_size(comp); - - if (lut_sz) - gamma_lut_size = lut_sz; - } - - if (comp->funcs->ctm_set) - has_ctm = true; - } - - mtk_ddp_comp_register_vblank_cb(comp, mtk_crtc_ddp_irq, - &mtk_crtc->base); - } - - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) - num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i); - - mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes, - sizeof(struct drm_plane), GFP_KERNEL); - if (!mtk_crtc->planes) - return -ENOMEM; - - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { - ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i, - crtc_i); - if (ret) - return ret; - } - - /* - * Default to use the first component as the dma dev. - * In the case of ovl_adaptor sub driver, it needs to use the - * dma_dev_get function to get representative dma dev. - */ - mtk_crtc->dma_dev = mtk_ddp_comp_dma_dev_get(&priv->ddp_comp[path[0]]); - - ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, crtc_i); - if (ret < 0) - return ret; - - if (gamma_lut_size) - drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size); - drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size); - mutex_init(&mtk_crtc->hw_lock); - -#if IS_REACHABLE(CONFIG_MTK_CMDQ) - i = priv->mbox_index++; - mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev; - mtk_crtc->cmdq_client.client.tx_block = false; - mtk_crtc->cmdq_client.client.knows_txdone = true; - mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb; - mtk_crtc->cmdq_client.chan = - mbox_request_channel(&mtk_crtc->cmdq_client.client, i); - if (IS_ERR(mtk_crtc->cmdq_client.chan)) { - dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", - drm_crtc_index(&mtk_crtc->base)); - mtk_crtc->cmdq_client.chan = NULL; - } - - if (mtk_crtc->cmdq_client.chan) { - ret = of_property_read_u32_index(priv->mutex_node, - "mediatek,gce-events", - i, - &mtk_crtc->cmdq_event); - if (ret) { - dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n", - drm_crtc_index(&mtk_crtc->base)); - mbox_free_channel(mtk_crtc->cmdq_client.chan); - mtk_crtc->cmdq_client.chan = NULL; - } else { - ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client, - &mtk_crtc->cmdq_handle, - PAGE_SIZE); - if (ret) { - dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n", - drm_crtc_index(&mtk_crtc->base)); - mbox_free_channel(mtk_crtc->cmdq_client.chan); - mtk_crtc->cmdq_client.chan = NULL; - } - } - - /* for sending blocking cmd in crtc disable */ - init_waitqueue_head(&mtk_crtc->cb_blocking_queue); - } -#endif - - if (conn_routes) { - for (i = 0; i < num_conn_routes; i++) { - unsigned int comp_id = conn_routes[i].route_ddp; - struct device_node *node = priv->comp_node[comp_id]; - struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id]; - - if (!comp->dev) { - dev_dbg(dev, "comp_id:%d, Component %pOF not initialized\n", - comp_id, node); - /* mark encoder_index to -1, if route comp device is not enabled */ - comp->encoder_index = -1; - continue; 
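/*
 * Illustrative aside: the CMDQ setup above is strictly optional - if the
 * mailbox channel, the GCE event or the packet cannot be obtained, the
 * channel pointer is reset to NULL and the driver falls back to writing
 * the registers from the CPU.  A tiny sketch of that "optional fast path,
 * mandatory slow path" shape with made-up helpers, not driver code:
 */
#include <stdio.h>

struct fast_engine;		/* opaque stand-in for the mailbox channel */

static struct fast_engine *fast_engine_get(void)
{
	return NULL;		/* pretend the optional engine is unavailable */
}

static void commit_fast(struct fast_engine *e, unsigned int val)
{
	printf("queued write 0x%x\n", val);
}

static void commit_cpu(unsigned int val)
{
	printf("direct write 0x%x\n", val);
}

int main(void)
{
	struct fast_engine *engine = fast_engine_get();	/* may be NULL */

	if (engine)
		commit_fast(engine, 0x1234);
	else
		commit_cpu(0x1234);	/* functional either way, just slower */

	return 0;
}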
- } - - mtk_ddp_comp_encoder_index_set(&priv->ddp_comp[comp_id]); - } - - mtk_crtc->num_conn_routes = num_conn_routes; - mtk_crtc->conn_routes = conn_routes; - - /* increase ddp_comp_nr at the end of mtk_drm_crtc_create */ - mtk_crtc->ddp_comp_nr++; - } - - return 0; -} diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h deleted file mode 100644 index 1f988ff1bf..0000000000 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2015 MediaTek Inc. - */ - -#ifndef MTK_DRM_CRTC_H -#define MTK_DRM_CRTC_H - -#include -#include "mtk_drm_ddp_comp.h" -#include "mtk_drm_drv.h" -#include "mtk_drm_plane.h" - -#define MTK_MAX_BPC 10 -#define MTK_MIN_BPC 3 - -void mtk_drm_crtc_commit(struct drm_crtc *crtc); -int mtk_drm_crtc_create(struct drm_device *drm_dev, - const unsigned int *path, - unsigned int path_len, - int priv_data_index, - const struct mtk_drm_route *conn_routes, - unsigned int num_conn_routes); -int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, - struct mtk_plane_state *state); -void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, - struct drm_atomic_state *plane_state); -struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc); - -#endif /* MTK_DRM_CRTC_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c deleted file mode 100644 index a515e96cfe..0000000000 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ /dev/null @@ -1,644 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2015 MediaTek Inc. - * Authors: - * YT Shen - * CK Hu - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "mtk_disp_drv.h" -#include "mtk_drm_drv.h" -#include "mtk_drm_plane.h" -#include "mtk_drm_ddp_comp.h" -#include "mtk_drm_crtc.h" - - -#define DISP_REG_DITHER_EN 0x0000 -#define DITHER_EN BIT(0) -#define DISP_REG_DITHER_CFG 0x0020 -#define DITHER_RELAY_MODE BIT(0) -#define DITHER_ENGINE_EN BIT(1) -#define DISP_DITHERING BIT(2) -#define DISP_REG_DITHER_SIZE 0x0030 -#define DISP_REG_DITHER_5 0x0114 -#define DISP_REG_DITHER_7 0x011c -#define DISP_REG_DITHER_15 0x013c -#define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28) -#define DITHER_ADD_LSHIFT_R(x) (((x) & 0x7) << 20) -#define DITHER_NEW_BIT_MODE BIT(0) -#define DISP_REG_DITHER_16 0x0140 -#define DITHER_LSB_ERR_SHIFT_B(x) (((x) & 0x7) << 28) -#define DITHER_ADD_LSHIFT_B(x) (((x) & 0x7) << 20) -#define DITHER_LSB_ERR_SHIFT_G(x) (((x) & 0x7) << 12) -#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) - -#define DISP_REG_DSC_CON 0x0000 -#define DSC_EN BIT(0) -#define DSC_DUAL_INOUT BIT(2) -#define DSC_BYPASS BIT(4) -#define DSC_UFOE_SEL BIT(16) - -#define DISP_REG_OD_EN 0x0000 -#define DISP_REG_OD_CFG 0x0020 -#define OD_RELAYMODE BIT(0) -#define DISP_REG_OD_SIZE 0x0030 - -#define DISP_REG_POSTMASK_EN 0x0000 -#define POSTMASK_EN BIT(0) -#define DISP_REG_POSTMASK_CFG 0x0020 -#define POSTMASK_RELAY_MODE BIT(0) -#define DISP_REG_POSTMASK_SIZE 0x0030 - -#define DISP_REG_UFO_START 0x0000 -#define UFO_BYPASS BIT(2) - -struct mtk_ddp_comp_dev { - struct clk *clk; - void __iomem *regs; - struct cmdq_client_reg cmdq_reg; -}; - -void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, - struct cmdq_client_reg *cmdq_reg, void __iomem *regs, - unsigned int offset) -{ -#if IS_REACHABLE(CONFIG_MTK_CMDQ) - if (cmdq_pkt) - cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys, - 
cmdq_reg->offset + offset, value); - else -#endif - writel(value, regs + offset); -} - -void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, - struct cmdq_client_reg *cmdq_reg, void __iomem *regs, - unsigned int offset) -{ -#if IS_REACHABLE(CONFIG_MTK_CMDQ) - if (cmdq_pkt) - cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys, - cmdq_reg->offset + offset, value); - else -#endif - writel_relaxed(value, regs + offset); -} - -void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value, - struct cmdq_client_reg *cmdq_reg, void __iomem *regs, - unsigned int offset, unsigned int mask) -{ -#if IS_REACHABLE(CONFIG_MTK_CMDQ) - if (cmdq_pkt) { - cmdq_pkt_write_mask(cmdq_pkt, cmdq_reg->subsys, - cmdq_reg->offset + offset, value, mask); - } else { -#endif - u32 tmp = readl(regs + offset); - - tmp = (tmp & ~mask) | (value & mask); - writel(tmp, regs + offset); -#if IS_REACHABLE(CONFIG_MTK_CMDQ) - } -#endif -} - -static int mtk_ddp_clk_enable(struct device *dev) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - return clk_prepare_enable(priv->clk); -} - -static void mtk_ddp_clk_disable(struct device *dev) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - clk_disable_unprepare(priv->clk); -} - -void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg, - unsigned int bpc, unsigned int cfg, - unsigned int dither_en, struct cmdq_pkt *cmdq_pkt) -{ - /* If bpc equal to 0, the dithering function didn't be enabled */ - if (bpc == 0) - return; - - if (bpc >= MTK_MIN_BPC) { - mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_5); - mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_7); - mtk_ddp_write(cmdq_pkt, - DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | - DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | - DITHER_NEW_BIT_MODE, - cmdq_reg, regs, DISP_REG_DITHER_15); - mtk_ddp_write(cmdq_pkt, - DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | - DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | - DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | - DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), - cmdq_reg, regs, DISP_REG_DITHER_16); - mtk_ddp_write(cmdq_pkt, dither_en, cmdq_reg, regs, cfg); - } -} - -static void mtk_dither_config(struct device *dev, unsigned int w, - unsigned int h, unsigned int vrefresh, - unsigned int bpc, struct cmdq_pkt *cmdq_pkt) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE); - mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, - DISP_REG_DITHER_CFG); - mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG, - DITHER_ENGINE_EN, cmdq_pkt); -} - -static void mtk_dither_start(struct device *dev) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - writel(DITHER_EN, priv->regs + DISP_REG_DITHER_EN); -} - -static void mtk_dither_stop(struct device *dev) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - writel_relaxed(0x0, priv->regs + DISP_REG_DITHER_EN); -} - -static void mtk_dither_set(struct device *dev, unsigned int bpc, - unsigned int cfg, struct cmdq_pkt *cmdq_pkt) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, cfg, - DISP_DITHERING, cmdq_pkt); -} - -static void mtk_dsc_config(struct device *dev, unsigned int w, - unsigned int h, unsigned int vrefresh, - unsigned int bpc, struct cmdq_pkt *cmdq_pkt) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - /* dsc bypass mode */ - 
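/*
 * Illustrative aside: mtk_ddp_write_mask() above (and the DSC_BYPASS /
 * DSC_UFOE_SEL writes just below) are masked read-modify-write updates -
 * only the bits selected by the mask change, everything else already in
 * the register is preserved.  Self-contained sketch of the bit
 * manipulation alone, with arbitrary example values:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t rmw(uint32_t reg, uint32_t value, uint32_t mask)
{
	return (reg & ~mask) | (value & mask);	/* same expression as above */
}

int main(void)
{
	uint32_t reg = 0xABCD0000u;

	reg = rmw(reg, 0x0000001Fu, 0x000000FFu);	/* touch only the low byte */
	printf("0x%08X\n", (unsigned int)reg);		/* prints 0xABCD001F */
	return 0;
}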
mtk_ddp_write_mask(cmdq_pkt, DSC_BYPASS, &priv->cmdq_reg, priv->regs, - DISP_REG_DSC_CON, DSC_BYPASS); - mtk_ddp_write_mask(cmdq_pkt, DSC_UFOE_SEL, &priv->cmdq_reg, priv->regs, - DISP_REG_DSC_CON, DSC_UFOE_SEL); - mtk_ddp_write_mask(cmdq_pkt, DSC_DUAL_INOUT, &priv->cmdq_reg, priv->regs, - DISP_REG_DSC_CON, DSC_DUAL_INOUT); -} - -static void mtk_dsc_start(struct device *dev) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - /* write with mask to reserve the value set in mtk_dsc_config */ - mtk_ddp_write_mask(NULL, DSC_EN, &priv->cmdq_reg, priv->regs, DISP_REG_DSC_CON, DSC_EN); -} - -static void mtk_dsc_stop(struct device *dev) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - writel_relaxed(0x0, priv->regs + DISP_REG_DSC_CON); -} - -static void mtk_od_config(struct device *dev, unsigned int w, - unsigned int h, unsigned int vrefresh, - unsigned int bpc, struct cmdq_pkt *cmdq_pkt) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_OD_SIZE); - mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_REG_OD_CFG); - mtk_dither_set(dev, bpc, DISP_REG_OD_CFG, cmdq_pkt); -} - -static void mtk_od_start(struct device *dev) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - writel(1, priv->regs + DISP_REG_OD_EN); -} - -static void mtk_postmask_config(struct device *dev, unsigned int w, - unsigned int h, unsigned int vrefresh, - unsigned int bpc, struct cmdq_pkt *cmdq_pkt) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, - DISP_REG_POSTMASK_SIZE); - mtk_ddp_write(cmdq_pkt, POSTMASK_RELAY_MODE, &priv->cmdq_reg, - priv->regs, DISP_REG_POSTMASK_CFG); -} - -static void mtk_postmask_start(struct device *dev) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - writel(POSTMASK_EN, priv->regs + DISP_REG_POSTMASK_EN); -} - -static void mtk_postmask_stop(struct device *dev) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - writel_relaxed(0x0, priv->regs + DISP_REG_POSTMASK_EN); -} - -static void mtk_ufoe_start(struct device *dev) -{ - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - - writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START); -} - -static const struct mtk_ddp_comp_funcs ddp_aal = { - .clk_enable = mtk_aal_clk_enable, - .clk_disable = mtk_aal_clk_disable, - .gamma_get_lut_size = mtk_aal_gamma_get_lut_size, - .gamma_set = mtk_aal_gamma_set, - .config = mtk_aal_config, - .start = mtk_aal_start, - .stop = mtk_aal_stop, -}; - -static const struct mtk_ddp_comp_funcs ddp_ccorr = { - .clk_enable = mtk_ccorr_clk_enable, - .clk_disable = mtk_ccorr_clk_disable, - .config = mtk_ccorr_config, - .start = mtk_ccorr_start, - .stop = mtk_ccorr_stop, - .ctm_set = mtk_ccorr_ctm_set, -}; - -static const struct mtk_ddp_comp_funcs ddp_color = { - .clk_enable = mtk_color_clk_enable, - .clk_disable = mtk_color_clk_disable, - .config = mtk_color_config, - .start = mtk_color_start, -}; - -static const struct mtk_ddp_comp_funcs ddp_dither = { - .clk_enable = mtk_ddp_clk_enable, - .clk_disable = mtk_ddp_clk_disable, - .config = mtk_dither_config, - .start = mtk_dither_start, - .stop = mtk_dither_stop, -}; - -static const struct mtk_ddp_comp_funcs ddp_dpi = { - .start = mtk_dpi_start, - .stop = mtk_dpi_stop, - .encoder_index = mtk_dpi_encoder_index, -}; - -static const struct mtk_ddp_comp_funcs ddp_dsc = { - .clk_enable = mtk_ddp_clk_enable, - .clk_disable = mtk_ddp_clk_disable, - 
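/*
 * Illustrative aside: ddp_dsc and the other mtk_ddp_comp_funcs tables in
 * this file are const ops tables ("vtables") in which unsupported hooks
 * are simply left NULL, and every caller (the mtk_ddp_comp_* wrappers in
 * the header) guards the call so a missing hook becomes a no-op.  A
 * self-contained sketch of that pattern with made-up ops, not taken from
 * this driver:
 */
#include <stdio.h>

struct widget_funcs {
	void (*start)(void);
	void (*stop)(void);	/* optional - may be NULL */
};

static void demo_start(void)
{
	printf("started\n");
}

static const struct widget_funcs demo_funcs = {
	.start = demo_start,
	/* .stop intentionally omitted: stays NULL */
};

static void widget_stop(const struct widget_funcs *funcs)
{
	if (funcs && funcs->stop)	/* guarded dispatch, as in the wrappers */
		funcs->stop();
}

int main(void)
{
	demo_funcs.start();
	widget_stop(&demo_funcs);	/* safe no-op because .stop is NULL */
	return 0;
}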
.config = mtk_dsc_config, - .start = mtk_dsc_start, - .stop = mtk_dsc_stop, -}; - -static const struct mtk_ddp_comp_funcs ddp_dsi = { - .start = mtk_dsi_ddp_start, - .stop = mtk_dsi_ddp_stop, - .encoder_index = mtk_dsi_encoder_index, -}; - -static const struct mtk_ddp_comp_funcs ddp_gamma = { - .clk_enable = mtk_gamma_clk_enable, - .clk_disable = mtk_gamma_clk_disable, - .gamma_get_lut_size = mtk_gamma_get_lut_size, - .gamma_set = mtk_gamma_set, - .config = mtk_gamma_config, - .start = mtk_gamma_start, - .stop = mtk_gamma_stop, -}; - -static const struct mtk_ddp_comp_funcs ddp_merge = { - .clk_enable = mtk_merge_clk_enable, - .clk_disable = mtk_merge_clk_disable, - .start = mtk_merge_start, - .stop = mtk_merge_stop, - .config = mtk_merge_config, -}; - -static const struct mtk_ddp_comp_funcs ddp_od = { - .clk_enable = mtk_ddp_clk_enable, - .clk_disable = mtk_ddp_clk_disable, - .config = mtk_od_config, - .start = mtk_od_start, -}; - -static const struct mtk_ddp_comp_funcs ddp_ovl = { - .clk_enable = mtk_ovl_clk_enable, - .clk_disable = mtk_ovl_clk_disable, - .config = mtk_ovl_config, - .start = mtk_ovl_start, - .stop = mtk_ovl_stop, - .register_vblank_cb = mtk_ovl_register_vblank_cb, - .unregister_vblank_cb = mtk_ovl_unregister_vblank_cb, - .enable_vblank = mtk_ovl_enable_vblank, - .disable_vblank = mtk_ovl_disable_vblank, - .supported_rotations = mtk_ovl_supported_rotations, - .layer_nr = mtk_ovl_layer_nr, - .layer_check = mtk_ovl_layer_check, - .layer_config = mtk_ovl_layer_config, - .bgclr_in_on = mtk_ovl_bgclr_in_on, - .bgclr_in_off = mtk_ovl_bgclr_in_off, - .get_formats = mtk_ovl_get_formats, - .get_num_formats = mtk_ovl_get_num_formats, -}; - -static const struct mtk_ddp_comp_funcs ddp_postmask = { - .clk_enable = mtk_ddp_clk_enable, - .clk_disable = mtk_ddp_clk_disable, - .config = mtk_postmask_config, - .start = mtk_postmask_start, - .stop = mtk_postmask_stop, -}; - -static const struct mtk_ddp_comp_funcs ddp_rdma = { - .clk_enable = mtk_rdma_clk_enable, - .clk_disable = mtk_rdma_clk_disable, - .config = mtk_rdma_config, - .start = mtk_rdma_start, - .stop = mtk_rdma_stop, - .register_vblank_cb = mtk_rdma_register_vblank_cb, - .unregister_vblank_cb = mtk_rdma_unregister_vblank_cb, - .enable_vblank = mtk_rdma_enable_vblank, - .disable_vblank = mtk_rdma_disable_vblank, - .layer_nr = mtk_rdma_layer_nr, - .layer_config = mtk_rdma_layer_config, - .get_formats = mtk_rdma_get_formats, - .get_num_formats = mtk_rdma_get_num_formats, -}; - -static const struct mtk_ddp_comp_funcs ddp_ufoe = { - .clk_enable = mtk_ddp_clk_enable, - .clk_disable = mtk_ddp_clk_disable, - .start = mtk_ufoe_start, -}; - -static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = { - .power_on = mtk_ovl_adaptor_power_on, - .power_off = mtk_ovl_adaptor_power_off, - .clk_enable = mtk_ovl_adaptor_clk_enable, - .clk_disable = mtk_ovl_adaptor_clk_disable, - .config = mtk_ovl_adaptor_config, - .start = mtk_ovl_adaptor_start, - .stop = mtk_ovl_adaptor_stop, - .layer_nr = mtk_ovl_adaptor_layer_nr, - .layer_config = mtk_ovl_adaptor_layer_config, - .register_vblank_cb = mtk_ovl_adaptor_register_vblank_cb, - .unregister_vblank_cb = mtk_ovl_adaptor_unregister_vblank_cb, - .enable_vblank = mtk_ovl_adaptor_enable_vblank, - .disable_vblank = mtk_ovl_adaptor_disable_vblank, - .dma_dev_get = mtk_ovl_adaptor_dma_dev_get, - .connect = mtk_ovl_adaptor_connect, - .disconnect = mtk_ovl_adaptor_disconnect, - .add = mtk_ovl_adaptor_add_comp, - .remove = mtk_ovl_adaptor_remove_comp, - .get_formats = mtk_ovl_adaptor_get_formats, - 
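/*
 * Illustrative aside: the mtk_ddp_comp_stem[] and mtk_ddp_matches[] tables
 * a little further down use designated array initializers indexed by enum
 * value, so each entry sits next to the ID it describes and any holes
 * default to zero/NULL.  A small self-contained sketch of the same table
 * style with invented IDs:
 */
#include <stdio.h>

enum demo_id {
	DEMO_A,
	DEMO_B,
	DEMO_C,
	DEMO_ID_MAX,
};

static const char * const demo_names[DEMO_ID_MAX] = {
	[DEMO_A] = "alpha",
	[DEMO_C] = "gamma",
	/* DEMO_B left out on purpose: implicitly NULL */
};

int main(void)
{
	enum demo_id id;

	for (id = DEMO_A; id < DEMO_ID_MAX; id++)
		printf("%d -> %s\n", id, demo_names[id] ? demo_names[id] : "(none)");

	return 0;
}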
.get_num_formats = mtk_ovl_adaptor_get_num_formats, - .mode_valid = mtk_ovl_adaptor_mode_valid, -}; - -static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = { - [MTK_DISP_AAL] = "aal", - [MTK_DISP_BLS] = "bls", - [MTK_DISP_CCORR] = "ccorr", - [MTK_DISP_COLOR] = "color", - [MTK_DISP_DITHER] = "dither", - [MTK_DISP_DSC] = "dsc", - [MTK_DISP_GAMMA] = "gamma", - [MTK_DISP_MERGE] = "merge", - [MTK_DISP_MUTEX] = "mutex", - [MTK_DISP_OD] = "od", - [MTK_DISP_OVL] = "ovl", - [MTK_DISP_OVL_2L] = "ovl-2l", - [MTK_DISP_OVL_ADAPTOR] = "ovl_adaptor", - [MTK_DISP_POSTMASK] = "postmask", - [MTK_DISP_PWM] = "pwm", - [MTK_DISP_RDMA] = "rdma", - [MTK_DISP_UFOE] = "ufoe", - [MTK_DISP_WDMA] = "wdma", - [MTK_DP_INTF] = "dp-intf", - [MTK_DPI] = "dpi", - [MTK_DSI] = "dsi", -}; - -struct mtk_ddp_comp_match { - enum mtk_ddp_comp_type type; - int alias_id; - const struct mtk_ddp_comp_funcs *funcs; -}; - -static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_DRM_ID_MAX] = { - [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal }, - [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal }, - [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, - [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr }, - [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, - [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, - [DDP_COMPONENT_DITHER0] = { MTK_DISP_DITHER, 0, &ddp_dither }, - [DDP_COMPONENT_DP_INTF0] = { MTK_DP_INTF, 0, &ddp_dpi }, - [DDP_COMPONENT_DP_INTF1] = { MTK_DP_INTF, 1, &ddp_dpi }, - [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi }, - [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi }, - [DDP_COMPONENT_DRM_OVL_ADAPTOR] = { MTK_DISP_OVL_ADAPTOR, 0, &ddp_ovl_adaptor }, - [DDP_COMPONENT_DSC0] = { MTK_DISP_DSC, 0, &ddp_dsc }, - [DDP_COMPONENT_DSC1] = { MTK_DISP_DSC, 1, &ddp_dsc }, - [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi }, - [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi }, - [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi }, - [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi }, - [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma }, - [DDP_COMPONENT_MERGE0] = { MTK_DISP_MERGE, 0, &ddp_merge }, - [DDP_COMPONENT_MERGE1] = { MTK_DISP_MERGE, 1, &ddp_merge }, - [DDP_COMPONENT_MERGE2] = { MTK_DISP_MERGE, 2, &ddp_merge }, - [DDP_COMPONENT_MERGE3] = { MTK_DISP_MERGE, 3, &ddp_merge }, - [DDP_COMPONENT_MERGE4] = { MTK_DISP_MERGE, 4, &ddp_merge }, - [DDP_COMPONENT_MERGE5] = { MTK_DISP_MERGE, 5, &ddp_merge }, - [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od }, - [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od }, - [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl }, - [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl }, - [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl }, - [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl }, - [DDP_COMPONENT_OVL_2L2] = { MTK_DISP_OVL_2L, 2, &ddp_ovl }, - [DDP_COMPONENT_POSTMASK0] = { MTK_DISP_POSTMASK, 0, &ddp_postmask }, - [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL }, - [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL }, - [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL }, - [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma }, - [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma }, - [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma }, - [DDP_COMPONENT_RDMA4] = { MTK_DISP_RDMA, 4, &ddp_rdma }, - [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe }, - [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL }, - [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, -}; - -static bool mtk_drm_find_comp_in_ddp(struct device *dev, - 
const unsigned int *path, - unsigned int path_len, - struct mtk_ddp_comp *ddp_comp) -{ - unsigned int i; - - if (path == NULL) - return false; - - for (i = 0U; i < path_len; i++) - if (dev == ddp_comp[path[i]].dev) - return true; - - return false; -} - -static unsigned int mtk_drm_find_comp_in_ddp_conn_path(struct device *dev, - const struct mtk_drm_route *routes, - unsigned int num_routes, - struct mtk_ddp_comp *ddp_comp) -{ - int ret; - unsigned int i; - - if (!routes) { - ret = -EINVAL; - goto err; - } - - for (i = 0; i < num_routes; i++) - if (dev == ddp_comp[routes[i].route_ddp].dev) - return BIT(routes[i].crtc_id); - - ret = -ENODEV; -err: - - DRM_INFO("Failed to find comp in ddp table, ret = %d\n", ret); - - return 0; -} - -int mtk_ddp_comp_get_id(struct device_node *node, - enum mtk_ddp_comp_type comp_type) -{ - int id = of_alias_get_id(node, mtk_ddp_comp_stem[comp_type]); - int i; - - for (i = 0; i < ARRAY_SIZE(mtk_ddp_matches); i++) { - if (comp_type == mtk_ddp_matches[i].type && - (id < 0 || id == mtk_ddp_matches[i].alias_id)) - return i; - } - - return -EINVAL; -} - -unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm, - struct device *dev) -{ - struct mtk_drm_private *private = drm->dev_private; - unsigned int ret = 0; - - if (mtk_drm_find_comp_in_ddp(dev, private->data->main_path, private->data->main_len, - private->ddp_comp)) - ret = BIT(0); - else if (mtk_drm_find_comp_in_ddp(dev, private->data->ext_path, - private->data->ext_len, private->ddp_comp)) - ret = BIT(1); - else if (mtk_drm_find_comp_in_ddp(dev, private->data->third_path, - private->data->third_len, private->ddp_comp)) - ret = BIT(2); - else - ret = mtk_drm_find_comp_in_ddp_conn_path(dev, - private->data->conn_routes, - private->data->num_conn_routes, - private->ddp_comp); - - return ret; -} - -int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp, - unsigned int comp_id) -{ - struct platform_device *comp_pdev; - enum mtk_ddp_comp_type type; - struct mtk_ddp_comp_dev *priv; -#if IS_REACHABLE(CONFIG_MTK_CMDQ) - int ret; -#endif - - if (comp_id < 0 || comp_id >= DDP_COMPONENT_DRM_ID_MAX) - return -EINVAL; - - type = mtk_ddp_matches[comp_id].type; - - comp->id = comp_id; - comp->funcs = mtk_ddp_matches[comp_id].funcs; - /* Not all drm components have a DTS device node, such as ovl_adaptor, - * which is the drm bring up sub driver - */ - if (!node) - return 0; - - comp_pdev = of_find_device_by_node(node); - if (!comp_pdev) { - DRM_INFO("Waiting for device %s\n", node->full_name); - return -EPROBE_DEFER; - } - comp->dev = &comp_pdev->dev; - - if (type == MTK_DISP_AAL || - type == MTK_DISP_BLS || - type == MTK_DISP_CCORR || - type == MTK_DISP_COLOR || - type == MTK_DISP_GAMMA || - type == MTK_DISP_MERGE || - type == MTK_DISP_OVL || - type == MTK_DISP_OVL_2L || - type == MTK_DISP_PWM || - type == MTK_DISP_RDMA || - type == MTK_DPI || - type == MTK_DP_INTF || - type == MTK_DSI) - return 0; - - priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - priv->regs = of_iomap(node, 0); - priv->clk = of_clk_get(node, 0); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); - -#if IS_REACHABLE(CONFIG_MTK_CMDQ) - ret = cmdq_dev_get_client_reg(comp->dev, &priv->cmdq_reg, 0); - if (ret) - dev_dbg(comp->dev, "get mediatek,gce-client-reg fail!\n"); -#endif - - platform_set_drvdata(comp_pdev, priv); - - return 0; -} diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h deleted file mode 100644 index 
93d79a1366..0000000000 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ /dev/null @@ -1,343 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2015 MediaTek Inc. - */ - -#ifndef MTK_DRM_DDP_COMP_H -#define MTK_DRM_DDP_COMP_H - -#include -#include -#include -#include -#include - -#include - -struct device; -struct device_node; -struct drm_crtc; -struct drm_device; -struct mtk_plane_state; -struct drm_crtc_state; - -enum mtk_ddp_comp_type { - MTK_DISP_AAL, - MTK_DISP_BLS, - MTK_DISP_CCORR, - MTK_DISP_COLOR, - MTK_DISP_DITHER, - MTK_DISP_DSC, - MTK_DISP_GAMMA, - MTK_DISP_MERGE, - MTK_DISP_MUTEX, - MTK_DISP_OD, - MTK_DISP_OVL, - MTK_DISP_OVL_2L, - MTK_DISP_OVL_ADAPTOR, - MTK_DISP_POSTMASK, - MTK_DISP_PWM, - MTK_DISP_RDMA, - MTK_DISP_UFOE, - MTK_DISP_WDMA, - MTK_DPI, - MTK_DP_INTF, - MTK_DSI, - MTK_DDP_COMP_TYPE_MAX, -}; - -struct mtk_ddp_comp; -struct cmdq_pkt; -struct mtk_ddp_comp_funcs { - int (*power_on)(struct device *dev); - void (*power_off)(struct device *dev); - int (*clk_enable)(struct device *dev); - void (*clk_disable)(struct device *dev); - void (*config)(struct device *dev, unsigned int w, - unsigned int h, unsigned int vrefresh, - unsigned int bpc, struct cmdq_pkt *cmdq_pkt); - void (*start)(struct device *dev); - void (*stop)(struct device *dev); - void (*register_vblank_cb)(struct device *dev, - void (*vblank_cb)(void *), - void *vblank_cb_data); - void (*unregister_vblank_cb)(struct device *dev); - void (*enable_vblank)(struct device *dev); - void (*disable_vblank)(struct device *dev); - unsigned int (*supported_rotations)(struct device *dev); - unsigned int (*layer_nr)(struct device *dev); - int (*layer_check)(struct device *dev, - unsigned int idx, - struct mtk_plane_state *state); - void (*layer_config)(struct device *dev, unsigned int idx, - struct mtk_plane_state *state, - struct cmdq_pkt *cmdq_pkt); - unsigned int (*gamma_get_lut_size)(struct device *dev); - void (*gamma_set)(struct device *dev, - struct drm_crtc_state *state); - void (*bgclr_in_on)(struct device *dev); - void (*bgclr_in_off)(struct device *dev); - void (*ctm_set)(struct device *dev, - struct drm_crtc_state *state); - struct device * (*dma_dev_get)(struct device *dev); - const u32 *(*get_formats)(struct device *dev); - size_t (*get_num_formats)(struct device *dev); - void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next); - void (*disconnect)(struct device *dev, struct device *mmsys_dev, unsigned int next); - void (*add)(struct device *dev, struct mtk_mutex *mutex); - void (*remove)(struct device *dev, struct mtk_mutex *mutex); - unsigned int (*encoder_index)(struct device *dev); - enum drm_mode_status (*mode_valid)(struct device *dev, const struct drm_display_mode *mode); -}; - -struct mtk_ddp_comp { - struct device *dev; - int irq; - unsigned int id; - int encoder_index; - const struct mtk_ddp_comp_funcs *funcs; -}; - -static inline int mtk_ddp_comp_power_on(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->power_on) - return comp->funcs->power_on(comp->dev); - else - return pm_runtime_resume_and_get(comp->dev); - return 0; -} - -static inline void mtk_ddp_comp_power_off(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->power_off) - comp->funcs->power_off(comp->dev); - else - pm_runtime_put(comp->dev); -} - -static inline int mtk_ddp_comp_clk_enable(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->clk_enable) - return comp->funcs->clk_enable(comp->dev); - - return 0; -} - -static inline void 
mtk_ddp_comp_clk_disable(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->clk_disable) - comp->funcs->clk_disable(comp->dev); -} - -static inline -enum drm_mode_status mtk_ddp_comp_mode_valid(struct mtk_ddp_comp *comp, - const struct drm_display_mode *mode) -{ - if (comp && comp->funcs && comp->funcs->mode_valid) - return comp->funcs->mode_valid(comp->dev, mode); - return MODE_OK; -} - -static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp, - unsigned int w, unsigned int h, - unsigned int vrefresh, unsigned int bpc, - struct cmdq_pkt *cmdq_pkt) -{ - if (comp->funcs && comp->funcs->config) - comp->funcs->config(comp->dev, w, h, vrefresh, bpc, cmdq_pkt); -} - -static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->start) - comp->funcs->start(comp->dev); -} - -static inline void mtk_ddp_comp_stop(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->stop) - comp->funcs->stop(comp->dev); -} - -static inline void mtk_ddp_comp_register_vblank_cb(struct mtk_ddp_comp *comp, - void (*vblank_cb)(void *), - void *vblank_cb_data) -{ - if (comp->funcs && comp->funcs->register_vblank_cb) - comp->funcs->register_vblank_cb(comp->dev, vblank_cb, - vblank_cb_data); -} - -static inline void mtk_ddp_comp_unregister_vblank_cb(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->unregister_vblank_cb) - comp->funcs->unregister_vblank_cb(comp->dev); -} - -static inline void mtk_ddp_comp_enable_vblank(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->enable_vblank) - comp->funcs->enable_vblank(comp->dev); -} - -static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->disable_vblank) - comp->funcs->disable_vblank(comp->dev); -} - -static inline -unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->supported_rotations) - return comp->funcs->supported_rotations(comp->dev); - - return 0; -} - -static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->layer_nr) - return comp->funcs->layer_nr(comp->dev); - - return 0; -} - -static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp, - unsigned int idx, - struct mtk_plane_state *state) -{ - if (comp->funcs && comp->funcs->layer_check) - return comp->funcs->layer_check(comp->dev, idx, state); - return 0; -} - -static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp, - unsigned int idx, - struct mtk_plane_state *state, - struct cmdq_pkt *cmdq_pkt) -{ - if (comp->funcs && comp->funcs->layer_config) - comp->funcs->layer_config(comp->dev, idx, state, cmdq_pkt); -} - -static inline unsigned int mtk_ddp_gamma_get_lut_size(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->gamma_get_lut_size) - return comp->funcs->gamma_get_lut_size(comp->dev); - - return 0; -} - -static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp, - struct drm_crtc_state *state) -{ - if (comp->funcs && comp->funcs->gamma_set) - comp->funcs->gamma_set(comp->dev, state); -} - -static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->bgclr_in_on) - comp->funcs->bgclr_in_on(comp->dev); -} - -static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->bgclr_in_off) - comp->funcs->bgclr_in_off(comp->dev); -} - -static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp, - struct drm_crtc_state 
*state) -{ - if (comp->funcs && comp->funcs->ctm_set) - comp->funcs->ctm_set(comp->dev, state); -} - -static inline struct device *mtk_ddp_comp_dma_dev_get(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->dma_dev_get) - return comp->funcs->dma_dev_get(comp->dev); - return comp->dev; -} - -static inline -const u32 *mtk_ddp_comp_get_formats(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->get_formats) - return comp->funcs->get_formats(comp->dev); - - return NULL; -} - -static inline -size_t mtk_ddp_comp_get_num_formats(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->get_num_formats) - return comp->funcs->get_num_formats(comp->dev); - - return 0; -} - -static inline bool mtk_ddp_comp_add(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex) -{ - if (comp->funcs && comp->funcs->add) { - comp->funcs->add(comp->dev, mutex); - return true; - } - return false; -} - -static inline bool mtk_ddp_comp_remove(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex) -{ - if (comp->funcs && comp->funcs->remove) { - comp->funcs->remove(comp->dev, mutex); - return true; - } - return false; -} - -static inline bool mtk_ddp_comp_connect(struct mtk_ddp_comp *comp, struct device *mmsys_dev, - unsigned int next) -{ - if (comp->funcs && comp->funcs->connect) { - comp->funcs->connect(comp->dev, mmsys_dev, next); - return true; - } - return false; -} - -static inline bool mtk_ddp_comp_disconnect(struct mtk_ddp_comp *comp, struct device *mmsys_dev, - unsigned int next) -{ - if (comp->funcs && comp->funcs->disconnect) { - comp->funcs->disconnect(comp->dev, mmsys_dev, next); - return true; - } - return false; -} - -static inline void mtk_ddp_comp_encoder_index_set(struct mtk_ddp_comp *comp) -{ - if (comp->funcs && comp->funcs->encoder_index) - comp->encoder_index = (int)comp->funcs->encoder_index(comp->dev); -} - -int mtk_ddp_comp_get_id(struct device_node *node, - enum mtk_ddp_comp_type comp_type); -unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm, - struct device *dev); -int mtk_ddp_comp_init(struct device_node *comp_node, struct mtk_ddp_comp *comp, - unsigned int comp_id); -enum mtk_ddp_comp_type mtk_ddp_comp_get_type(unsigned int comp_id); -void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, - struct cmdq_client_reg *cmdq_reg, void __iomem *regs, - unsigned int offset); -void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, - struct cmdq_client_reg *cmdq_reg, void __iomem *regs, - unsigned int offset); -void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value, - struct cmdq_client_reg *cmdq_reg, void __iomem *regs, - unsigned int offset, unsigned int mask); -#endif /* MTK_DRM_DDP_COMP_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 0b570e1940..56f409ad7f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -24,10 +24,10 @@ #include #include -#include "mtk_drm_crtc.h" -#include "mtk_drm_ddp_comp.h" +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" #include "mtk_drm_drv.h" -#include "mtk_drm_gem.h" +#include "mtk_gem.h" #define DRIVER_NAME "mediatek" #define DRIVER_DESC "Mediatek SoC DRM" @@ -294,6 +294,9 @@ static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = { .conn_routes = mt8188_mtk_ddp_main_routes, .num_conn_routes = ARRAY_SIZE(mt8188_mtk_ddp_main_routes), .mmsys_dev_num = 2, + .max_width = 8191, + .min_width = 1, + .min_height = 1, }; static const struct mtk_mmsys_driver_data 
mt8192_mmsys_driver_data = { @@ -308,6 +311,9 @@ static const struct mtk_mmsys_driver_data mt8195_vdosys0_driver_data = { .main_path = mt8195_mtk_ddp_main, .main_len = ARRAY_SIZE(mt8195_mtk_ddp_main), .mmsys_dev_num = 2, + .max_width = 8191, + .min_width = 1, + .min_height = 1, }; static const struct mtk_mmsys_driver_data mt8195_vdosys1_driver_data = { @@ -315,6 +321,9 @@ static const struct mtk_mmsys_driver_data mt8195_vdosys1_driver_data = { .ext_len = ARRAY_SIZE(mt8195_mtk_ddp_ext), .mmsys_id = 1, .mmsys_dev_num = 2, + .max_width = 8191, + .min_width = 2, /* 2-pixel align when ethdr is bypassed */ + .min_height = 1, }; static const struct of_device_id mtk_drm_of_ids[] = { @@ -493,25 +502,34 @@ static int mtk_drm_kms_init(struct drm_device *drm) for (j = 0; j < private->data->mmsys_dev_num; j++) { priv_n = private->all_drm_private[j]; + if (priv_n->data->max_width) + drm->mode_config.max_width = priv_n->data->max_width; + + if (priv_n->data->min_width) + drm->mode_config.min_width = priv_n->data->min_width; + + if (priv_n->data->min_height) + drm->mode_config.min_height = priv_n->data->min_height; + if (i == CRTC_MAIN && priv_n->data->main_len) { - ret = mtk_drm_crtc_create(drm, priv_n->data->main_path, - priv_n->data->main_len, j, - priv_n->data->conn_routes, - priv_n->data->num_conn_routes); + ret = mtk_crtc_create(drm, priv_n->data->main_path, + priv_n->data->main_len, j, + priv_n->data->conn_routes, + priv_n->data->num_conn_routes); if (ret) goto err_component_unbind; continue; } else if (i == CRTC_EXT && priv_n->data->ext_len) { - ret = mtk_drm_crtc_create(drm, priv_n->data->ext_path, - priv_n->data->ext_len, j, NULL, 0); + ret = mtk_crtc_create(drm, priv_n->data->ext_path, + priv_n->data->ext_len, j, NULL, 0); if (ret) goto err_component_unbind; continue; } else if (i == CRTC_THIRD && priv_n->data->third_len) { - ret = mtk_drm_crtc_create(drm, priv_n->data->third_path, - priv_n->data->third_len, j, NULL, 0); + ret = mtk_crtc_create(drm, priv_n->data->third_path, + priv_n->data->third_len, j, NULL, 0); if (ret) goto err_component_unbind; @@ -520,10 +538,14 @@ static int mtk_drm_kms_init(struct drm_device *drm) } } + /* IGT will check if the cursor size is configured */ + drm->mode_config.cursor_width = drm->mode_config.max_width; + drm->mode_config.cursor_height = drm->mode_config.max_height; + /* Use OVL device for all DMA memory allocations */ crtc = drm_crtc_from_index(drm, 0); if (crtc) - dma_dev = mtk_drm_crtc_dma_dev_get(crtc); + dma_dev = mtk_crtc_dma_dev_get(crtc); if (!dma_dev) { ret = -ENODEV; dev_err(drm->dev, "Need at least one OVL device\n"); @@ -576,8 +598,8 @@ DEFINE_DRM_GEM_FOPS(mtk_drm_fops); * We need to override this because the device used to import the memory is * not dev->dev, as drm_gem_prime_import() expects. 
*/ -static struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) +static struct drm_gem_object *mtk_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) { struct mtk_drm_private *private = dev->dev_private; @@ -587,9 +609,9 @@ static struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev, static const struct drm_driver mtk_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, - .dumb_create = mtk_drm_gem_dumb_create, + .dumb_create = mtk_gem_dumb_create, - .gem_prime_import = mtk_drm_gem_prime_import, + .gem_prime_import = mtk_gem_prime_import, .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, .fops = &mtk_drm_fops, @@ -709,6 +731,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = { .data = (void *)MTK_DISP_GAMMA, }, { .compatible = "mediatek,mt8183-disp-gamma", .data = (void *)MTK_DISP_GAMMA, }, + { .compatible = "mediatek,mt8195-disp-gamma", + .data = (void *)MTK_DISP_GAMMA, }, { .compatible = "mediatek,mt8195-disp-merge", .data = (void *)MTK_DISP_MERGE }, { .compatible = "mediatek,mt2701-disp-mutex", @@ -741,6 +765,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = { .data = (void *)MTK_DISP_OVL }, { .compatible = "mediatek,mt8192-disp-ovl", .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8195-disp-ovl", + .data = (void *)MTK_DISP_OVL }, { .compatible = "mediatek,mt8183-disp-ovl-2l", .data = (void *)MTK_DISP_OVL_2L }, { .compatible = "mediatek,mt8192-disp-ovl-2l", diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index 33fadb08dc..ce897984de 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -7,13 +7,13 @@ #define MTK_DRM_DRV_H #include -#include "mtk_drm_ddp_comp.h" +#include "mtk_ddp_comp.h" #define MAX_CONNECTOR 2 #define DDP_COMPONENT_DRM_OVL_ADAPTOR (DDP_COMPONENT_ID_MAX + 1) #define DDP_COMPONENT_DRM_ID_MAX (DDP_COMPONENT_DRM_OVL_ADAPTOR + 1) -enum mtk_drm_crtc_path { +enum mtk_crtc_path { CRTC_MAIN, CRTC_EXT, CRTC_THIRD, @@ -46,6 +46,10 @@ struct mtk_mmsys_driver_data { bool shadow_register; unsigned int mmsys_id; unsigned int mmsys_dev_num; + + u16 max_width; + u16 min_width; + u16 min_height; }; struct mtk_drm_private { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c deleted file mode 100644 index 1bf229615b..0000000000 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ /dev/null @@ -1,288 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2015 MediaTek Inc. 
- */ - -#include - -#include -#include -#include -#include -#include - -#include "mtk_drm_drv.h" -#include "mtk_drm_gem.h" - -static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); - -static const struct vm_operations_struct vm_ops = { - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - -static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = { - .free = mtk_drm_gem_free_object, - .get_sg_table = mtk_gem_prime_get_sg_table, - .vmap = mtk_drm_gem_prime_vmap, - .vunmap = mtk_drm_gem_prime_vunmap, - .mmap = mtk_drm_gem_object_mmap, - .vm_ops = &vm_ops, -}; - -static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev, - unsigned long size) -{ - struct mtk_drm_gem_obj *mtk_gem_obj; - int ret; - - size = round_up(size, PAGE_SIZE); - - if (size == 0) - return ERR_PTR(-EINVAL); - - mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL); - if (!mtk_gem_obj) - return ERR_PTR(-ENOMEM); - - mtk_gem_obj->base.funcs = &mtk_drm_gem_object_funcs; - - ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size); - if (ret < 0) { - DRM_ERROR("failed to initialize gem object\n"); - kfree(mtk_gem_obj); - return ERR_PTR(ret); - } - - return mtk_gem_obj; -} - -struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev, - size_t size, bool alloc_kmap) -{ - struct mtk_drm_private *priv = dev->dev_private; - struct mtk_drm_gem_obj *mtk_gem; - struct drm_gem_object *obj; - int ret; - - mtk_gem = mtk_drm_gem_init(dev, size); - if (IS_ERR(mtk_gem)) - return ERR_CAST(mtk_gem); - - obj = &mtk_gem->base; - - mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE; - - if (!alloc_kmap) - mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; - - mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size, - &mtk_gem->dma_addr, GFP_KERNEL, - mtk_gem->dma_attrs); - if (!mtk_gem->cookie) { - DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size); - ret = -ENOMEM; - goto err_gem_free; - } - - if (alloc_kmap) - mtk_gem->kvaddr = mtk_gem->cookie; - - DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n", - mtk_gem->cookie, &mtk_gem->dma_addr, - size); - - return mtk_gem; - -err_gem_free: - drm_gem_object_release(obj); - kfree(mtk_gem); - return ERR_PTR(ret); -} - -void mtk_drm_gem_free_object(struct drm_gem_object *obj) -{ - struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); - struct mtk_drm_private *priv = obj->dev->dev_private; - - if (mtk_gem->sg) - drm_prime_gem_destroy(obj, mtk_gem->sg); - else - dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie, - mtk_gem->dma_addr, mtk_gem->dma_attrs); - - /* release file pointer to gem object. */ - drm_gem_object_release(obj); - - kfree(mtk_gem); -} - -int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, - struct drm_mode_create_dumb *args) -{ - struct mtk_drm_gem_obj *mtk_gem; - int ret; - - args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); - - /* - * Multiply 2 variables of different types, - * for example: args->size = args->spacing * args->height; - * may cause coverity issue with unintentional overflow. - */ - args->size = args->pitch; - args->size *= args->height; - - mtk_gem = mtk_drm_gem_create(dev, args->size, false); - if (IS_ERR(mtk_gem)) - return PTR_ERR(mtk_gem); - - /* - * allocate a id of idr table where the obj is registered - * and handle has the id what user can see. - */ - ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle); - if (ret) - goto err_handle_create; - - /* drop reference from allocate - handle holds it now. 
*/ - drm_gem_object_put(&mtk_gem->base); - - return 0; - -err_handle_create: - mtk_drm_gem_free_object(&mtk_gem->base); - return ret; -} - -static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma) - -{ - int ret; - struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); - struct mtk_drm_private *priv = obj->dev->dev_private; - - /* - * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the - * whole buffer from the start. - */ - vma->vm_pgoff = 0; - - /* - * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear - * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). - */ - vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); - vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); - vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); - - ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie, - mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs); - - return ret; -} - -/* - * Allocate a sg_table for this GEM object. - * Note: Both the table's contents, and the sg_table itself must be freed by - * the caller. - * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error. - */ -struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj) -{ - struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); - struct mtk_drm_private *priv = obj->dev->dev_private; - struct sg_table *sgt; - int ret; - - sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); - if (!sgt) - return ERR_PTR(-ENOMEM); - - ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie, - mtk_gem->dma_addr, obj->size, - mtk_gem->dma_attrs); - if (ret) { - DRM_ERROR("failed to allocate sgt, %d\n", ret); - kfree(sgt); - return ERR_PTR(ret); - } - - return sgt; -} - -struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, struct sg_table *sg) -{ - struct mtk_drm_gem_obj *mtk_gem; - - /* check if the entries in the sg_table are contiguous */ - if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) { - DRM_ERROR("sg_table is not contiguous"); - return ERR_PTR(-EINVAL); - } - - mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size); - if (IS_ERR(mtk_gem)) - return ERR_CAST(mtk_gem); - - mtk_gem->dma_addr = sg_dma_address(sg->sgl); - mtk_gem->sg = sg; - - return &mtk_gem->base; -} - -int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) -{ - struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); - struct sg_table *sgt = NULL; - unsigned int npages; - - if (mtk_gem->kvaddr) - goto out; - - sgt = mtk_gem_prime_get_sg_table(obj); - if (IS_ERR(sgt)) - return PTR_ERR(sgt); - - npages = obj->size >> PAGE_SHIFT; - mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL); - if (!mtk_gem->pages) { - sg_free_table(sgt); - kfree(sgt); - return -ENOMEM; - } - - drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages); - - mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, - pgprot_writecombine(PAGE_KERNEL)); - if (!mtk_gem->kvaddr) { - sg_free_table(sgt); - kfree(sgt); - kfree(mtk_gem->pages); - return -ENOMEM; - } - sg_free_table(sgt); - kfree(sgt); - -out: - iosys_map_set_vaddr(map, mtk_gem->kvaddr); - - return 0; -} - -void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, - struct iosys_map *map) -{ - struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); - void *vaddr = map->vaddr; - - if (!mtk_gem->pages) - return; - - vunmap(vaddr); - mtk_gem->kvaddr = NULL; - kfree(mtk_gem->pages); -} diff --git 
a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h deleted file mode 100644 index 78f23b07a0..0000000000 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2015 MediaTek Inc. - */ - -#ifndef _MTK_DRM_GEM_H_ -#define _MTK_DRM_GEM_H_ - -#include - -/* - * mtk drm buffer structure. - * - * @base: a gem object. - * - a new handle to this gem object would be created - * by drm_gem_handle_create(). - * @cookie: the return value of dma_alloc_attrs(), keep it for dma_free_attrs() - * @kvaddr: kernel virtual address of gem buffer. - * @dma_addr: dma address of gem buffer. - * @dma_attrs: dma attributes of gem buffer. - * - * P.S. this object would be transferred to user as kms_bo.handle so - * user can access the buffer through kms_bo.handle. - */ -struct mtk_drm_gem_obj { - struct drm_gem_object base; - void *cookie; - void *kvaddr; - dma_addr_t dma_addr; - unsigned long dma_attrs; - struct sg_table *sg; - struct page **pages; -}; - -#define to_mtk_gem_obj(x) container_of(x, struct mtk_drm_gem_obj, base) - -void mtk_drm_gem_free_object(struct drm_gem_object *gem); -struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev, size_t size, - bool alloc_kmap); -int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, - struct drm_mode_create_dumb *args); -struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj); -struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, struct sg_table *sg); -int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); -void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, - struct iosys_map *map); - -#endif diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c deleted file mode 100644 index ddc9355b06..0000000000 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ /dev/null @@ -1,350 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2015 MediaTek Inc. 
- * Author: CK Hu - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "mtk_drm_crtc.h" -#include "mtk_drm_ddp_comp.h" -#include "mtk_drm_drv.h" -#include "mtk_drm_gem.h" -#include "mtk_drm_plane.h" - -static const u64 modifiers[] = { - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | - AFBC_FORMAT_MOD_SPLIT | - AFBC_FORMAT_MOD_SPARSE), - DRM_FORMAT_MOD_INVALID, -}; - -static void mtk_plane_reset(struct drm_plane *plane) -{ - struct mtk_plane_state *state; - - if (plane->state) { - __drm_atomic_helper_plane_destroy_state(plane->state); - - state = to_mtk_plane_state(plane->state); - memset(state, 0, sizeof(*state)); - } else { - state = kzalloc(sizeof(*state), GFP_KERNEL); - if (!state) - return; - } - - __drm_atomic_helper_plane_reset(plane, &state->base); - - state->base.plane = plane; - state->pending.format = DRM_FORMAT_RGB565; - state->pending.modifier = DRM_FORMAT_MOD_LINEAR; -} - -static struct drm_plane_state *mtk_plane_duplicate_state(struct drm_plane *plane) -{ - struct mtk_plane_state *old_state = to_mtk_plane_state(plane->state); - struct mtk_plane_state *state; - - state = kmalloc(sizeof(*state), GFP_KERNEL); - if (!state) - return NULL; - - __drm_atomic_helper_plane_duplicate_state(plane, &state->base); - - WARN_ON(state->base.plane != plane); - - state->pending = old_state->pending; - - return &state->base; -} - -static bool mtk_plane_format_mod_supported(struct drm_plane *plane, - uint32_t format, - uint64_t modifier) -{ - if (modifier == DRM_FORMAT_MOD_LINEAR) - return true; - - if (modifier != DRM_FORMAT_MOD_ARM_AFBC( - AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | - AFBC_FORMAT_MOD_SPLIT | - AFBC_FORMAT_MOD_SPARSE)) - return false; - - if (format != DRM_FORMAT_XRGB8888 && - format != DRM_FORMAT_ARGB8888 && - format != DRM_FORMAT_BGRX8888 && - format != DRM_FORMAT_BGRA8888 && - format != DRM_FORMAT_ABGR8888 && - format != DRM_FORMAT_XBGR8888 && - format != DRM_FORMAT_RGB888 && - format != DRM_FORMAT_BGR888) - return false; - - return true; -} - -static void mtk_drm_plane_destroy_state(struct drm_plane *plane, - struct drm_plane_state *state) -{ - __drm_atomic_helper_plane_destroy_state(state); - kfree(to_mtk_plane_state(state)); -} - -static int mtk_plane_atomic_async_check(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, - plane); - struct drm_crtc_state *crtc_state; - int ret; - - if (plane != new_plane_state->crtc->cursor) - return -EINVAL; - - if (!plane->state) - return -EINVAL; - - if (!plane->state->fb) - return -EINVAL; - - ret = mtk_drm_crtc_plane_check(new_plane_state->crtc, plane, - to_mtk_plane_state(new_plane_state)); - if (ret) - return ret; - - crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc); - - return drm_atomic_helper_check_plane_state(plane->state, crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - true, true); -} - -static void mtk_plane_update_new_state(struct drm_plane_state *new_state, - struct mtk_plane_state *mtk_plane_state) -{ - struct drm_framebuffer *fb = new_state->fb; - struct drm_gem_object *gem; - struct mtk_drm_gem_obj *mtk_gem; - unsigned int pitch, format; - u64 modifier; - dma_addr_t addr; - dma_addr_t hdr_addr = 0; - unsigned int hdr_pitch = 0; - int offset; - - gem = fb->obj[0]; - mtk_gem = to_mtk_gem_obj(gem); - addr = mtk_gem->dma_addr; - pitch = fb->pitches[0]; - format = fb->format->format; - modifier = fb->modifier; - - if 
(modifier == DRM_FORMAT_MOD_LINEAR) { - /* - * Using dma_addr_t variable to calculate with multiplier of different types, - * for example: addr += (new_state->src.x1 >> 16) * fb->format->cpp[0]; - * may cause coverity issue with unintentional overflow. - */ - offset = (new_state->src.x1 >> 16) * fb->format->cpp[0]; - addr += offset; - offset = (new_state->src.y1 >> 16) * pitch; - addr += offset; - } else { - int width_in_blocks = ALIGN(fb->width, AFBC_DATA_BLOCK_WIDTH) - / AFBC_DATA_BLOCK_WIDTH; - int height_in_blocks = ALIGN(fb->height, AFBC_DATA_BLOCK_HEIGHT) - / AFBC_DATA_BLOCK_HEIGHT; - int x_offset_in_blocks = (new_state->src.x1 >> 16) / AFBC_DATA_BLOCK_WIDTH; - int y_offset_in_blocks = (new_state->src.y1 >> 16) / AFBC_DATA_BLOCK_HEIGHT; - int hdr_size, hdr_offset; - - hdr_pitch = width_in_blocks * AFBC_HEADER_BLOCK_SIZE; - pitch = width_in_blocks * AFBC_DATA_BLOCK_WIDTH * - AFBC_DATA_BLOCK_HEIGHT * fb->format->cpp[0]; - - hdr_size = ALIGN(hdr_pitch * height_in_blocks, AFBC_HEADER_ALIGNMENT); - hdr_offset = hdr_pitch * y_offset_in_blocks + - AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks; - - /* - * Using dma_addr_t variable to calculate with multiplier of different types, - * for example: addr += hdr_pitch * y_offset_in_blocks; - * may cause coverity issue with unintentional overflow. - */ - hdr_addr = addr + hdr_offset; - - /* The data plane is offset by 1 additional block. */ - offset = pitch * y_offset_in_blocks + - AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT * - fb->format->cpp[0] * (x_offset_in_blocks + 1); - - /* - * Using dma_addr_t variable to calculate with multiplier of different types, - * for example: addr += pitch * y_offset_in_blocks; - * may cause coverity issue with unintentional overflow. - */ - addr = addr + hdr_size + offset; - } - - mtk_plane_state->pending.enable = true; - mtk_plane_state->pending.pitch = pitch; - mtk_plane_state->pending.hdr_pitch = hdr_pitch; - mtk_plane_state->pending.format = format; - mtk_plane_state->pending.modifier = modifier; - mtk_plane_state->pending.addr = addr; - mtk_plane_state->pending.hdr_addr = hdr_addr; - mtk_plane_state->pending.x = new_state->dst.x1; - mtk_plane_state->pending.y = new_state->dst.y1; - mtk_plane_state->pending.width = drm_rect_width(&new_state->dst); - mtk_plane_state->pending.height = drm_rect_height(&new_state->dst); - mtk_plane_state->pending.rotation = new_state->rotation; - mtk_plane_state->pending.color_encoding = new_state->color_encoding; -} - -static void mtk_plane_atomic_async_update(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); - struct mtk_plane_state *new_plane_state = to_mtk_plane_state(plane->state); - - plane->state->crtc_x = new_state->crtc_x; - plane->state->crtc_y = new_state->crtc_y; - plane->state->crtc_h = new_state->crtc_h; - plane->state->crtc_w = new_state->crtc_w; - plane->state->src_x = new_state->src_x; - plane->state->src_y = new_state->src_y; - plane->state->src_h = new_state->src_h; - plane->state->src_w = new_state->src_w; - - mtk_plane_update_new_state(new_state, new_plane_state); - swap(plane->state->fb, new_state->fb); - wmb(); /* Make sure the above parameters are set before update */ - new_plane_state->pending.async_dirty = true; - mtk_drm_crtc_async_update(new_state->crtc, plane, state); -} - -static const struct drm_plane_funcs mtk_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = 
drm_plane_cleanup, - .reset = mtk_plane_reset, - .atomic_duplicate_state = mtk_plane_duplicate_state, - .atomic_destroy_state = mtk_drm_plane_destroy_state, - .format_mod_supported = mtk_plane_format_mod_supported, -}; - -static int mtk_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, - plane); - struct drm_framebuffer *fb = new_plane_state->fb; - struct drm_crtc_state *crtc_state; - int ret; - - if (!fb) - return 0; - - if (WARN_ON(!new_plane_state->crtc)) - return 0; - - ret = mtk_drm_crtc_plane_check(new_plane_state->crtc, plane, - to_mtk_plane_state(new_plane_state)); - if (ret) - return ret; - - crtc_state = drm_atomic_get_crtc_state(state, - new_plane_state->crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - - return drm_atomic_helper_check_plane_state(new_plane_state, - crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - true, true); -} - -static void mtk_plane_atomic_disable(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); - struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state); - mtk_plane_state->pending.enable = false; - wmb(); /* Make sure the above parameter is set before update */ - mtk_plane_state->pending.dirty = true; -} - -static void mtk_plane_atomic_update(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); - struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state); - - if (!new_state->crtc || WARN_ON(!new_state->fb)) - return; - - if (!new_state->visible) { - mtk_plane_atomic_disable(plane, state); - return; - } - - mtk_plane_update_new_state(new_state, mtk_plane_state); - wmb(); /* Make sure the above parameters are set before update */ - mtk_plane_state->pending.dirty = true; -} - -static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { - .atomic_check = mtk_plane_atomic_check, - .atomic_update = mtk_plane_atomic_update, - .atomic_disable = mtk_plane_atomic_disable, - .atomic_async_update = mtk_plane_atomic_async_update, - .atomic_async_check = mtk_plane_atomic_async_check, -}; - -int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, - unsigned long possible_crtcs, enum drm_plane_type type, - unsigned int supported_rotations, const u32 *formats, - size_t num_formats) -{ - int err; - - if (!formats || !num_formats) { - DRM_ERROR("no formats for plane\n"); - return -EINVAL; - } - - err = drm_universal_plane_init(dev, plane, possible_crtcs, - &mtk_plane_funcs, formats, - num_formats, modifiers, type, NULL); - if (err) { - DRM_ERROR("failed to initialize plane\n"); - return err; - } - - if (supported_rotations & ~DRM_MODE_ROTATE_0) { - err = drm_plane_create_rotation_property(plane, - DRM_MODE_ROTATE_0, - supported_rotations); - if (err) - DRM_INFO("Create rotation property failed\n"); - } - - drm_plane_helper_add(plane, &mtk_plane_helper_funcs); - - return 0; -} diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h deleted file mode 100644 index 99aff7da08..0000000000 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h +++ /dev/null @@ -1,54 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2015 MediaTek Inc. 
- * Author: CK Hu - */ - -#ifndef _MTK_DRM_PLANE_H_ -#define _MTK_DRM_PLANE_H_ - -#include -#include - -#define AFBC_DATA_BLOCK_WIDTH 32 -#define AFBC_DATA_BLOCK_HEIGHT 8 -#define AFBC_HEADER_BLOCK_SIZE 16 -#define AFBC_HEADER_ALIGNMENT 1024 - -struct mtk_plane_pending_state { - bool config; - bool enable; - dma_addr_t addr; - dma_addr_t hdr_addr; - unsigned int pitch; - unsigned int hdr_pitch; - unsigned int format; - unsigned long long modifier; - unsigned int x; - unsigned int y; - unsigned int width; - unsigned int height; - unsigned int rotation; - bool dirty; - bool async_dirty; - bool async_config; - enum drm_color_encoding color_encoding; -}; - -struct mtk_plane_state { - struct drm_plane_state base; - struct mtk_plane_pending_state pending; -}; - -static inline struct mtk_plane_state * -to_mtk_plane_state(struct drm_plane_state *state) -{ - return container_of(state, struct mtk_plane_state, base); -} - -int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, - unsigned long possible_crtcs, enum drm_plane_type type, - unsigned int supported_rotations, const u32 *formats, - size_t num_formats); - -#endif diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 9501f40191..b6e3c011a1 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -28,8 +28,8 @@ #include #include +#include "mtk_ddp_comp.h" #include "mtk_disp_drv.h" -#include "mtk_drm_ddp_comp.h" #include "mtk_drm_drv.h" #define DSI_START 0x00 @@ -242,22 +242,23 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi) u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, HZ_PER_MHZ); struct mtk_phy_timing *timing = &dsi->phy_timing; - timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1; - timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000; - timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 - + timing->lpx = (80 * data_rate_mhz / (8 * 1000)) + 1; + timing->da_hs_prepare = (59 * data_rate_mhz + 4 * 1000) / 8000 + 1; + timing->da_hs_zero = (163 * data_rate_mhz + 11 * 1000) / 8000 + 1 - timing->da_hs_prepare; - timing->da_hs_trail = timing->da_hs_prepare + 1; + timing->da_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1; - timing->ta_go = 4 * timing->lpx - 2; - timing->ta_sure = timing->lpx + 2; - timing->ta_get = 4 * timing->lpx; - timing->da_hs_exit = 2 * timing->lpx + 1; + timing->ta_go = 4 * timing->lpx; + timing->ta_sure = 3 * timing->lpx / 2; + timing->ta_get = 5 * timing->lpx; + timing->da_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1; - timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000); - timing->clk_hs_post = timing->clk_hs_prepare + 8; - timing->clk_hs_trail = timing->clk_hs_prepare; - timing->clk_hs_zero = timing->clk_hs_trail * 4; - timing->clk_hs_exit = 2 * timing->clk_hs_trail; + timing->clk_hs_prepare = (57 * data_rate_mhz / (8 * 1000)) + 1; + timing->clk_hs_post = (65 * data_rate_mhz + 53 * 1000) / 8000 + 1; + timing->clk_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1; + timing->clk_hs_zero = (330 * data_rate_mhz / (8 * 1000)) + 1 - + timing->clk_hs_prepare; + timing->clk_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1; timcon0 = FIELD_PREP(LPX, timing->lpx) | FIELD_PREP(HS_PREP, timing->da_hs_prepare) | @@ -662,7 +663,7 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi) /* * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since - * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(), + * mtk_dsi_stop() should be called after mtk_crtc_atomic_disable(), * which needs 
irq for vblank, and mtk_dsi_stop() will disable irq. * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(), * after dsi is fully set. @@ -836,7 +837,10 @@ static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi) return ret; } - dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev); + ret = mtk_find_possible_crtcs(drm, dsi->host.dev); + if (ret < 0) + goto err_cleanup_encoder; + dsi->encoder.possible_crtcs = ret; ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c index 6a5d0c345a..bf5826b7e7 100644 --- a/drivers/gpu/drm/mediatek/mtk_ethdr.c +++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c @@ -14,8 +14,8 @@ #include #include -#include "mtk_drm_crtc.h" -#include "mtk_drm_ddp_comp.h" +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" #include "mtk_drm_drv.h" #include "mtk_ethdr.h" @@ -50,7 +50,6 @@ #define MIXER_INX_MODE_BYPASS 0 #define MIXER_INX_MODE_EVEN_EXTEND 1 -#define DEFAULT_9BIT_ALPHA 0x100 #define MIXER_ALPHA_AEN BIT(8) #define MIXER_ALPHA 0xff #define ETHDR_CLK_NUM 13 @@ -154,13 +153,19 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, unsigned int offset = (pending->x & 1) << 31 | pending->y << 16 | pending->x; unsigned int align_width = ALIGN_DOWN(pending->width, 2); unsigned int alpha_con = 0; + bool replace_src_a = false; dev_dbg(dev, "%s+ idx:%d", __func__, idx); if (idx >= 4) return; - if (!pending->enable) { + if (!pending->enable || !pending->width || !pending->height) { + /* + * instead of disabling layer with MIX_SRC_CON directly + * set the size to 0 to avoid screen shift due to mixer + * mode switch (hardware behavior) + */ mtk_ddp_write(cmdq_pkt, 0, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_SIZE(idx)); return; } @@ -168,8 +173,16 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, if (state->base.fb && state->base.fb->format->has_alpha) alpha_con = MIXER_ALPHA_AEN | MIXER_ALPHA; - mtk_mmsys_mixer_in_config(priv->mmsys_dev, idx + 1, alpha_con ? false : true, - DEFAULT_9BIT_ALPHA, + if (state->base.fb && !state->base.fb->format->has_alpha) { + /* + * Mixer doesn't support CONST_BLD mode, + * use a trick to make the output equivalent + */ + replace_src_a = true; + } + + mtk_mmsys_mixer_in_config(priv->mmsys_dev, idx + 1, replace_src_a, + MIXER_ALPHA, pending->x & 1 ? MIXER_INX_MODE_EVEN_EXTEND : MIXER_INX_MODE_BYPASS, align_width / 2 - 1, cmdq_pkt); @@ -363,7 +376,6 @@ struct platform_driver mtk_ethdr_driver = { .remove_new = mtk_ethdr_remove, .driver = { .name = "mediatek-disp-ethdr", - .owner = THIS_MODULE, .of_match_table = mtk_ethdr_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_gem.c b/drivers/gpu/drm/mediatek/mtk_gem.c new file mode 100644 index 0000000000..a172456d1d --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_gem.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include + +#include "mtk_drm_drv.h" +#include "mtk_gem.h" + +static int mtk_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); + +static const struct vm_operations_struct vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_gem_object_funcs mtk_gem_object_funcs = { + .free = mtk_gem_free_object, + .get_sg_table = mtk_gem_prime_get_sg_table, + .vmap = mtk_gem_prime_vmap, + .vunmap = mtk_gem_prime_vunmap, + .mmap = mtk_gem_object_mmap, + .vm_ops = &vm_ops, +}; + +static struct mtk_gem_obj *mtk_gem_init(struct drm_device *dev, + unsigned long size) +{ + struct mtk_gem_obj *mtk_gem_obj; + int ret; + + size = round_up(size, PAGE_SIZE); + + if (size == 0) + return ERR_PTR(-EINVAL); + + mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL); + if (!mtk_gem_obj) + return ERR_PTR(-ENOMEM); + + mtk_gem_obj->base.funcs = &mtk_gem_object_funcs; + + ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size); + if (ret < 0) { + DRM_ERROR("failed to initialize gem object\n"); + kfree(mtk_gem_obj); + return ERR_PTR(ret); + } + + return mtk_gem_obj; +} + +struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, + size_t size, bool alloc_kmap) +{ + struct mtk_drm_private *priv = dev->dev_private; + struct mtk_gem_obj *mtk_gem; + struct drm_gem_object *obj; + int ret; + + mtk_gem = mtk_gem_init(dev, size); + if (IS_ERR(mtk_gem)) + return ERR_CAST(mtk_gem); + + obj = &mtk_gem->base; + + mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE; + + if (!alloc_kmap) + mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; + + mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size, + &mtk_gem->dma_addr, GFP_KERNEL, + mtk_gem->dma_attrs); + if (!mtk_gem->cookie) { + DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size); + ret = -ENOMEM; + goto err_gem_free; + } + + if (alloc_kmap) + mtk_gem->kvaddr = mtk_gem->cookie; + + DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n", + mtk_gem->cookie, &mtk_gem->dma_addr, + size); + + return mtk_gem; + +err_gem_free: + drm_gem_object_release(obj); + kfree(mtk_gem); + return ERR_PTR(ret); +} + +void mtk_gem_free_object(struct drm_gem_object *obj) +{ + struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + struct mtk_drm_private *priv = obj->dev->dev_private; + + if (mtk_gem->sg) + drm_prime_gem_destroy(obj, mtk_gem->sg); + else + dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie, + mtk_gem->dma_addr, mtk_gem->dma_attrs); + + /* release file pointer to gem object. */ + drm_gem_object_release(obj); + + kfree(mtk_gem); +} + +int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct mtk_gem_obj *mtk_gem; + int ret; + + args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + + /* + * Multiply 2 variables of different types, + * for example: args->size = args->spacing * args->height; + * may cause coverity issue with unintentional overflow. + */ + args->size = args->pitch; + args->size *= args->height; + + mtk_gem = mtk_gem_create(dev, args->size, false); + if (IS_ERR(mtk_gem)) + return PTR_ERR(mtk_gem); + + /* + * allocate a id of idr table where the obj is registered + * and handle has the id what user can see. + */ + ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle); + if (ret) + goto err_handle_create; + + /* drop reference from allocate - handle holds it now. 
*/ + drm_gem_object_put(&mtk_gem->base); + + return 0; + +err_handle_create: + mtk_gem_free_object(&mtk_gem->base); + return ret; +} + +static int mtk_gem_object_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma) + +{ + int ret; + struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + struct mtk_drm_private *priv = obj->dev->dev_private; + + /* + * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the + * whole buffer from the start. + */ + vma->vm_pgoff = 0; + + /* + * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear + * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). + */ + vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + + ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie, + mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs); + + return ret; +} + +/* + * Allocate a sg_table for this GEM object. + * Note: Both the table's contents, and the sg_table itself must be freed by + * the caller. + * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error. + */ +struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + struct mtk_drm_private *priv = obj->dev->dev_private; + struct sg_table *sgt; + int ret; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); + + ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie, + mtk_gem->dma_addr, obj->size, + mtk_gem->dma_attrs); + if (ret) { + DRM_ERROR("failed to allocate sgt, %d\n", ret); + kfree(sgt); + return ERR_PTR(ret); + } + + return sgt; +} + +struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sg) +{ + struct mtk_gem_obj *mtk_gem; + + /* check if the entries in the sg_table are contiguous */ + if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) { + DRM_ERROR("sg_table is not contiguous"); + return ERR_PTR(-EINVAL); + } + + mtk_gem = mtk_gem_init(dev, attach->dmabuf->size); + if (IS_ERR(mtk_gem)) + return ERR_CAST(mtk_gem); + + mtk_gem->dma_addr = sg_dma_address(sg->sgl); + mtk_gem->sg = sg; + + return &mtk_gem->base; +} + +int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) +{ + struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + struct sg_table *sgt = NULL; + unsigned int npages; + + if (mtk_gem->kvaddr) + goto out; + + sgt = mtk_gem_prime_get_sg_table(obj); + if (IS_ERR(sgt)) + return PTR_ERR(sgt); + + npages = obj->size >> PAGE_SHIFT; + mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL); + if (!mtk_gem->pages) { + sg_free_table(sgt); + kfree(sgt); + return -ENOMEM; + } + + drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages); + + mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + if (!mtk_gem->kvaddr) { + sg_free_table(sgt); + kfree(sgt); + kfree(mtk_gem->pages); + return -ENOMEM; + } + sg_free_table(sgt); + kfree(sgt); + +out: + iosys_map_set_vaddr(map, mtk_gem->kvaddr); + + return 0; +} + +void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map) +{ + struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + void *vaddr = map->vaddr; + + if (!mtk_gem->pages) + return; + + vunmap(vaddr); + mtk_gem->kvaddr = NULL; + kfree(mtk_gem->pages); +} diff --git a/drivers/gpu/drm/mediatek/mtk_gem.h 
b/drivers/gpu/drm/mediatek/mtk_gem.h new file mode 100644 index 0000000000..66e5f154f6 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_gem.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek Inc. + */ + +#ifndef _MTK_GEM_H_ +#define _MTK_GEM_H_ + +#include + +/* + * mtk drm buffer structure. + * + * @base: a gem object. + * - a new handle to this gem object would be created + * by drm_gem_handle_create(). + * @cookie: the return value of dma_alloc_attrs(), keep it for dma_free_attrs() + * @kvaddr: kernel virtual address of gem buffer. + * @dma_addr: dma address of gem buffer. + * @dma_attrs: dma attributes of gem buffer. + * + * P.S. this object would be transferred to user as kms_bo.handle so + * user can access the buffer through kms_bo.handle. + */ +struct mtk_gem_obj { + struct drm_gem_object base; + void *cookie; + void *kvaddr; + dma_addr_t dma_addr; + unsigned long dma_attrs; + struct sg_table *sg; + struct page **pages; +}; + +#define to_mtk_gem_obj(x) container_of(x, struct mtk_gem_obj, base) + +void mtk_gem_free_object(struct drm_gem_object *gem); +struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, size_t size, + bool alloc_kmap); +int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, + struct drm_mode_create_dumb *args); +struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sg); +int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map); + +#endif diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index c6bdc565e4..6e1cca97a6 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1695,7 +1695,7 @@ static int mtk_hdmi_register_audio_driver(struct device *dev) return 0; } -static int mtk_drm_hdmi_probe(struct platform_device *pdev) +static int mtk_hdmi_probe(struct platform_device *pdev) { struct mtk_hdmi *hdmi; struct device *dev = &pdev->dev; @@ -1754,7 +1754,7 @@ err_bridge_remove: return ret; } -static void mtk_drm_hdmi_remove(struct platform_device *pdev) +static void mtk_hdmi_remove(struct platform_device *pdev) { struct mtk_hdmi *hdmi = platform_get_drvdata(pdev); @@ -1798,7 +1798,7 @@ static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8167 = { .cea_modes_only = true, }; -static const struct of_device_id mtk_drm_hdmi_of_ids[] = { +static const struct of_device_id mtk_hdmi_of_ids[] = { { .compatible = "mediatek,mt2701-hdmi", .data = &mtk_hdmi_conf_mt2701, }, @@ -1809,14 +1809,14 @@ static const struct of_device_id mtk_drm_hdmi_of_ids[] = { }, {} }; -MODULE_DEVICE_TABLE(of, mtk_drm_hdmi_of_ids); +MODULE_DEVICE_TABLE(of, mtk_hdmi_of_ids); static struct platform_driver mtk_hdmi_driver = { - .probe = mtk_drm_hdmi_probe, - .remove_new = mtk_drm_hdmi_remove, + .probe = mtk_hdmi_probe, + .remove_new = mtk_hdmi_remove, .driver = { .name = "mediatek-drm-hdmi", - .of_match_table = mtk_drm_hdmi_of_ids, + .of_match_table = mtk_hdmi_of_ids, .pm = &mtk_hdmi_pm_ops, }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c index 54e46e440e..52d55861f9 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c @@ -284,8 +284,7 @@ static int mtk_hdmi_ddc_probe(struct platform_device *pdev) return PTR_ERR(ddc->clk); } 
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ddc->regs = devm_ioremap_resource(&pdev->dev, mem); + ddc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); if (IS_ERR(ddc->regs)) return PTR_ERR(ddc->regs); diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c index ee9ce9b6d0..925cbb7471 100644 --- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c @@ -346,7 +346,6 @@ struct platform_driver mtk_mdp_rdma_driver = { .remove_new = mtk_mdp_rdma_remove, .driver = { .name = "mediatek-mdp-rdma", - .owner = THIS_MODULE, .of_match_table = mtk_mdp_rdma_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_padding.c b/drivers/gpu/drm/mediatek/mtk_padding.c index 0d6451c149..85bc6768b6 100644 --- a/drivers/gpu/drm/mediatek/mtk_padding.c +++ b/drivers/gpu/drm/mediatek/mtk_padding.c @@ -11,9 +11,9 @@ #include #include +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" #include "mtk_disp_drv.h" -#include "mtk_drm_crtc.h" -#include "mtk_drm_ddp_comp.h" #define PADDING_CONTROL_REG 0x00 #define PADDING_BYPASS BIT(0) @@ -154,7 +154,6 @@ struct platform_driver mtk_padding_driver = { .remove = mtk_padding_remove, .driver = { .name = "mediatek-disp-padding", - .owner = THIS_MODULE, .of_match_table = mtk_padding_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c new file mode 100644 index 0000000000..1723d4333f --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_plane.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. + * Author: CK Hu + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_drm_drv.h" +#include "mtk_gem.h" +#include "mtk_plane.h" + +static const u64 modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | + AFBC_FORMAT_MOD_SPLIT | + AFBC_FORMAT_MOD_SPARSE), + DRM_FORMAT_MOD_INVALID, +}; + +static void mtk_plane_reset(struct drm_plane *plane) +{ + struct mtk_plane_state *state; + + if (plane->state) { + __drm_atomic_helper_plane_destroy_state(plane->state); + + state = to_mtk_plane_state(plane->state); + memset(state, 0, sizeof(*state)); + } else { + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return; + } + + __drm_atomic_helper_plane_reset(plane, &state->base); + + state->base.plane = plane; + state->pending.format = DRM_FORMAT_RGB565; + state->pending.modifier = DRM_FORMAT_MOD_LINEAR; +} + +static struct drm_plane_state *mtk_plane_duplicate_state(struct drm_plane *plane) +{ + struct mtk_plane_state *old_state = to_mtk_plane_state(plane->state); + struct mtk_plane_state *state; + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_plane_duplicate_state(plane, &state->base); + + WARN_ON(state->base.plane != plane); + + state->pending = old_state->pending; + + return &state->base; +} + +static bool mtk_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) +{ + if (modifier == DRM_FORMAT_MOD_LINEAR) + return true; + + if (modifier != DRM_FORMAT_MOD_ARM_AFBC( + AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | + AFBC_FORMAT_MOD_SPLIT | + AFBC_FORMAT_MOD_SPARSE)) + return false; + + if (format != DRM_FORMAT_XRGB8888 && + format != DRM_FORMAT_ARGB8888 && + format != DRM_FORMAT_BGRX8888 && + format != DRM_FORMAT_BGRA8888 && + format != DRM_FORMAT_ABGR8888 && + 
format != DRM_FORMAT_XBGR8888 && + format != DRM_FORMAT_RGB888 && + format != DRM_FORMAT_BGR888) + return false; + + return true; +} + +static void mtk_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + __drm_atomic_helper_plane_destroy_state(state); + kfree(to_mtk_plane_state(state)); +} + +static int mtk_plane_atomic_async_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_crtc_state *crtc_state; + int ret; + + if (plane != new_plane_state->crtc->cursor) + return -EINVAL; + + if (!plane->state) + return -EINVAL; + + if (!plane->state->fb) + return -EINVAL; + + ret = mtk_crtc_plane_check(new_plane_state->crtc, plane, + to_mtk_plane_state(new_plane_state)); + if (ret) + return ret; + + crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc); + + return drm_atomic_helper_check_plane_state(plane->state, crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + true, true); +} + +static void mtk_plane_update_new_state(struct drm_plane_state *new_state, + struct mtk_plane_state *mtk_plane_state) +{ + struct drm_framebuffer *fb = new_state->fb; + struct drm_gem_object *gem; + struct mtk_gem_obj *mtk_gem; + unsigned int pitch, format; + u64 modifier; + dma_addr_t addr; + dma_addr_t hdr_addr = 0; + unsigned int hdr_pitch = 0; + int offset; + + gem = fb->obj[0]; + mtk_gem = to_mtk_gem_obj(gem); + addr = mtk_gem->dma_addr; + pitch = fb->pitches[0]; + format = fb->format->format; + modifier = fb->modifier; + + if (modifier == DRM_FORMAT_MOD_LINEAR) { + /* + * Using dma_addr_t variable to calculate with multiplier of different types, + * for example: addr += (new_state->src.x1 >> 16) * fb->format->cpp[0]; + * may cause coverity issue with unintentional overflow. + */ + offset = (new_state->src.x1 >> 16) * fb->format->cpp[0]; + addr += offset; + offset = (new_state->src.y1 >> 16) * pitch; + addr += offset; + } else { + int width_in_blocks = ALIGN(fb->width, AFBC_DATA_BLOCK_WIDTH) + / AFBC_DATA_BLOCK_WIDTH; + int height_in_blocks = ALIGN(fb->height, AFBC_DATA_BLOCK_HEIGHT) + / AFBC_DATA_BLOCK_HEIGHT; + int x_offset_in_blocks = (new_state->src.x1 >> 16) / AFBC_DATA_BLOCK_WIDTH; + int y_offset_in_blocks = (new_state->src.y1 >> 16) / AFBC_DATA_BLOCK_HEIGHT; + int hdr_size, hdr_offset; + + hdr_pitch = width_in_blocks * AFBC_HEADER_BLOCK_SIZE; + pitch = width_in_blocks * AFBC_DATA_BLOCK_WIDTH * + AFBC_DATA_BLOCK_HEIGHT * fb->format->cpp[0]; + + hdr_size = ALIGN(hdr_pitch * height_in_blocks, AFBC_HEADER_ALIGNMENT); + hdr_offset = hdr_pitch * y_offset_in_blocks + + AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks; + + /* + * Using dma_addr_t variable to calculate with multiplier of different types, + * for example: addr += hdr_pitch * y_offset_in_blocks; + * may cause coverity issue with unintentional overflow. + */ + hdr_addr = addr + hdr_offset; + + /* The data plane is offset by 1 additional block. */ + offset = pitch * y_offset_in_blocks + + AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT * + fb->format->cpp[0] * (x_offset_in_blocks + 1); + + /* + * Using dma_addr_t variable to calculate with multiplier of different types, + * for example: addr += pitch * y_offset_in_blocks; + * may cause coverity issue with unintentional overflow. 
+ */ + addr = addr + hdr_size + offset; + } + + mtk_plane_state->pending.enable = true; + mtk_plane_state->pending.pitch = pitch; + mtk_plane_state->pending.hdr_pitch = hdr_pitch; + mtk_plane_state->pending.format = format; + mtk_plane_state->pending.modifier = modifier; + mtk_plane_state->pending.addr = addr; + mtk_plane_state->pending.hdr_addr = hdr_addr; + mtk_plane_state->pending.x = new_state->dst.x1; + mtk_plane_state->pending.y = new_state->dst.y1; + mtk_plane_state->pending.width = drm_rect_width(&new_state->dst); + mtk_plane_state->pending.height = drm_rect_height(&new_state->dst); + mtk_plane_state->pending.rotation = new_state->rotation; + mtk_plane_state->pending.color_encoding = new_state->color_encoding; +} + +static void mtk_plane_atomic_async_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct mtk_plane_state *new_plane_state = to_mtk_plane_state(plane->state); + + plane->state->crtc_x = new_state->crtc_x; + plane->state->crtc_y = new_state->crtc_y; + plane->state->crtc_h = new_state->crtc_h; + plane->state->crtc_w = new_state->crtc_w; + plane->state->src_x = new_state->src_x; + plane->state->src_y = new_state->src_y; + plane->state->src_h = new_state->src_h; + plane->state->src_w = new_state->src_w; + plane->state->dst.x1 = new_state->dst.x1; + plane->state->dst.y1 = new_state->dst.y1; + + mtk_plane_update_new_state(new_state, new_plane_state); + swap(plane->state->fb, new_state->fb); + wmb(); /* Make sure the above parameters are set before update */ + new_plane_state->pending.async_dirty = true; + mtk_crtc_async_update(new_state->crtc, plane, state); +} + +static const struct drm_plane_funcs mtk_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = mtk_plane_reset, + .atomic_duplicate_state = mtk_plane_duplicate_state, + .atomic_destroy_state = mtk_plane_destroy_state, + .format_mod_supported = mtk_plane_format_mod_supported, +}; + +static int mtk_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_framebuffer *fb = new_plane_state->fb; + struct drm_crtc_state *crtc_state; + int ret; + + if (!fb) + return 0; + + if (WARN_ON(!new_plane_state->crtc)) + return 0; + + ret = mtk_crtc_plane_check(new_plane_state->crtc, plane, + to_mtk_plane_state(new_plane_state)); + if (ret) + return ret; + + crtc_state = drm_atomic_get_crtc_state(state, + new_plane_state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + return drm_atomic_helper_check_plane_state(new_plane_state, + crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + true, true); +} + +static void mtk_plane_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state); + mtk_plane_state->pending.enable = false; + wmb(); /* Make sure the above parameter is set before update */ + mtk_plane_state->pending.dirty = true; +} + +static void mtk_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state); + + 
if (!new_state->crtc || WARN_ON(!new_state->fb)) + return; + + if (!new_state->visible) { + mtk_plane_atomic_disable(plane, state); + return; + } + + mtk_plane_update_new_state(new_state, mtk_plane_state); + wmb(); /* Make sure the above parameters are set before update */ + mtk_plane_state->pending.dirty = true; +} + +static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { + .atomic_check = mtk_plane_atomic_check, + .atomic_update = mtk_plane_atomic_update, + .atomic_disable = mtk_plane_atomic_disable, + .atomic_async_update = mtk_plane_atomic_async_update, + .atomic_async_check = mtk_plane_atomic_async_check, +}; + +int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, + unsigned long possible_crtcs, enum drm_plane_type type, + unsigned int supported_rotations, const u32 *formats, + size_t num_formats) +{ + int err; + + if (!formats || !num_formats) { + DRM_ERROR("no formats for plane\n"); + return -EINVAL; + } + + err = drm_universal_plane_init(dev, plane, possible_crtcs, + &mtk_plane_funcs, formats, + num_formats, modifiers, type, NULL); + if (err) { + DRM_ERROR("failed to initialize plane\n"); + return err; + } + + if (supported_rotations) { + err = drm_plane_create_rotation_property(plane, + DRM_MODE_ROTATE_0, + supported_rotations); + if (err) + DRM_INFO("Create rotation property failed\n"); + } + + drm_plane_helper_add(plane, &mtk_plane_helper_funcs); + + return 0; +} diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h new file mode 100644 index 0000000000..231bb7aac9 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_plane.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek Inc. + * Author: CK Hu + */ + +#ifndef _MTK_PLANE_H_ +#define _MTK_PLANE_H_ + +#include +#include + +#define AFBC_DATA_BLOCK_WIDTH 32 +#define AFBC_DATA_BLOCK_HEIGHT 8 +#define AFBC_HEADER_BLOCK_SIZE 16 +#define AFBC_HEADER_ALIGNMENT 1024 + +struct mtk_plane_pending_state { + bool config; + bool enable; + dma_addr_t addr; + dma_addr_t hdr_addr; + unsigned int pitch; + unsigned int hdr_pitch; + unsigned int format; + unsigned long long modifier; + unsigned int x; + unsigned int y; + unsigned int width; + unsigned int height; + unsigned int rotation; + bool dirty; + bool async_dirty; + bool async_config; + enum drm_color_encoding color_encoding; +}; + +struct mtk_plane_state { + struct drm_plane_state base; + struct mtk_plane_pending_state pending; +}; + +static inline struct mtk_plane_state * +to_mtk_plane_state(struct drm_plane_state *state) +{ + return container_of(state, struct mtk_plane_state, base); +} + +int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, + unsigned long possible_crtcs, enum drm_plane_type type, + unsigned int supported_rotations, const u32 *formats, + size_t num_formats); + +#endif diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 17a5cca007..4bd0baa2a4 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -250,29 +250,20 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) if (ret) goto free_drm; ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - goto free_drm; - } + if (ret) + goto free_canvas_osd1; ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - meson_canvas_free(priv->canvas, 
priv->canvas_id_vd1_0); - goto free_drm; - } + if (ret) + goto free_canvas_vd1_0; ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); - goto free_drm; - } + if (ret) + goto free_canvas_vd1_1; priv->vsync_irq = platform_get_irq(pdev, 0); ret = drm_vblank_init(drm, 1); if (ret) - goto free_drm; + goto free_canvas_vd1_2; /* Assign limits per soc revision/package */ for (i = 0 ; i < ARRAY_SIZE(meson_drm_soc_attrs) ; ++i) { @@ -288,11 +279,11 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) */ ret = drm_aperture_remove_framebuffers(&meson_driver); if (ret) - goto free_drm; + goto free_canvas_vd1_2; ret = drmm_mode_config_init(drm); if (ret) - goto free_drm; + goto free_canvas_vd1_2; drm->mode_config.max_width = 3840; drm->mode_config.max_height = 2160; drm->mode_config.funcs = &meson_mode_config_funcs; @@ -307,7 +298,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) if (priv->afbcd.ops) { ret = priv->afbcd.ops->init(priv); if (ret) - goto free_drm; + goto free_canvas_vd1_2; } /* Encoder Initialization */ @@ -371,6 +362,14 @@ uninstall_irq: exit_afbcd: if (priv->afbcd.ops) priv->afbcd.ops->exit(priv); +free_canvas_vd1_2: + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2); +free_canvas_vd1_1: + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); +free_canvas_vd1_0: + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); +free_canvas_osd1: + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); free_drm: drm_dev_put(drm); diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 765e49fd89..58a0e62eaf 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -366,6 +366,7 @@ struct drm_crtc_state; struct drm_display_mode; struct drm_plane; struct drm_atomic_state; +struct drm_scanout_buffer; extern const uint32_t mgag200_primary_plane_formats[]; extern const size_t mgag200_primary_plane_formats_size; @@ -379,12 +380,16 @@ void mgag200_primary_plane_helper_atomic_enable(struct drm_plane *plane, struct drm_atomic_state *state); void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *old_state); +int mgag200_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane, + struct drm_scanout_buffer *sb); + #define MGAG200_PRIMARY_PLANE_HELPER_FUNCS \ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \ .atomic_check = mgag200_primary_plane_helper_atomic_check, \ .atomic_update = mgag200_primary_plane_helper_atomic_update, \ .atomic_enable = mgag200_primary_plane_helper_atomic_enable, \ - .atomic_disable = mgag200_primary_plane_helper_atomic_disable + .atomic_disable = mgag200_primary_plane_helper_atomic_disable, \ + .get_scanout_buffer = mgag200_primary_plane_helper_get_scanout_buffer #define MGAG200_PRIMARY_PLANE_FUNCS \ .update_plane = drm_atomic_helper_update_plane, \ diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index e17cb4c5f7..fc54851d33 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include "mgag200_drv.h" @@ -546,6 +547,23 @@ void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane, msleep(20); } +int 
mgag200_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane, + struct drm_scanout_buffer *sb) +{ + struct mga_device *mdev = to_mga_device(plane->dev); + struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(mdev->vram); + + if (plane->state && plane->state->fb) { + sb->format = plane->state->fb->format; + sb->width = plane->state->fb->width; + sb->height = plane->state->fb->height; + sb->pitch[0] = plane->state->fb->pitches[0]; + sb->map[0] = map; + return 0; + } + return -ENODEV; +} + /* * CRTC */ diff --git a/drivers/gpu/drm/msm/.gitignore b/drivers/gpu/drm/msm/.gitignore new file mode 100644 index 0000000000..9ab870da89 --- /dev/null +++ b/drivers/gpu/drm/msm/.gitignore @@ -0,0 +1 @@ +generated/ diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index f202f26ada..1931ecf73e 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -14,7 +14,7 @@ config DRM_MSM select IOMMU_IO_PGTABLE select QCOM_MDT_LOADER if ARCH_QCOM select REGULATOR - select DRM_DP_AUX_BUS + select DRM_DISPLAY_DP_AUX_BUS select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER select DRM_EXEC @@ -54,6 +54,14 @@ config DRM_MSM_GPU_SUDO Only use this if you are a driver developer. This should *not* be enabled for production kernels. If unsure, say N. +config DRM_MSM_VALIDATE_XML + bool "Validate XML register files against schema" + depends on DRM_MSM && EXPERT + depends on $(success,$(PYTHON3) -c "import lxml") + help + Validate XML files with register definitions against rules-fd schema. + This option is mostly targeting DRM MSM developers. If unsure, say N. + config DRM_MSM_MDSS bool depends on DRM_MSM diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index b21ae2880c..eb788921ff 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -1,13 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y := -I $(srctree)/$(src) -ccflags-y += -I $(srctree)/$(src)/disp/dpu1 -ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi -ccflags-$(CONFIG_DRM_MSM_DP) += -I $(srctree)/$(src)/dp +ccflags-y := -I $(src) +ccflags-y += -I $(obj)/generated +ccflags-y += -I $(src)/disp/dpu1 +ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(src)/dsi +ccflags-$(CONFIG_DRM_MSM_DP) += -I $(src)/dp -msm-y := \ +adreno-y := \ adreno/adreno_device.o \ adreno/adreno_gpu.o \ adreno/a2xx_gpu.o \ + adreno/a2xx_gpummu.o \ adreno/a3xx_gpu.o \ adreno/a4xx_gpu.o \ adreno/a5xx_gpu.o \ @@ -17,7 +19,11 @@ msm-y := \ adreno/a6xx_gmu.o \ adreno/a6xx_hfi.o \ -msm-$(CONFIG_DRM_MSM_HDMI) += \ +adreno-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \ + +adreno-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o + +msm-display-$(CONFIG_DRM_MSM_HDMI) += \ hdmi/hdmi.o \ hdmi/hdmi_audio.o \ hdmi/hdmi_bridge.o \ @@ -30,7 +36,7 @@ msm-$(CONFIG_DRM_MSM_HDMI) += \ hdmi/hdmi_phy_8x74.o \ hdmi/hdmi_pll_8960.o \ -msm-$(CONFIG_DRM_MSM_MDP4) += \ +msm-display-$(CONFIG_DRM_MSM_MDP4) += \ disp/mdp4/mdp4_crtc.o \ disp/mdp4/mdp4_dsi_encoder.o \ disp/mdp4/mdp4_dtv_encoder.o \ @@ -41,7 +47,7 @@ msm-$(CONFIG_DRM_MSM_MDP4) += \ disp/mdp4/mdp4_kms.o \ disp/mdp4/mdp4_plane.o \ -msm-$(CONFIG_DRM_MSM_MDP5) += \ +msm-display-$(CONFIG_DRM_MSM_MDP5) += \ disp/mdp5/mdp5_cfg.o \ disp/mdp5/mdp5_cmd_encoder.o \ disp/mdp5/mdp5_ctl.o \ @@ -54,7 +60,7 @@ msm-$(CONFIG_DRM_MSM_MDP5) += \ disp/mdp5/mdp5_plane.o \ disp/mdp5/mdp5_smp.o \ -msm-$(CONFIG_DRM_MSM_DPU) += \ +msm-display-$(CONFIG_DRM_MSM_DPU) += \ disp/dpu1/dpu_core_perf.o \ disp/dpu1/dpu_crtc.o \ disp/dpu1/dpu_encoder.o \ @@ -84,14 +90,16 @@ 
msm-$(CONFIG_DRM_MSM_DPU) += \ disp/dpu1/dpu_vbif.o \ disp/dpu1/dpu_writeback.o -msm-$(CONFIG_DRM_MSM_MDSS) += \ +msm-display-$(CONFIG_DRM_MSM_MDSS) += \ msm_mdss.o \ -msm-y += \ +msm-display-y += \ disp/mdp_format.o \ disp/mdp_kms.o \ disp/msm_disp_snapshot.o \ disp/msm_disp_snapshot_util.o \ + +msm-y += \ msm_atomic.o \ msm_atomic_tracepoints.o \ msm_debugfs.o \ @@ -113,14 +121,13 @@ msm-y += \ msm_ringbuffer.o \ msm_submitqueue.o \ msm_gpu_tracepoints.o \ - msm_gpummu.o -msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \ - dp/dp_debug.o +msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o -msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o +msm-display-$(CONFIG_DEBUG_FS) += \ + dp/dp_debug.o -msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \ +msm-display-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \ dp/dp_catalog.o \ dp/dp_ctrl.o \ dp/dp_display.o \ @@ -130,21 +137,76 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \ dp/dp_audio.o \ dp/dp_utils.o -msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o - -msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o +msm-display-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o -msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ +msm-display-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ dsi/dsi_cfg.o \ dsi/dsi_host.o \ dsi/dsi_manager.o \ dsi/phy/dsi_phy.o -msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o -msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o -msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o -msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o -msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o -msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o +msm-display-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o +msm-display-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o +msm-display-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o +msm-display-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o +msm-display-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o +msm-display-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o + +msm-y += $(adreno-y) $(msm-display-y) obj-$(CONFIG_DRM_MSM) += msm.o + +ifeq (y,$(CONFIG_DRM_MSM_VALIDATE_XML)) + headergen-opts += --validate +else + headergen-opts += --no-validate +endif + +quiet_cmd_headergen = GENHDR $@ + cmd_headergen = mkdir -p $(obj)/generated && $(PYTHON3) $(src)/registers/gen_header.py \ + $(headergen-opts) --rnn $(src)/registers --xml $< c-defines > $@ + +$(obj)/generated/%.xml.h: $(src)/registers/adreno/%.xml \ + $(src)/registers/adreno/adreno_common.xml \ + $(src)/registers/adreno/adreno_pm4.xml \ + $(src)/registers/freedreno_copyright.xml \ + $(src)/registers/gen_header.py \ + $(src)/registers/rules-fd.xsd \ + FORCE + $(call if_changed,headergen) + +$(obj)/generated/%.xml.h: $(src)/registers/display/%.xml \ + $(src)/registers/freedreno_copyright.xml \ + $(src)/registers/gen_header.py \ + $(src)/registers/rules-fd.xsd \ + FORCE + $(call if_changed,headergen) + +ADRENO_HEADERS = \ + generated/a2xx.xml.h \ + generated/a3xx.xml.h \ + generated/a4xx.xml.h \ + generated/a5xx.xml.h \ + generated/a6xx.xml.h \ + generated/a6xx_gmu.xml.h \ + generated/adreno_common.xml.h \ + generated/adreno_pm4.xml.h \ + +DISPLAY_HEADERS = \ + generated/dsi_phy_7nm.xml.h \ + generated/dsi_phy_10nm.xml.h \ + generated/dsi_phy_14nm.xml.h \ + generated/dsi_phy_20nm.xml.h \ + generated/dsi_phy_28nm_8960.xml.h \ + generated/dsi_phy_28nm.xml.h \ + generated/dsi.xml.h \ + generated/hdmi.xml.h \ + generated/mdp4.xml.h \ + generated/mdp5.xml.h \ + 
generated/mdp_common.xml.h \ + generated/sfpb.xml.h + +$(addprefix $(obj)/,$(adreno-y)): $(addprefix $(obj)/,$(ADRENO_HEADERS)) +$(addprefix $(obj)/,$(msm-display-y)): $(addprefix $(obj)/,$(DISPLAY_HEADERS)) + +targets += $(ADRENO_HEADERS) $(DISPLAY_HEADERS) diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h deleted file mode 100644 index 23141cbcea..0000000000 --- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h +++ /dev/null @@ -1,3251 +0,0 @@ -#ifndef A2XX_XML -#define A2XX_XML - -/* Autogenerated file, DO NOT EDIT manually! - -This file was generated by the rules-ng-ng gen_header.py tool in this git repository: -http://gitlab.freedesktop.org/mesa/mesa/ -git clone https://gitlab.freedesktop.org/mesa/mesa.git - -The rules-ng-ng source files this header was generated from are: - -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from Fri Jun 2 14:59:26 2023) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024) - -Copyright (C) 2013-2024 by the following authors: -- Rob Clark Rob Clark -- Ilia Mirkin Ilia Mirkin - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -*/ - -#ifdef __KERNEL__ -#include -#define assert(x) BUG_ON(!(x)) -#else -#include -#endif - -#ifdef __cplusplus -#define __struct_cast(X) -#else -#define __struct_cast(X) (struct X) -#endif - -enum a2xx_rb_dither_type { - DITHER_PIXEL = 0, - DITHER_SUBPIXEL = 1, -}; - -enum a2xx_colorformatx { - COLORX_4_4_4_4 = 0, - COLORX_1_5_5_5 = 1, - COLORX_5_6_5 = 2, - COLORX_8 = 3, - COLORX_8_8 = 4, - COLORX_8_8_8_8 = 5, - COLORX_S8_8_8_8 = 6, - COLORX_16_FLOAT = 7, - COLORX_16_16_FLOAT = 8, - COLORX_16_16_16_16_FLOAT = 9, - COLORX_32_FLOAT = 10, - COLORX_32_32_FLOAT = 11, - COLORX_32_32_32_32_FLOAT = 12, - COLORX_2_3_3 = 13, - COLORX_8_8_8 = 14, -}; - -enum a2xx_sq_surfaceformat { - FMT_1_REVERSE = 0, - FMT_1 = 1, - FMT_8 = 2, - FMT_1_5_5_5 = 3, - FMT_5_6_5 = 4, - FMT_6_5_5 = 5, - FMT_8_8_8_8 = 6, - FMT_2_10_10_10 = 7, - FMT_8_A = 8, - FMT_8_B = 9, - FMT_8_8 = 10, - FMT_Cr_Y1_Cb_Y0 = 11, - FMT_Y1_Cr_Y0_Cb = 12, - FMT_5_5_5_1 = 13, - FMT_8_8_8_8_A = 14, - FMT_4_4_4_4 = 15, - FMT_8_8_8 = 16, - FMT_DXT1 = 18, - FMT_DXT2_3 = 19, - FMT_DXT4_5 = 20, - FMT_10_10_10_2 = 21, - FMT_24_8 = 22, - FMT_16 = 24, - FMT_16_16 = 25, - FMT_16_16_16_16 = 26, - FMT_16_EXPAND = 27, - FMT_16_16_EXPAND = 28, - FMT_16_16_16_16_EXPAND = 29, - FMT_16_FLOAT = 30, - FMT_16_16_FLOAT = 31, - FMT_16_16_16_16_FLOAT = 32, - FMT_32 = 33, - FMT_32_32 = 34, - FMT_32_32_32_32 = 35, - FMT_32_FLOAT = 36, - FMT_32_32_FLOAT = 37, - FMT_32_32_32_32_FLOAT = 38, - FMT_ATI_TC_RGB = 39, - FMT_ATI_TC_RGBA = 40, - FMT_ATI_TC_555_565_RGB = 41, - FMT_ATI_TC_555_565_RGBA = 42, - FMT_ATI_TC_RGBA_INTERP = 43, - FMT_ATI_TC_555_565_RGBA_INTERP = 44, - FMT_ETC1_RGBA_INTERP = 46, - FMT_ETC1_RGB = 47, - FMT_ETC1_RGBA = 48, - FMT_DXN = 49, - FMT_2_3_3 = 51, - FMT_2_10_10_10_AS_16_16_16_16 = 54, - FMT_10_10_10_2_AS_16_16_16_16 = 55, - FMT_32_32_32_FLOAT = 57, - FMT_DXT3A = 58, - FMT_DXT5A = 59, - FMT_CTX1 = 60, -}; - -enum a2xx_sq_ps_vtx_mode { - POSITION_1_VECTOR = 0, - POSITION_2_VECTORS_UNUSED = 1, - POSITION_2_VECTORS_SPRITE = 2, - POSITION_2_VECTORS_EDGE = 3, - POSITION_2_VECTORS_KILL = 4, - POSITION_2_VECTORS_SPRITE_KILL = 5, - POSITION_2_VECTORS_EDGE_KILL = 6, - MULTIPASS = 7, -}; - -enum a2xx_sq_sample_cntl { - CENTROIDS_ONLY = 0, - CENTERS_ONLY = 1, - CENTROIDS_AND_CENTERS = 2, -}; - -enum a2xx_dx_clip_space { - DXCLIP_OPENGL = 0, - DXCLIP_DIRECTX = 1, -}; - -enum a2xx_pa_su_sc_polymode { - POLY_DISABLED = 0, - POLY_DUALMODE = 1, -}; - -enum a2xx_rb_edram_mode { - EDRAM_NOP = 0, - COLOR_DEPTH = 4, - DEPTH_ONLY = 5, - EDRAM_COPY = 6, -}; - -enum a2xx_pa_sc_pattern_bit_order { - LITTLE = 0, - BIG = 1, -}; - -enum a2xx_pa_sc_auto_reset_cntl { - NEVER = 0, - EACH_PRIMITIVE = 1, - EACH_PACKET = 2, -}; - -enum a2xx_pa_pixcenter { - PIXCENTER_D3D = 0, - PIXCENTER_OGL = 1, -}; - -enum a2xx_pa_roundmode { - TRUNCATE = 0, - ROUND = 1, - ROUNDTOEVEN = 2, - ROUNDTOODD = 3, -}; - -enum a2xx_pa_quantmode { - ONE_SIXTEENTH = 0, - ONE_EIGTH = 1, - ONE_QUARTER = 2, - ONE_HALF = 3, - ONE = 4, -}; - -enum a2xx_rb_copy_sample_select { - SAMPLE_0 = 0, - SAMPLE_1 = 1, - SAMPLE_2 = 2, - SAMPLE_3 = 3, - SAMPLE_01 = 4, - SAMPLE_23 = 5, - SAMPLE_0123 = 6, -}; - -enum a2xx_rb_blend_opcode { - BLEND2_DST_PLUS_SRC = 0, - BLEND2_SRC_MINUS_DST = 1, - BLEND2_MIN_DST_SRC = 2, - BLEND2_MAX_DST_SRC = 3, - BLEND2_DST_MINUS_SRC = 4, - BLEND2_DST_PLUS_SRC_BIAS = 5, -}; - -enum a2xx_su_perfcnt_select { - PERF_PAPC_PASX_REQ = 0, - PERF_PAPC_PASX_FIRST_VECTOR = 2, - PERF_PAPC_PASX_SECOND_VECTOR = 3, - PERF_PAPC_PASX_FIRST_DEAD = 4, - PERF_PAPC_PASX_SECOND_DEAD = 5, - 
PERF_PAPC_PASX_VTX_KILL_DISCARD = 6, - PERF_PAPC_PASX_VTX_NAN_DISCARD = 7, - PERF_PAPC_PA_INPUT_PRIM = 8, - PERF_PAPC_PA_INPUT_NULL_PRIM = 9, - PERF_PAPC_PA_INPUT_EVENT_FLAG = 10, - PERF_PAPC_PA_INPUT_FIRST_PRIM_SLOT = 11, - PERF_PAPC_PA_INPUT_END_OF_PACKET = 12, - PERF_PAPC_CLPR_CULL_PRIM = 13, - PERF_PAPC_CLPR_VV_CULL_PRIM = 15, - PERF_PAPC_CLPR_VTX_KILL_CULL_PRIM = 17, - PERF_PAPC_CLPR_VTX_NAN_CULL_PRIM = 18, - PERF_PAPC_CLPR_CULL_TO_NULL_PRIM = 19, - PERF_PAPC_CLPR_VV_CLIP_PRIM = 21, - PERF_PAPC_CLPR_POINT_CLIP_CANDIDATE = 23, - PERF_PAPC_CLPR_CLIP_PLANE_CNT_1 = 24, - PERF_PAPC_CLPR_CLIP_PLANE_CNT_2 = 25, - PERF_PAPC_CLPR_CLIP_PLANE_CNT_3 = 26, - PERF_PAPC_CLPR_CLIP_PLANE_CNT_4 = 27, - PERF_PAPC_CLPR_CLIP_PLANE_CNT_5 = 28, - PERF_PAPC_CLPR_CLIP_PLANE_CNT_6 = 29, - PERF_PAPC_CLPR_CLIP_PLANE_NEAR = 30, - PERF_PAPC_CLPR_CLIP_PLANE_FAR = 31, - PERF_PAPC_CLPR_CLIP_PLANE_LEFT = 32, - PERF_PAPC_CLPR_CLIP_PLANE_RIGHT = 33, - PERF_PAPC_CLPR_CLIP_PLANE_TOP = 34, - PERF_PAPC_CLPR_CLIP_PLANE_BOTTOM = 35, - PERF_PAPC_CLSM_NULL_PRIM = 36, - PERF_PAPC_CLSM_TOTALLY_VISIBLE_PRIM = 37, - PERF_PAPC_CLSM_CLIP_PRIM = 38, - PERF_PAPC_CLSM_CULL_TO_NULL_PRIM = 39, - PERF_PAPC_CLSM_OUT_PRIM_CNT_1 = 40, - PERF_PAPC_CLSM_OUT_PRIM_CNT_2 = 41, - PERF_PAPC_CLSM_OUT_PRIM_CNT_3 = 42, - PERF_PAPC_CLSM_OUT_PRIM_CNT_4 = 43, - PERF_PAPC_CLSM_OUT_PRIM_CNT_5 = 44, - PERF_PAPC_CLSM_OUT_PRIM_CNT_6_7 = 45, - PERF_PAPC_CLSM_NON_TRIVIAL_CULL = 46, - PERF_PAPC_SU_INPUT_PRIM = 47, - PERF_PAPC_SU_INPUT_CLIP_PRIM = 48, - PERF_PAPC_SU_INPUT_NULL_PRIM = 49, - PERF_PAPC_SU_ZERO_AREA_CULL_PRIM = 50, - PERF_PAPC_SU_BACK_FACE_CULL_PRIM = 51, - PERF_PAPC_SU_FRONT_FACE_CULL_PRIM = 52, - PERF_PAPC_SU_POLYMODE_FACE_CULL = 53, - PERF_PAPC_SU_POLYMODE_BACK_CULL = 54, - PERF_PAPC_SU_POLYMODE_FRONT_CULL = 55, - PERF_PAPC_SU_POLYMODE_INVALID_FILL = 56, - PERF_PAPC_SU_OUTPUT_PRIM = 57, - PERF_PAPC_SU_OUTPUT_CLIP_PRIM = 58, - PERF_PAPC_SU_OUTPUT_NULL_PRIM = 59, - PERF_PAPC_SU_OUTPUT_EVENT_FLAG = 60, - PERF_PAPC_SU_OUTPUT_FIRST_PRIM_SLOT = 61, - PERF_PAPC_SU_OUTPUT_END_OF_PACKET = 62, - PERF_PAPC_SU_OUTPUT_POLYMODE_FACE = 63, - PERF_PAPC_SU_OUTPUT_POLYMODE_BACK = 64, - PERF_PAPC_SU_OUTPUT_POLYMODE_FRONT = 65, - PERF_PAPC_SU_OUT_CLIP_POLYMODE_FACE = 66, - PERF_PAPC_SU_OUT_CLIP_POLYMODE_BACK = 67, - PERF_PAPC_SU_OUT_CLIP_POLYMODE_FRONT = 68, - PERF_PAPC_PASX_REQ_IDLE = 69, - PERF_PAPC_PASX_REQ_BUSY = 70, - PERF_PAPC_PASX_REQ_STALLED = 71, - PERF_PAPC_PASX_REC_IDLE = 72, - PERF_PAPC_PASX_REC_BUSY = 73, - PERF_PAPC_PASX_REC_STARVED_SX = 74, - PERF_PAPC_PASX_REC_STALLED = 75, - PERF_PAPC_PASX_REC_STALLED_POS_MEM = 76, - PERF_PAPC_PASX_REC_STALLED_CCGSM_IN = 77, - PERF_PAPC_CCGSM_IDLE = 78, - PERF_PAPC_CCGSM_BUSY = 79, - PERF_PAPC_CCGSM_STALLED = 80, - PERF_PAPC_CLPRIM_IDLE = 81, - PERF_PAPC_CLPRIM_BUSY = 82, - PERF_PAPC_CLPRIM_STALLED = 83, - PERF_PAPC_CLPRIM_STARVED_CCGSM = 84, - PERF_PAPC_CLIPSM_IDLE = 85, - PERF_PAPC_CLIPSM_BUSY = 86, - PERF_PAPC_CLIPSM_WAIT_CLIP_VERT_ENGH = 87, - PERF_PAPC_CLIPSM_WAIT_HIGH_PRI_SEQ = 88, - PERF_PAPC_CLIPSM_WAIT_CLIPGA = 89, - PERF_PAPC_CLIPSM_WAIT_AVAIL_VTE_CLIP = 90, - PERF_PAPC_CLIPSM_WAIT_CLIP_OUTSM = 91, - PERF_PAPC_CLIPGA_IDLE = 92, - PERF_PAPC_CLIPGA_BUSY = 93, - PERF_PAPC_CLIPGA_STARVED_VTE_CLIP = 94, - PERF_PAPC_CLIPGA_STALLED = 95, - PERF_PAPC_CLIP_IDLE = 96, - PERF_PAPC_CLIP_BUSY = 97, - PERF_PAPC_SU_IDLE = 98, - PERF_PAPC_SU_BUSY = 99, - PERF_PAPC_SU_STARVED_CLIP = 100, - PERF_PAPC_SU_STALLED_SC = 101, - PERF_PAPC_SU_FACENESS_CULL = 102, -}; - -enum a2xx_sc_perfcnt_select { - SC_SR_WINDOW_VALID = 0, - 
SC_CW_WINDOW_VALID = 1, - SC_QM_WINDOW_VALID = 2, - SC_FW_WINDOW_VALID = 3, - SC_EZ_WINDOW_VALID = 4, - SC_IT_WINDOW_VALID = 5, - SC_STARVED_BY_PA = 6, - SC_STALLED_BY_RB_TILE = 7, - SC_STALLED_BY_RB_SAMP = 8, - SC_STARVED_BY_RB_EZ = 9, - SC_STALLED_BY_SAMPLE_FF = 10, - SC_STALLED_BY_SQ = 11, - SC_STALLED_BY_SP = 12, - SC_TOTAL_NO_PRIMS = 13, - SC_NON_EMPTY_PRIMS = 14, - SC_NO_TILES_PASSING_QM = 15, - SC_NO_PIXELS_PRE_EZ = 16, - SC_NO_PIXELS_POST_EZ = 17, -}; - -enum a2xx_vgt_perfcount_select { - VGT_SQ_EVENT_WINDOW_ACTIVE = 0, - VGT_SQ_SEND = 1, - VGT_SQ_STALLED = 2, - VGT_SQ_STARVED_BUSY = 3, - VGT_SQ_STARVED_IDLE = 4, - VGT_SQ_STATIC = 5, - VGT_PA_EVENT_WINDOW_ACTIVE = 6, - VGT_PA_CLIP_V_SEND = 7, - VGT_PA_CLIP_V_STALLED = 8, - VGT_PA_CLIP_V_STARVED_BUSY = 9, - VGT_PA_CLIP_V_STARVED_IDLE = 10, - VGT_PA_CLIP_V_STATIC = 11, - VGT_PA_CLIP_P_SEND = 12, - VGT_PA_CLIP_P_STALLED = 13, - VGT_PA_CLIP_P_STARVED_BUSY = 14, - VGT_PA_CLIP_P_STARVED_IDLE = 15, - VGT_PA_CLIP_P_STATIC = 16, - VGT_PA_CLIP_S_SEND = 17, - VGT_PA_CLIP_S_STALLED = 18, - VGT_PA_CLIP_S_STARVED_BUSY = 19, - VGT_PA_CLIP_S_STARVED_IDLE = 20, - VGT_PA_CLIP_S_STATIC = 21, - RBIU_FIFOS_EVENT_WINDOW_ACTIVE = 22, - RBIU_IMMED_DATA_FIFO_STARVED = 23, - RBIU_IMMED_DATA_FIFO_STALLED = 24, - RBIU_DMA_REQUEST_FIFO_STARVED = 25, - RBIU_DMA_REQUEST_FIFO_STALLED = 26, - RBIU_DRAW_INITIATOR_FIFO_STARVED = 27, - RBIU_DRAW_INITIATOR_FIFO_STALLED = 28, - BIN_PRIM_NEAR_CULL = 29, - BIN_PRIM_ZERO_CULL = 30, - BIN_PRIM_FAR_CULL = 31, - BIN_PRIM_BIN_CULL = 32, - BIN_PRIM_FACE_CULL = 33, - SPARE34 = 34, - SPARE35 = 35, - SPARE36 = 36, - SPARE37 = 37, - SPARE38 = 38, - SPARE39 = 39, - TE_SU_IN_VALID = 40, - TE_SU_IN_READ = 41, - TE_SU_IN_PRIM = 42, - TE_SU_IN_EOP = 43, - TE_SU_IN_NULL_PRIM = 44, - TE_WK_IN_VALID = 45, - TE_WK_IN_READ = 46, - TE_OUT_PRIM_VALID = 47, - TE_OUT_PRIM_READ = 48, -}; - -enum a2xx_tcr_perfcount_select { - DGMMPD_IPMUX0_STALL = 0, - DGMMPD_IPMUX_ALL_STALL = 4, - OPMUX0_L2_WRITES = 5, -}; - -enum a2xx_tp_perfcount_select { - POINT_QUADS = 0, - BILIN_QUADS = 1, - ANISO_QUADS = 2, - MIP_QUADS = 3, - VOL_QUADS = 4, - MIP_VOL_QUADS = 5, - MIP_ANISO_QUADS = 6, - VOL_ANISO_QUADS = 7, - ANISO_2_1_QUADS = 8, - ANISO_4_1_QUADS = 9, - ANISO_6_1_QUADS = 10, - ANISO_8_1_QUADS = 11, - ANISO_10_1_QUADS = 12, - ANISO_12_1_QUADS = 13, - ANISO_14_1_QUADS = 14, - ANISO_16_1_QUADS = 15, - MIP_VOL_ANISO_QUADS = 16, - ALIGN_2_QUADS = 17, - ALIGN_4_QUADS = 18, - PIX_0_QUAD = 19, - PIX_1_QUAD = 20, - PIX_2_QUAD = 21, - PIX_3_QUAD = 22, - PIX_4_QUAD = 23, - TP_MIPMAP_LOD0 = 24, - TP_MIPMAP_LOD1 = 25, - TP_MIPMAP_LOD2 = 26, - TP_MIPMAP_LOD3 = 27, - TP_MIPMAP_LOD4 = 28, - TP_MIPMAP_LOD5 = 29, - TP_MIPMAP_LOD6 = 30, - TP_MIPMAP_LOD7 = 31, - TP_MIPMAP_LOD8 = 32, - TP_MIPMAP_LOD9 = 33, - TP_MIPMAP_LOD10 = 34, - TP_MIPMAP_LOD11 = 35, - TP_MIPMAP_LOD12 = 36, - TP_MIPMAP_LOD13 = 37, - TP_MIPMAP_LOD14 = 38, -}; - -enum a2xx_tcm_perfcount_select { - QUAD0_RD_LAT_FIFO_EMPTY = 0, - QUAD0_RD_LAT_FIFO_4TH_FULL = 3, - QUAD0_RD_LAT_FIFO_HALF_FULL = 4, - QUAD0_RD_LAT_FIFO_FULL = 5, - QUAD0_RD_LAT_FIFO_LT_4TH_FULL = 6, - READ_STARVED_QUAD0 = 28, - READ_STARVED = 32, - READ_STALLED_QUAD0 = 33, - READ_STALLED = 37, - VALID_READ_QUAD0 = 38, - TC_TP_STARVED_QUAD0 = 42, - TC_TP_STARVED = 46, -}; - -enum a2xx_tcf_perfcount_select { - VALID_CYCLES = 0, - SINGLE_PHASES = 1, - ANISO_PHASES = 2, - MIP_PHASES = 3, - VOL_PHASES = 4, - MIP_VOL_PHASES = 5, - MIP_ANISO_PHASES = 6, - VOL_ANISO_PHASES = 7, - ANISO_2_1_PHASES = 8, - ANISO_4_1_PHASES = 9, - ANISO_6_1_PHASES = 10, - 
ANISO_8_1_PHASES = 11, - ANISO_10_1_PHASES = 12, - ANISO_12_1_PHASES = 13, - ANISO_14_1_PHASES = 14, - ANISO_16_1_PHASES = 15, - MIP_VOL_ANISO_PHASES = 16, - ALIGN_2_PHASES = 17, - ALIGN_4_PHASES = 18, - TPC_BUSY = 19, - TPC_STALLED = 20, - TPC_STARVED = 21, - TPC_WORKING = 22, - TPC_WALKER_BUSY = 23, - TPC_WALKER_STALLED = 24, - TPC_WALKER_WORKING = 25, - TPC_ALIGNER_BUSY = 26, - TPC_ALIGNER_STALLED = 27, - TPC_ALIGNER_STALLED_BY_BLEND = 28, - TPC_ALIGNER_STALLED_BY_CACHE = 29, - TPC_ALIGNER_WORKING = 30, - TPC_BLEND_BUSY = 31, - TPC_BLEND_SYNC = 32, - TPC_BLEND_STARVED = 33, - TPC_BLEND_WORKING = 34, - OPCODE_0x00 = 35, - OPCODE_0x01 = 36, - OPCODE_0x04 = 37, - OPCODE_0x10 = 38, - OPCODE_0x11 = 39, - OPCODE_0x12 = 40, - OPCODE_0x13 = 41, - OPCODE_0x18 = 42, - OPCODE_0x19 = 43, - OPCODE_0x1A = 44, - OPCODE_OTHER = 45, - IN_FIFO_0_EMPTY = 56, - IN_FIFO_0_LT_HALF_FULL = 57, - IN_FIFO_0_HALF_FULL = 58, - IN_FIFO_0_FULL = 59, - IN_FIFO_TPC_EMPTY = 72, - IN_FIFO_TPC_LT_HALF_FULL = 73, - IN_FIFO_TPC_HALF_FULL = 74, - IN_FIFO_TPC_FULL = 75, - TPC_TC_XFC = 76, - TPC_TC_STATE = 77, - TC_STALL = 78, - QUAD0_TAPS = 79, - QUADS = 83, - TCA_SYNC_STALL = 84, - TAG_STALL = 85, - TCB_SYNC_STALL = 88, - TCA_VALID = 89, - PROBES_VALID = 90, - MISS_STALL = 91, - FETCH_FIFO_STALL = 92, - TCO_STALL = 93, - ANY_STALL = 94, - TAG_MISSES = 95, - TAG_HITS = 96, - SUB_TAG_MISSES = 97, - SET0_INVALIDATES = 98, - SET1_INVALIDATES = 99, - SET2_INVALIDATES = 100, - SET3_INVALIDATES = 101, - SET0_TAG_MISSES = 102, - SET1_TAG_MISSES = 103, - SET2_TAG_MISSES = 104, - SET3_TAG_MISSES = 105, - SET0_TAG_HITS = 106, - SET1_TAG_HITS = 107, - SET2_TAG_HITS = 108, - SET3_TAG_HITS = 109, - SET0_SUB_TAG_MISSES = 110, - SET1_SUB_TAG_MISSES = 111, - SET2_SUB_TAG_MISSES = 112, - SET3_SUB_TAG_MISSES = 113, - SET0_EVICT1 = 114, - SET0_EVICT2 = 115, - SET0_EVICT3 = 116, - SET0_EVICT4 = 117, - SET0_EVICT5 = 118, - SET0_EVICT6 = 119, - SET0_EVICT7 = 120, - SET0_EVICT8 = 121, - SET1_EVICT1 = 130, - SET1_EVICT2 = 131, - SET1_EVICT3 = 132, - SET1_EVICT4 = 133, - SET1_EVICT5 = 134, - SET1_EVICT6 = 135, - SET1_EVICT7 = 136, - SET1_EVICT8 = 137, - SET2_EVICT1 = 146, - SET2_EVICT2 = 147, - SET2_EVICT3 = 148, - SET2_EVICT4 = 149, - SET2_EVICT5 = 150, - SET2_EVICT6 = 151, - SET2_EVICT7 = 152, - SET2_EVICT8 = 153, - SET3_EVICT1 = 162, - SET3_EVICT2 = 163, - SET3_EVICT3 = 164, - SET3_EVICT4 = 165, - SET3_EVICT5 = 166, - SET3_EVICT6 = 167, - SET3_EVICT7 = 168, - SET3_EVICT8 = 169, - FF_EMPTY = 178, - FF_LT_HALF_FULL = 179, - FF_HALF_FULL = 180, - FF_FULL = 181, - FF_XFC = 182, - FF_STALLED = 183, - FG_MASKS = 184, - FG_LEFT_MASKS = 185, - FG_LEFT_MASK_STALLED = 186, - FG_LEFT_NOT_DONE_STALL = 187, - FG_LEFT_FG_STALL = 188, - FG_LEFT_SECTORS = 189, - FG0_REQUESTS = 195, - FG0_STALLED = 196, - MEM_REQ512 = 199, - MEM_REQ_SENT = 200, - MEM_LOCAL_READ_REQ = 202, - TC0_MH_STALLED = 203, -}; - -enum a2xx_sq_perfcnt_select { - SQ_PIXEL_VECTORS_SUB = 0, - SQ_VERTEX_VECTORS_SUB = 1, - SQ_ALU0_ACTIVE_VTX_SIMD0 = 2, - SQ_ALU1_ACTIVE_VTX_SIMD0 = 3, - SQ_ALU0_ACTIVE_PIX_SIMD0 = 4, - SQ_ALU1_ACTIVE_PIX_SIMD0 = 5, - SQ_ALU0_ACTIVE_VTX_SIMD1 = 6, - SQ_ALU1_ACTIVE_VTX_SIMD1 = 7, - SQ_ALU0_ACTIVE_PIX_SIMD1 = 8, - SQ_ALU1_ACTIVE_PIX_SIMD1 = 9, - SQ_EXPORT_CYCLES = 10, - SQ_ALU_CST_WRITTEN = 11, - SQ_TEX_CST_WRITTEN = 12, - SQ_ALU_CST_STALL = 13, - SQ_ALU_TEX_STALL = 14, - SQ_INST_WRITTEN = 15, - SQ_BOOLEAN_WRITTEN = 16, - SQ_LOOPS_WRITTEN = 17, - SQ_PIXEL_SWAP_IN = 18, - SQ_PIXEL_SWAP_OUT = 19, - SQ_VERTEX_SWAP_IN = 20, - SQ_VERTEX_SWAP_OUT = 21, - 
SQ_ALU_VTX_INST_ISSUED = 22, - SQ_TEX_VTX_INST_ISSUED = 23, - SQ_VC_VTX_INST_ISSUED = 24, - SQ_CF_VTX_INST_ISSUED = 25, - SQ_ALU_PIX_INST_ISSUED = 26, - SQ_TEX_PIX_INST_ISSUED = 27, - SQ_VC_PIX_INST_ISSUED = 28, - SQ_CF_PIX_INST_ISSUED = 29, - SQ_ALU0_FIFO_EMPTY_SIMD0 = 30, - SQ_ALU1_FIFO_EMPTY_SIMD0 = 31, - SQ_ALU0_FIFO_EMPTY_SIMD1 = 32, - SQ_ALU1_FIFO_EMPTY_SIMD1 = 33, - SQ_ALU_NOPS = 34, - SQ_PRED_SKIP = 35, - SQ_SYNC_ALU_STALL_SIMD0_VTX = 36, - SQ_SYNC_ALU_STALL_SIMD1_VTX = 37, - SQ_SYNC_TEX_STALL_VTX = 38, - SQ_SYNC_VC_STALL_VTX = 39, - SQ_CONSTANTS_USED_SIMD0 = 40, - SQ_CONSTANTS_SENT_SP_SIMD0 = 41, - SQ_GPR_STALL_VTX = 42, - SQ_GPR_STALL_PIX = 43, - SQ_VTX_RS_STALL = 44, - SQ_PIX_RS_STALL = 45, - SQ_SX_PC_FULL = 46, - SQ_SX_EXP_BUFF_FULL = 47, - SQ_SX_POS_BUFF_FULL = 48, - SQ_INTERP_QUADS = 49, - SQ_INTERP_ACTIVE = 50, - SQ_IN_PIXEL_STALL = 51, - SQ_IN_VTX_STALL = 52, - SQ_VTX_CNT = 53, - SQ_VTX_VECTOR2 = 54, - SQ_VTX_VECTOR3 = 55, - SQ_VTX_VECTOR4 = 56, - SQ_PIXEL_VECTOR1 = 57, - SQ_PIXEL_VECTOR23 = 58, - SQ_PIXEL_VECTOR4 = 59, - SQ_CONSTANTS_USED_SIMD1 = 60, - SQ_CONSTANTS_SENT_SP_SIMD1 = 61, - SQ_SX_MEM_EXP_FULL = 62, - SQ_ALU0_ACTIVE_VTX_SIMD2 = 63, - SQ_ALU1_ACTIVE_VTX_SIMD2 = 64, - SQ_ALU0_ACTIVE_PIX_SIMD2 = 65, - SQ_ALU1_ACTIVE_PIX_SIMD2 = 66, - SQ_ALU0_ACTIVE_VTX_SIMD3 = 67, - SQ_PERFCOUNT_VTX_QUAL_TP_DONE = 68, - SQ_ALU0_ACTIVE_PIX_SIMD3 = 69, - SQ_PERFCOUNT_PIX_QUAL_TP_DONE = 70, - SQ_ALU0_FIFO_EMPTY_SIMD2 = 71, - SQ_ALU1_FIFO_EMPTY_SIMD2 = 72, - SQ_ALU0_FIFO_EMPTY_SIMD3 = 73, - SQ_ALU1_FIFO_EMPTY_SIMD3 = 74, - SQ_SYNC_ALU_STALL_SIMD2_VTX = 75, - SQ_PERFCOUNT_VTX_POP_THREAD = 76, - SQ_SYNC_ALU_STALL_SIMD0_PIX = 77, - SQ_SYNC_ALU_STALL_SIMD1_PIX = 78, - SQ_SYNC_ALU_STALL_SIMD2_PIX = 79, - SQ_PERFCOUNT_PIX_POP_THREAD = 80, - SQ_SYNC_TEX_STALL_PIX = 81, - SQ_SYNC_VC_STALL_PIX = 82, - SQ_CONSTANTS_USED_SIMD2 = 83, - SQ_CONSTANTS_SENT_SP_SIMD2 = 84, - SQ_PERFCOUNT_VTX_DEALLOC_ACK = 85, - SQ_PERFCOUNT_PIX_DEALLOC_ACK = 86, - SQ_ALU0_FIFO_FULL_SIMD0 = 87, - SQ_ALU1_FIFO_FULL_SIMD0 = 88, - SQ_ALU0_FIFO_FULL_SIMD1 = 89, - SQ_ALU1_FIFO_FULL_SIMD1 = 90, - SQ_ALU0_FIFO_FULL_SIMD2 = 91, - SQ_ALU1_FIFO_FULL_SIMD2 = 92, - SQ_ALU0_FIFO_FULL_SIMD3 = 93, - SQ_ALU1_FIFO_FULL_SIMD3 = 94, - VC_PERF_STATIC = 95, - VC_PERF_STALLED = 96, - VC_PERF_STARVED = 97, - VC_PERF_SEND = 98, - VC_PERF_ACTUAL_STARVED = 99, - PIXEL_THREAD_0_ACTIVE = 100, - VERTEX_THREAD_0_ACTIVE = 101, - PIXEL_THREAD_0_NUMBER = 102, - VERTEX_THREAD_0_NUMBER = 103, - VERTEX_EVENT_NUMBER = 104, - PIXEL_EVENT_NUMBER = 105, - PTRBUFF_EF_PUSH = 106, - PTRBUFF_EF_POP_EVENT = 107, - PTRBUFF_EF_POP_NEW_VTX = 108, - PTRBUFF_EF_POP_DEALLOC = 109, - PTRBUFF_EF_POP_PVECTOR = 110, - PTRBUFF_EF_POP_PVECTOR_X = 111, - PTRBUFF_EF_POP_PVECTOR_VNZ = 112, - PTRBUFF_PB_DEALLOC = 113, - PTRBUFF_PI_STATE_PPB_POP = 114, - PTRBUFF_PI_RTR = 115, - PTRBUFF_PI_READ_EN = 116, - PTRBUFF_PI_BUFF_SWAP = 117, - PTRBUFF_SQ_FREE_BUFF = 118, - PTRBUFF_SQ_DEC = 119, - PTRBUFF_SC_VALID_CNTL_EVENT = 120, - PTRBUFF_SC_VALID_IJ_XFER = 121, - PTRBUFF_SC_NEW_VECTOR_1_Q = 122, - PTRBUFF_QUAL_NEW_VECTOR = 123, - PTRBUFF_QUAL_EVENT = 124, - PTRBUFF_END_BUFFER = 125, - PTRBUFF_FILL_QUAD = 126, - VERTS_WRITTEN_SPI = 127, - TP_FETCH_INSTR_EXEC = 128, - TP_FETCH_INSTR_REQ = 129, - TP_DATA_RETURN = 130, - SPI_WRITE_CYCLES_SP = 131, - SPI_WRITES_SP = 132, - SP_ALU_INSTR_EXEC = 133, - SP_CONST_ADDR_TO_SQ = 134, - SP_PRED_KILLS_TO_SQ = 135, - SP_EXPORT_CYCLES_TO_SX = 136, - SP_EXPORTS_TO_SX = 137, - SQ_CYCLES_ELAPSED = 138, - SQ_TCFS_OPT_ALLOC_EXEC = 139, - 
SQ_TCFS_NO_OPT_ALLOC = 140, - SQ_ALU0_NO_OPT_ALLOC = 141, - SQ_ALU1_NO_OPT_ALLOC = 142, - SQ_TCFS_ARB_XFC_CNT = 143, - SQ_ALU0_ARB_XFC_CNT = 144, - SQ_ALU1_ARB_XFC_CNT = 145, - SQ_TCFS_CFS_UPDATE_CNT = 146, - SQ_ALU0_CFS_UPDATE_CNT = 147, - SQ_ALU1_CFS_UPDATE_CNT = 148, - SQ_VTX_PUSH_THREAD_CNT = 149, - SQ_VTX_POP_THREAD_CNT = 150, - SQ_PIX_PUSH_THREAD_CNT = 151, - SQ_PIX_POP_THREAD_CNT = 152, - SQ_PIX_TOTAL = 153, - SQ_PIX_KILLED = 154, -}; - -enum a2xx_sx_perfcnt_select { - SX_EXPORT_VECTORS = 0, - SX_DUMMY_QUADS = 1, - SX_ALPHA_FAIL = 2, - SX_RB_QUAD_BUSY = 3, - SX_RB_COLOR_BUSY = 4, - SX_RB_QUAD_STALL = 5, - SX_RB_COLOR_STALL = 6, -}; - -enum a2xx_rbbm_perfcount1_sel { - RBBM1_COUNT = 0, - RBBM1_NRT_BUSY = 1, - RBBM1_RB_BUSY = 2, - RBBM1_SQ_CNTX0_BUSY = 3, - RBBM1_SQ_CNTX17_BUSY = 4, - RBBM1_VGT_BUSY = 5, - RBBM1_VGT_NODMA_BUSY = 6, - RBBM1_PA_BUSY = 7, - RBBM1_SC_CNTX_BUSY = 8, - RBBM1_TPC_BUSY = 9, - RBBM1_TC_BUSY = 10, - RBBM1_SX_BUSY = 11, - RBBM1_CP_COHER_BUSY = 12, - RBBM1_CP_NRT_BUSY = 13, - RBBM1_GFX_IDLE_STALL = 14, - RBBM1_INTERRUPT = 15, -}; - -enum a2xx_cp_perfcount_sel { - ALWAYS_COUNT = 0, - TRANS_FIFO_FULL = 1, - TRANS_FIFO_AF = 2, - RCIU_PFPTRANS_WAIT = 3, - RCIU_NRTTRANS_WAIT = 6, - CSF_NRT_READ_WAIT = 8, - CSF_I1_FIFO_FULL = 9, - CSF_I2_FIFO_FULL = 10, - CSF_ST_FIFO_FULL = 11, - CSF_RING_ROQ_FULL = 13, - CSF_I1_ROQ_FULL = 14, - CSF_I2_ROQ_FULL = 15, - CSF_ST_ROQ_FULL = 16, - MIU_TAG_MEM_FULL = 18, - MIU_WRITECLEAN = 19, - MIU_NRT_WRITE_STALLED = 22, - MIU_NRT_READ_STALLED = 23, - ME_WRITE_CONFIRM_FIFO_FULL = 24, - ME_VS_DEALLOC_FIFO_FULL = 25, - ME_PS_DEALLOC_FIFO_FULL = 26, - ME_REGS_VS_EVENT_FIFO_FULL = 27, - ME_REGS_PS_EVENT_FIFO_FULL = 28, - ME_REGS_CF_EVENT_FIFO_FULL = 29, - ME_MICRO_RB_STARVED = 30, - ME_MICRO_I1_STARVED = 31, - ME_MICRO_I2_STARVED = 32, - ME_MICRO_ST_STARVED = 33, - RCIU_RBBM_DWORD_SENT = 40, - ME_BUSY_CLOCKS = 41, - ME_WAIT_CONTEXT_AVAIL = 42, - PFP_TYPE0_PACKET = 43, - PFP_TYPE3_PACKET = 44, - CSF_RB_WPTR_NEQ_RPTR = 45, - CSF_I1_SIZE_NEQ_ZERO = 46, - CSF_I2_SIZE_NEQ_ZERO = 47, - CSF_RBI1I2_FETCHING = 48, -}; - -enum a2xx_rb_perfcnt_select { - RBPERF_CNTX_BUSY = 0, - RBPERF_CNTX_BUSY_MAX = 1, - RBPERF_SX_QUAD_STARVED = 2, - RBPERF_SX_QUAD_STARVED_MAX = 3, - RBPERF_GA_GC_CH0_SYS_REQ = 4, - RBPERF_GA_GC_CH0_SYS_REQ_MAX = 5, - RBPERF_GA_GC_CH1_SYS_REQ = 6, - RBPERF_GA_GC_CH1_SYS_REQ_MAX = 7, - RBPERF_MH_STARVED = 8, - RBPERF_MH_STARVED_MAX = 9, - RBPERF_AZ_BC_COLOR_BUSY = 10, - RBPERF_AZ_BC_COLOR_BUSY_MAX = 11, - RBPERF_AZ_BC_Z_BUSY = 12, - RBPERF_AZ_BC_Z_BUSY_MAX = 13, - RBPERF_RB_SC_TILE_RTR_N = 14, - RBPERF_RB_SC_TILE_RTR_N_MAX = 15, - RBPERF_RB_SC_SAMP_RTR_N = 16, - RBPERF_RB_SC_SAMP_RTR_N_MAX = 17, - RBPERF_RB_SX_QUAD_RTR_N = 18, - RBPERF_RB_SX_QUAD_RTR_N_MAX = 19, - RBPERF_RB_SX_COLOR_RTR_N = 20, - RBPERF_RB_SX_COLOR_RTR_N_MAX = 21, - RBPERF_RB_SC_SAMP_LZ_BUSY = 22, - RBPERF_RB_SC_SAMP_LZ_BUSY_MAX = 23, - RBPERF_ZXP_STALL = 24, - RBPERF_ZXP_STALL_MAX = 25, - RBPERF_EVENT_PENDING = 26, - RBPERF_EVENT_PENDING_MAX = 27, - RBPERF_RB_MH_VALID = 28, - RBPERF_RB_MH_VALID_MAX = 29, - RBPERF_SX_RB_QUAD_SEND = 30, - RBPERF_SX_RB_COLOR_SEND = 31, - RBPERF_SC_RB_TILE_SEND = 32, - RBPERF_SC_RB_SAMPLE_SEND = 33, - RBPERF_SX_RB_MEM_EXPORT = 34, - RBPERF_SX_RB_QUAD_EVENT = 35, - RBPERF_SC_RB_TILE_EVENT_FILTERED = 36, - RBPERF_SC_RB_TILE_EVENT_ALL = 37, - RBPERF_RB_SC_EZ_SEND = 38, - RBPERF_RB_SX_INDEX_SEND = 39, - RBPERF_GMEM_INTFO_RD = 40, - RBPERF_GMEM_INTF1_RD = 41, - RBPERF_GMEM_INTFO_WR = 42, - RBPERF_GMEM_INTF1_WR = 43, - RBPERF_RB_CP_CONTEXT_DONE = 
44, - RBPERF_RB_CP_CACHE_FLUSH = 45, - RBPERF_ZPASS_DONE = 46, - RBPERF_ZCMD_VALID = 47, - RBPERF_CCMD_VALID = 48, - RBPERF_ACCUM_GRANT = 49, - RBPERF_ACCUM_C0_GRANT = 50, - RBPERF_ACCUM_C1_GRANT = 51, - RBPERF_ACCUM_FULL_BE_WR = 52, - RBPERF_ACCUM_REQUEST_NO_GRANT = 53, - RBPERF_ACCUM_TIMEOUT_PULSE = 54, - RBPERF_ACCUM_LIN_TIMEOUT_PULSE = 55, - RBPERF_ACCUM_CAM_HIT_FLUSHING = 56, -}; - -enum a2xx_mh_perfcnt_select { - CP_R0_REQUESTS = 0, - CP_R1_REQUESTS = 1, - CP_R2_REQUESTS = 2, - CP_R3_REQUESTS = 3, - CP_R4_REQUESTS = 4, - CP_TOTAL_READ_REQUESTS = 5, - CP_TOTAL_WRITE_REQUESTS = 6, - CP_TOTAL_REQUESTS = 7, - CP_DATA_BYTES_WRITTEN = 8, - CP_WRITE_CLEAN_RESPONSES = 9, - CP_R0_READ_BURSTS_RECEIVED = 10, - CP_R1_READ_BURSTS_RECEIVED = 11, - CP_R2_READ_BURSTS_RECEIVED = 12, - CP_R3_READ_BURSTS_RECEIVED = 13, - CP_R4_READ_BURSTS_RECEIVED = 14, - CP_TOTAL_READ_BURSTS_RECEIVED = 15, - CP_R0_DATA_BEATS_READ = 16, - CP_R1_DATA_BEATS_READ = 17, - CP_R2_DATA_BEATS_READ = 18, - CP_R3_DATA_BEATS_READ = 19, - CP_R4_DATA_BEATS_READ = 20, - CP_TOTAL_DATA_BEATS_READ = 21, - VGT_R0_REQUESTS = 22, - VGT_R1_REQUESTS = 23, - VGT_TOTAL_REQUESTS = 24, - VGT_R0_READ_BURSTS_RECEIVED = 25, - VGT_R1_READ_BURSTS_RECEIVED = 26, - VGT_TOTAL_READ_BURSTS_RECEIVED = 27, - VGT_R0_DATA_BEATS_READ = 28, - VGT_R1_DATA_BEATS_READ = 29, - VGT_TOTAL_DATA_BEATS_READ = 30, - TC_TOTAL_REQUESTS = 31, - TC_ROQ_REQUESTS = 32, - TC_INFO_SENT = 33, - TC_READ_BURSTS_RECEIVED = 34, - TC_DATA_BEATS_READ = 35, - TCD_BURSTS_READ = 36, - RB_REQUESTS = 37, - RB_DATA_BYTES_WRITTEN = 38, - RB_WRITE_CLEAN_RESPONSES = 39, - AXI_READ_REQUESTS_ID_0 = 40, - AXI_READ_REQUESTS_ID_1 = 41, - AXI_READ_REQUESTS_ID_2 = 42, - AXI_READ_REQUESTS_ID_3 = 43, - AXI_READ_REQUESTS_ID_4 = 44, - AXI_READ_REQUESTS_ID_5 = 45, - AXI_READ_REQUESTS_ID_6 = 46, - AXI_READ_REQUESTS_ID_7 = 47, - AXI_TOTAL_READ_REQUESTS = 48, - AXI_WRITE_REQUESTS_ID_0 = 49, - AXI_WRITE_REQUESTS_ID_1 = 50, - AXI_WRITE_REQUESTS_ID_2 = 51, - AXI_WRITE_REQUESTS_ID_3 = 52, - AXI_WRITE_REQUESTS_ID_4 = 53, - AXI_WRITE_REQUESTS_ID_5 = 54, - AXI_WRITE_REQUESTS_ID_6 = 55, - AXI_WRITE_REQUESTS_ID_7 = 56, - AXI_TOTAL_WRITE_REQUESTS = 57, - AXI_TOTAL_REQUESTS_ID_0 = 58, - AXI_TOTAL_REQUESTS_ID_1 = 59, - AXI_TOTAL_REQUESTS_ID_2 = 60, - AXI_TOTAL_REQUESTS_ID_3 = 61, - AXI_TOTAL_REQUESTS_ID_4 = 62, - AXI_TOTAL_REQUESTS_ID_5 = 63, - AXI_TOTAL_REQUESTS_ID_6 = 64, - AXI_TOTAL_REQUESTS_ID_7 = 65, - AXI_TOTAL_REQUESTS = 66, - AXI_READ_CHANNEL_BURSTS_ID_0 = 67, - AXI_READ_CHANNEL_BURSTS_ID_1 = 68, - AXI_READ_CHANNEL_BURSTS_ID_2 = 69, - AXI_READ_CHANNEL_BURSTS_ID_3 = 70, - AXI_READ_CHANNEL_BURSTS_ID_4 = 71, - AXI_READ_CHANNEL_BURSTS_ID_5 = 72, - AXI_READ_CHANNEL_BURSTS_ID_6 = 73, - AXI_READ_CHANNEL_BURSTS_ID_7 = 74, - AXI_READ_CHANNEL_TOTAL_BURSTS = 75, - AXI_READ_CHANNEL_DATA_BEATS_READ_ID_0 = 76, - AXI_READ_CHANNEL_DATA_BEATS_READ_ID_1 = 77, - AXI_READ_CHANNEL_DATA_BEATS_READ_ID_2 = 78, - AXI_READ_CHANNEL_DATA_BEATS_READ_ID_3 = 79, - AXI_READ_CHANNEL_DATA_BEATS_READ_ID_4 = 80, - AXI_READ_CHANNEL_DATA_BEATS_READ_ID_5 = 81, - AXI_READ_CHANNEL_DATA_BEATS_READ_ID_6 = 82, - AXI_READ_CHANNEL_DATA_BEATS_READ_ID_7 = 83, - AXI_READ_CHANNEL_TOTAL_DATA_BEATS_READ = 84, - AXI_WRITE_CHANNEL_BURSTS_ID_0 = 85, - AXI_WRITE_CHANNEL_BURSTS_ID_1 = 86, - AXI_WRITE_CHANNEL_BURSTS_ID_2 = 87, - AXI_WRITE_CHANNEL_BURSTS_ID_3 = 88, - AXI_WRITE_CHANNEL_BURSTS_ID_4 = 89, - AXI_WRITE_CHANNEL_BURSTS_ID_5 = 90, - AXI_WRITE_CHANNEL_BURSTS_ID_6 = 91, - AXI_WRITE_CHANNEL_BURSTS_ID_7 = 92, - AXI_WRITE_CHANNEL_TOTAL_BURSTS = 93, - 
AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_0 = 94, - AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_1 = 95, - AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_2 = 96, - AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_3 = 97, - AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_4 = 98, - AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_5 = 99, - AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_6 = 100, - AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_7 = 101, - AXI_WRITE_CHANNEL_TOTAL_DATA_BYTES_WRITTEN = 102, - AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_0 = 103, - AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_1 = 104, - AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_2 = 105, - AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_3 = 106, - AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_4 = 107, - AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_5 = 108, - AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_6 = 109, - AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_7 = 110, - AXI_WRITE_RESPONSE_CHANNEL_TOTAL_RESPONSES = 111, - TOTAL_MMU_MISSES = 112, - MMU_READ_MISSES = 113, - MMU_WRITE_MISSES = 114, - TOTAL_MMU_HITS = 115, - MMU_READ_HITS = 116, - MMU_WRITE_HITS = 117, - SPLIT_MODE_TC_HITS = 118, - SPLIT_MODE_TC_MISSES = 119, - SPLIT_MODE_NON_TC_HITS = 120, - SPLIT_MODE_NON_TC_MISSES = 121, - STALL_AWAITING_TLB_MISS_FETCH = 122, - MMU_TLB_MISS_READ_BURSTS_RECEIVED = 123, - MMU_TLB_MISS_DATA_BEATS_READ = 124, - CP_CYCLES_HELD_OFF = 125, - VGT_CYCLES_HELD_OFF = 126, - TC_CYCLES_HELD_OFF = 127, - TC_ROQ_CYCLES_HELD_OFF = 128, - TC_CYCLES_HELD_OFF_TCD_FULL = 129, - RB_CYCLES_HELD_OFF = 130, - TOTAL_CYCLES_ANY_CLNT_HELD_OFF = 131, - TLB_MISS_CYCLES_HELD_OFF = 132, - AXI_READ_REQUEST_HELD_OFF = 133, - AXI_WRITE_REQUEST_HELD_OFF = 134, - AXI_REQUEST_HELD_OFF = 135, - AXI_REQUEST_HELD_OFF_INFLIGHT_LIMIT = 136, - AXI_WRITE_DATA_HELD_OFF = 137, - CP_SAME_PAGE_BANK_REQUESTS = 138, - VGT_SAME_PAGE_BANK_REQUESTS = 139, - TC_SAME_PAGE_BANK_REQUESTS = 140, - TC_ARB_HOLD_SAME_PAGE_BANK_REQUESTS = 141, - RB_SAME_PAGE_BANK_REQUESTS = 142, - TOTAL_SAME_PAGE_BANK_REQUESTS = 143, - CP_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 144, - VGT_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 145, - TC_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 146, - RB_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 147, - TOTAL_SAME_PAGE_BANK_KILLED_FAIRNESS_LIMIT = 148, - TOTAL_MH_READ_REQUESTS = 149, - TOTAL_MH_WRITE_REQUESTS = 150, - TOTAL_MH_REQUESTS = 151, - MH_BUSY = 152, - CP_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 153, - VGT_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 154, - TC_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 155, - RB_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 156, - TC_ROQ_N_VALID_ENTRIES = 157, - ARQ_N_ENTRIES = 158, - WDB_N_ENTRIES = 159, - MH_READ_LATENCY_OUTST_REQ_SUM = 160, - MC_READ_LATENCY_OUTST_REQ_SUM = 161, - MC_TOTAL_READ_REQUESTS = 162, - ELAPSED_CYCLES_MH_GATED_CLK = 163, - ELAPSED_CLK_CYCLES = 164, - CP_W_16B_REQUESTS = 165, - CP_W_32B_REQUESTS = 166, - TC_16B_REQUESTS = 167, - TC_32B_REQUESTS = 168, - PA_REQUESTS = 169, - PA_DATA_BYTES_WRITTEN = 170, - PA_WRITE_CLEAN_RESPONSES = 171, - PA_CYCLES_HELD_OFF = 172, - AXI_READ_REQUEST_DATA_BEATS_ID_0 = 173, - AXI_READ_REQUEST_DATA_BEATS_ID_1 = 174, - AXI_READ_REQUEST_DATA_BEATS_ID_2 = 175, - AXI_READ_REQUEST_DATA_BEATS_ID_3 = 176, - AXI_READ_REQUEST_DATA_BEATS_ID_4 = 177, - AXI_READ_REQUEST_DATA_BEATS_ID_5 = 178, - AXI_READ_REQUEST_DATA_BEATS_ID_6 = 179, - AXI_READ_REQUEST_DATA_BEATS_ID_7 = 180, - AXI_TOTAL_READ_REQUEST_DATA_BEATS = 181, -}; - -enum perf_mode_cnt { - PERF_STATE_RESET = 0, - PERF_STATE_ENABLE = 1, - PERF_STATE_FREEZE = 2, -}; - -enum adreno_mmu_clnt_beh { - BEH_NEVR = 0, - 
BEH_TRAN_RNG = 1, - BEH_TRAN_FLT = 2, -}; - -enum sq_tex_clamp { - SQ_TEX_WRAP = 0, - SQ_TEX_MIRROR = 1, - SQ_TEX_CLAMP_LAST_TEXEL = 2, - SQ_TEX_MIRROR_ONCE_LAST_TEXEL = 3, - SQ_TEX_CLAMP_HALF_BORDER = 4, - SQ_TEX_MIRROR_ONCE_HALF_BORDER = 5, - SQ_TEX_CLAMP_BORDER = 6, - SQ_TEX_MIRROR_ONCE_BORDER = 7, -}; - -enum sq_tex_swiz { - SQ_TEX_X = 0, - SQ_TEX_Y = 1, - SQ_TEX_Z = 2, - SQ_TEX_W = 3, - SQ_TEX_ZERO = 4, - SQ_TEX_ONE = 5, -}; - -enum sq_tex_filter { - SQ_TEX_FILTER_POINT = 0, - SQ_TEX_FILTER_BILINEAR = 1, - SQ_TEX_FILTER_BASEMAP = 2, - SQ_TEX_FILTER_USE_FETCH_CONST = 3, -}; - -enum sq_tex_aniso_filter { - SQ_TEX_ANISO_FILTER_DISABLED = 0, - SQ_TEX_ANISO_FILTER_MAX_1_1 = 1, - SQ_TEX_ANISO_FILTER_MAX_2_1 = 2, - SQ_TEX_ANISO_FILTER_MAX_4_1 = 3, - SQ_TEX_ANISO_FILTER_MAX_8_1 = 4, - SQ_TEX_ANISO_FILTER_MAX_16_1 = 5, - SQ_TEX_ANISO_FILTER_USE_FETCH_CONST = 7, -}; - -enum sq_tex_dimension { - SQ_TEX_DIMENSION_1D = 0, - SQ_TEX_DIMENSION_2D = 1, - SQ_TEX_DIMENSION_3D = 2, - SQ_TEX_DIMENSION_CUBE = 3, -}; - -enum sq_tex_border_color { - SQ_TEX_BORDER_COLOR_BLACK = 0, - SQ_TEX_BORDER_COLOR_WHITE = 1, - SQ_TEX_BORDER_COLOR_ACBYCR_BLACK = 2, - SQ_TEX_BORDER_COLOR_ACBCRY_BLACK = 3, -}; - -enum sq_tex_sign { - SQ_TEX_SIGN_UNSIGNED = 0, - SQ_TEX_SIGN_SIGNED = 1, - SQ_TEX_SIGN_UNSIGNED_BIASED = 2, - SQ_TEX_SIGN_GAMMA = 3, -}; - -enum sq_tex_endian { - SQ_TEX_ENDIAN_NONE = 0, - SQ_TEX_ENDIAN_8IN16 = 1, - SQ_TEX_ENDIAN_8IN32 = 2, - SQ_TEX_ENDIAN_16IN32 = 3, -}; - -enum sq_tex_clamp_policy { - SQ_TEX_CLAMP_POLICY_D3D = 0, - SQ_TEX_CLAMP_POLICY_OGL = 1, -}; - -enum sq_tex_num_format { - SQ_TEX_NUM_FORMAT_FRAC = 0, - SQ_TEX_NUM_FORMAT_INT = 1, -}; - -enum sq_tex_type { - SQ_TEX_TYPE_0 = 0, - SQ_TEX_TYPE_1 = 1, - SQ_TEX_TYPE_2 = 2, - SQ_TEX_TYPE_3 = 3, -}; - -#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001 - -#define REG_A2XX_RBBM_CNTL 0x0000003b - -#define REG_A2XX_RBBM_SOFT_RESET 0x0000003c - -#define REG_A2XX_CP_PFP_UCODE_ADDR 0x000000c0 - -#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1 - -#define REG_A2XX_MH_MMU_CONFIG 0x00000040 -#define A2XX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001 -#define A2XX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002 -#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030 -#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4 -static inline uint32_t A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) -{ - return ((val) << A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK; -} -#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0 -#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6 -static inline uint32_t A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) -{ - return ((val) << A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK; -} -#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300 -#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8 -static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) -{ - return ((val) << A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK; -} -#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00 -#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10 -static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) -{ - return ((val) << A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK; -} -#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000 
-#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12 -static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) -{ - return ((val) << A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK; -} -#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000 -#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14 -static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) -{ - return ((val) << A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK; -} -#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000 -#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16 -static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) -{ - return ((val) << A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK; -} -#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000 -#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18 -static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) -{ - return ((val) << A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK; -} -#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000 -#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20 -static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) -{ - return ((val) << A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK; -} -#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000 -#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22 -static inline uint32_t A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) -{ - return ((val) << A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK; -} -#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000 -#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24 -static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) -{ - return ((val) << A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK; -} - -#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041 -#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK 0x00000fff -#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT 0 -static inline uint32_t A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(uint32_t val) -{ - return ((val) << A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT) & A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK; -} -#define A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK 0xfffff000 -#define A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT 12 -static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val) -{ - return ((val) << A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT) & A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK; -} - -#define REG_A2XX_MH_MMU_PT_BASE 0x00000042 - -#define REG_A2XX_MH_MMU_PAGE_FAULT 0x00000043 - -#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044 - -#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045 -#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL 0x00000001 -#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC 0x00000002 - -#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046 - -#define REG_A2XX_MH_MMU_MPU_END 0x00000047 - -#define REG_A2XX_NQWAIT_UNTIL 0x00000394 - -#define REG_A2XX_RBBM_PERFCOUNTER0_SELECT 0x00000395 - -#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000396 - -#define REG_A2XX_RBBM_PERFCOUNTER0_LO 0x00000397 - -#define 
REG_A2XX_RBBM_PERFCOUNTER0_HI 0x00000398 - -#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000399 - -#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x0000039a - -#define REG_A2XX_RBBM_DEBUG 0x0000039b - -#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c -#define A2XX_RBBM_PM_OVERRIDE1_RBBM_AHBCLK_PM_OVERRIDE 0x00000001 -#define A2XX_RBBM_PM_OVERRIDE1_SC_REG_SCLK_PM_OVERRIDE 0x00000002 -#define A2XX_RBBM_PM_OVERRIDE1_SC_SCLK_PM_OVERRIDE 0x00000004 -#define A2XX_RBBM_PM_OVERRIDE1_SP_TOP_SCLK_PM_OVERRIDE 0x00000008 -#define A2XX_RBBM_PM_OVERRIDE1_SP_V0_SCLK_PM_OVERRIDE 0x00000010 -#define A2XX_RBBM_PM_OVERRIDE1_SQ_REG_SCLK_PM_OVERRIDE 0x00000020 -#define A2XX_RBBM_PM_OVERRIDE1_SQ_REG_FIFOS_SCLK_PM_OVERRIDE 0x00000040 -#define A2XX_RBBM_PM_OVERRIDE1_SQ_CONST_MEM_SCLK_PM_OVERRIDE 0x00000080 -#define A2XX_RBBM_PM_OVERRIDE1_SQ_SQ_SCLK_PM_OVERRIDE 0x00000100 -#define A2XX_RBBM_PM_OVERRIDE1_SX_SCLK_PM_OVERRIDE 0x00000200 -#define A2XX_RBBM_PM_OVERRIDE1_SX_REG_SCLK_PM_OVERRIDE 0x00000400 -#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCO_SCLK_PM_OVERRIDE 0x00000800 -#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCM_SCLK_PM_OVERRIDE 0x00001000 -#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCD_SCLK_PM_OVERRIDE 0x00002000 -#define A2XX_RBBM_PM_OVERRIDE1_TCM_REG_SCLK_PM_OVERRIDE 0x00004000 -#define A2XX_RBBM_PM_OVERRIDE1_TPC_TPC_SCLK_PM_OVERRIDE 0x00008000 -#define A2XX_RBBM_PM_OVERRIDE1_TPC_REG_SCLK_PM_OVERRIDE 0x00010000 -#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCA_SCLK_PM_OVERRIDE 0x00020000 -#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCB_SCLK_PM_OVERRIDE 0x00040000 -#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCB_READ_SCLK_PM_OVERRIDE 0x00080000 -#define A2XX_RBBM_PM_OVERRIDE1_TP_TP_SCLK_PM_OVERRIDE 0x00100000 -#define A2XX_RBBM_PM_OVERRIDE1_TP_REG_SCLK_PM_OVERRIDE 0x00200000 -#define A2XX_RBBM_PM_OVERRIDE1_CP_G_SCLK_PM_OVERRIDE 0x00400000 -#define A2XX_RBBM_PM_OVERRIDE1_CP_REG_SCLK_PM_OVERRIDE 0x00800000 -#define A2XX_RBBM_PM_OVERRIDE1_CP_G_REG_SCLK_PM_OVERRIDE 0x01000000 -#define A2XX_RBBM_PM_OVERRIDE1_SPI_SCLK_PM_OVERRIDE 0x02000000 -#define A2XX_RBBM_PM_OVERRIDE1_RB_REG_SCLK_PM_OVERRIDE 0x04000000 -#define A2XX_RBBM_PM_OVERRIDE1_RB_SCLK_PM_OVERRIDE 0x08000000 -#define A2XX_RBBM_PM_OVERRIDE1_MH_MH_SCLK_PM_OVERRIDE 0x10000000 -#define A2XX_RBBM_PM_OVERRIDE1_MH_REG_SCLK_PM_OVERRIDE 0x20000000 -#define A2XX_RBBM_PM_OVERRIDE1_MH_MMU_SCLK_PM_OVERRIDE 0x40000000 -#define A2XX_RBBM_PM_OVERRIDE1_MH_TCROQ_SCLK_PM_OVERRIDE 0x80000000 - -#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d -#define A2XX_RBBM_PM_OVERRIDE2_PA_REG_SCLK_PM_OVERRIDE 0x00000001 -#define A2XX_RBBM_PM_OVERRIDE2_PA_PA_SCLK_PM_OVERRIDE 0x00000002 -#define A2XX_RBBM_PM_OVERRIDE2_PA_AG_SCLK_PM_OVERRIDE 0x00000004 -#define A2XX_RBBM_PM_OVERRIDE2_VGT_REG_SCLK_PM_OVERRIDE 0x00000008 -#define A2XX_RBBM_PM_OVERRIDE2_VGT_FIFOS_SCLK_PM_OVERRIDE 0x00000010 -#define A2XX_RBBM_PM_OVERRIDE2_VGT_VGT_SCLK_PM_OVERRIDE 0x00000020 -#define A2XX_RBBM_PM_OVERRIDE2_DEBUG_PERF_SCLK_PM_OVERRIDE 0x00000040 -#define A2XX_RBBM_PM_OVERRIDE2_PERM_SCLK_PM_OVERRIDE 0x00000080 -#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM0_PM_OVERRIDE 0x00000100 -#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM1_PM_OVERRIDE 0x00000200 -#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM2_PM_OVERRIDE 0x00000400 -#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM3_PM_OVERRIDE 0x00000800 - -#define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0 - -#define REG_A2XX_RBBM_DEBUG_CNTL 0x000003a1 - -#define REG_A2XX_RBBM_READ_ERROR 0x000003b3 - -#define REG_A2XX_RBBM_INT_CNTL 0x000003b4 -#define A2XX_RBBM_INT_CNTL_RDERR_INT_MASK 0x00000001 -#define A2XX_RBBM_INT_CNTL_DISPLAY_UPDATE_INT_MASK 0x00000002 
-#define A2XX_RBBM_INT_CNTL_GUI_IDLE_INT_MASK 0x00080000 - -#define REG_A2XX_RBBM_INT_STATUS 0x000003b5 - -#define REG_A2XX_RBBM_INT_ACK 0x000003b6 - -#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7 -#define A2XX_MASTER_INT_SIGNAL_MH_INT_STAT 0x00000020 -#define A2XX_MASTER_INT_SIGNAL_SQ_INT_STAT 0x04000000 -#define A2XX_MASTER_INT_SIGNAL_CP_INT_STAT 0x40000000 -#define A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT 0x80000000 - -#define REG_A2XX_RBBM_PERIPHID1 0x000003f9 - -#define REG_A2XX_RBBM_PERIPHID2 0x000003fa - -#define REG_A2XX_CP_PERFMON_CNTL 0x00000444 -#define A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__MASK 0x00000007 -#define A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__SHIFT 0 -static inline uint32_t A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT(enum perf_mode_cnt val) -{ - return ((val) << A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__SHIFT) & A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__MASK; -} - -#define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445 - -#define REG_A2XX_CP_PERFCOUNTER_LO 0x00000446 - -#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447 - -#define REG_A2XX_RBBM_STATUS 0x000005d0 -#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f -#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0 -static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val) -{ - return ((val) << A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT) & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK; -} -#define A2XX_RBBM_STATUS_TC_BUSY 0x00000020 -#define A2XX_RBBM_STATUS_HIRQ_PENDING 0x00000100 -#define A2XX_RBBM_STATUS_CPRQ_PENDING 0x00000200 -#define A2XX_RBBM_STATUS_CFRQ_PENDING 0x00000400 -#define A2XX_RBBM_STATUS_PFRQ_PENDING 0x00000800 -#define A2XX_RBBM_STATUS_VGT_BUSY_NO_DMA 0x00001000 -#define A2XX_RBBM_STATUS_RBBM_WU_BUSY 0x00004000 -#define A2XX_RBBM_STATUS_CP_NRT_BUSY 0x00010000 -#define A2XX_RBBM_STATUS_MH_BUSY 0x00040000 -#define A2XX_RBBM_STATUS_MH_COHERENCY_BUSY 0x00080000 -#define A2XX_RBBM_STATUS_SX_BUSY 0x00200000 -#define A2XX_RBBM_STATUS_TPC_BUSY 0x00400000 -#define A2XX_RBBM_STATUS_SC_CNTX_BUSY 0x01000000 -#define A2XX_RBBM_STATUS_PA_BUSY 0x02000000 -#define A2XX_RBBM_STATUS_VGT_BUSY 0x04000000 -#define A2XX_RBBM_STATUS_SQ_CNTX17_BUSY 0x08000000 -#define A2XX_RBBM_STATUS_SQ_CNTX0_BUSY 0x10000000 -#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000 -#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000 - -#define REG_A2XX_MH_ARBITER_CONFIG 0x00000a40 -#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK 0x0000003f -#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT 0 -static inline uint32_t A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(uint32_t val) -{ - return ((val) << A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK; -} -#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_GRANULARITY 0x00000040 -#define A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE 0x00000080 -#define A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE 0x00000100 -#define A2XX_MH_ARBITER_CONFIG_L2_ARB_CONTROL 0x00000200 -#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK 0x00001c00 -#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT 10 -static inline uint32_t A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(uint32_t val) -{ - return ((val) << A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT) & A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK; -} -#define A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE 0x00002000 -#define A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE 0x00004000 -#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE 0x00008000 -#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK 0x003f0000 -#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT 16 -static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t 
val) -{ - return ((val) << A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK; -} -#define A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE 0x00400000 -#define A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE 0x00800000 -#define A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE 0x01000000 -#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000 -#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000 - -#define REG_A2XX_MH_INTERRUPT_MASK 0x00000a42 -#define A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR 0x00000001 -#define A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR 0x00000002 -#define A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT 0x00000004 - -#define REG_A2XX_MH_INTERRUPT_STATUS 0x00000a43 - -#define REG_A2XX_MH_INTERRUPT_CLEAR 0x00000a44 - -#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1 0x00000a54 - -#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG2 0x00000a55 - -#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01 -#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f -#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0 -static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK; -} -#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0 -#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5 -static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK; -} - -#define REG_A2XX_VSC_PIPE(i0) (0x00000c06 + 0x3*(i0)) - -static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; } - -static inline uint32_t REG_A2XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; } - -static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; } - -#define REG_A2XX_PC_DEBUG_CNTL 0x00000c38 - -#define REG_A2XX_PC_DEBUG_DATA 0x00000c39 - -#define REG_A2XX_PA_SC_VIZ_QUERY_STATUS 0x00000c44 - -#define REG_A2XX_GRAS_DEBUG_CNTL 0x00000c80 - -#define REG_A2XX_PA_SU_DEBUG_CNTL 0x00000c80 - -#define REG_A2XX_GRAS_DEBUG_DATA 0x00000c81 - -#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81 - -#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86 -#define A2XX_PA_SU_FACE_DATA_BASE_ADDR__MASK 0xffffffe0 -#define A2XX_PA_SU_FACE_DATA_BASE_ADDR__SHIFT 5 -static inline uint32_t A2XX_PA_SU_FACE_DATA_BASE_ADDR(uint32_t val) -{ - return ((val) << A2XX_PA_SU_FACE_DATA_BASE_ADDR__SHIFT) & A2XX_PA_SU_FACE_DATA_BASE_ADDR__MASK; -} - -#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00 -#define A2XX_SQ_GPR_MANAGEMENT_REG_DYNAMIC 0x00000001 -#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__MASK 0x00000ff0 -#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__SHIFT 4 -static inline uint32_t A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX(uint32_t val) -{ - return ((val) << A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__SHIFT) & A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__MASK; -} -#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__MASK 0x000ff000 -#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__SHIFT 12 -static inline uint32_t A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX(uint32_t val) -{ - return ((val) << A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__SHIFT) & A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__MASK; -} - -#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01 - -#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02 -#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__MASK 0x00000fff -#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__SHIFT 0 -static inline uint32_t A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX(uint32_t val) 
-{ - return ((val) << A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__SHIFT) & A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__MASK; -} -#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__MASK 0x0fff0000 -#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__SHIFT 16 -static inline uint32_t A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX(uint32_t val) -{ - return ((val) << A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__SHIFT) & A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__MASK; -} - -#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05 - -#define REG_A2XX_SQ_INT_CNTL 0x00000d34 - -#define REG_A2XX_SQ_INT_STATUS 0x00000d35 - -#define REG_A2XX_SQ_INT_ACK 0x00000d36 - -#define REG_A2XX_SQ_DEBUG_INPUT_FSM 0x00000dae - -#define REG_A2XX_SQ_DEBUG_CONST_MGR_FSM 0x00000daf - -#define REG_A2XX_SQ_DEBUG_TP_FSM 0x00000db0 - -#define REG_A2XX_SQ_DEBUG_FSM_ALU_0 0x00000db1 - -#define REG_A2XX_SQ_DEBUG_FSM_ALU_1 0x00000db2 - -#define REG_A2XX_SQ_DEBUG_EXP_ALLOC 0x00000db3 - -#define REG_A2XX_SQ_DEBUG_PTR_BUFF 0x00000db4 - -#define REG_A2XX_SQ_DEBUG_GPR_VTX 0x00000db5 - -#define REG_A2XX_SQ_DEBUG_GPR_PIX 0x00000db6 - -#define REG_A2XX_SQ_DEBUG_TB_STATUS_SEL 0x00000db7 - -#define REG_A2XX_SQ_DEBUG_VTX_TB_0 0x00000db8 - -#define REG_A2XX_SQ_DEBUG_VTX_TB_1 0x00000db9 - -#define REG_A2XX_SQ_DEBUG_VTX_TB_STATUS_REG 0x00000dba - -#define REG_A2XX_SQ_DEBUG_VTX_TB_STATE_MEM 0x00000dbb - -#define REG_A2XX_SQ_DEBUG_PIX_TB_0 0x00000dbc - -#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x00000dbd - -#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x00000dbe - -#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x00000dbf - -#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x00000dc0 - -#define REG_A2XX_SQ_DEBUG_PIX_TB_STATE_MEM 0x00000dc1 - -#define REG_A2XX_TC_CNTL_STATUS 0x00000e00 -#define A2XX_TC_CNTL_STATUS_L2_INVALIDATE 0x00000001 - -#define REG_A2XX_TP0_CHICKEN 0x00000e1e - -#define REG_A2XX_RB_BC_CONTROL 0x00000f01 -#define A2XX_RB_BC_CONTROL_ACCUM_LINEAR_MODE_ENABLE 0x00000001 -#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK 0x00000006 -#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT 1 -static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT(uint32_t val) -{ - return ((val) << A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK; -} -#define A2XX_RB_BC_CONTROL_DISABLE_EDRAM_CAM 0x00000008 -#define A2XX_RB_BC_CONTROL_DISABLE_EZ_FAST_CONTEXT_SWITCH 0x00000010 -#define A2XX_RB_BC_CONTROL_DISABLE_EZ_NULL_ZCMD_DROP 0x00000020 -#define A2XX_RB_BC_CONTROL_DISABLE_LZ_NULL_ZCMD_DROP 0x00000040 -#define A2XX_RB_BC_CONTROL_ENABLE_AZ_THROTTLE 0x00000080 -#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK 0x00001f00 -#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT 8 -static inline uint32_t A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT(uint32_t val) -{ - return ((val) << A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT) & A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK; -} -#define A2XX_RB_BC_CONTROL_ENABLE_CRC_UPDATE 0x00004000 -#define A2XX_RB_BC_CONTROL_CRC_MODE 0x00008000 -#define A2XX_RB_BC_CONTROL_DISABLE_SAMPLE_COUNTERS 0x00010000 -#define A2XX_RB_BC_CONTROL_DISABLE_ACCUM 0x00020000 -#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK 0x003c0000 -#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT 18 -static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK(uint32_t val) -{ - return ((val) << A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK; -} -#define A2XX_RB_BC_CONTROL_LINEAR_PERFORMANCE_ENABLE 0x00400000 -#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK 
0x07800000 -#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT 23 -static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT(uint32_t val) -{ - return ((val) << A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK; -} -#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK 0x18000000 -#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT 27 -static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val) -{ - return ((val) << A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK; -} -#define A2XX_RB_BC_CONTROL_MEM_EXPORT_LINEAR_MODE_ENABLE 0x20000000 -#define A2XX_RB_BC_CONTROL_CRC_SYSTEM 0x40000000 -#define A2XX_RB_BC_CONTROL_RESERVED6 0x80000000 - -#define REG_A2XX_RB_EDRAM_INFO 0x00000f02 - -#define REG_A2XX_RB_DEBUG_CNTL 0x00000f26 - -#define REG_A2XX_RB_DEBUG_DATA 0x00000f27 - -#define REG_A2XX_RB_SURFACE_INFO 0x00002000 -#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK 0x00003fff -#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT 0 -static inline uint32_t A2XX_RB_SURFACE_INFO_SURFACE_PITCH(uint32_t val) -{ - return ((val) << A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT) & A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK; -} -#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK 0x0000c000 -#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT 14 -static inline uint32_t A2XX_RB_SURFACE_INFO_MSAA_SAMPLES(uint32_t val) -{ - return ((val) << A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT) & A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK; -} - -#define REG_A2XX_RB_COLOR_INFO 0x00002001 -#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f -#define A2XX_RB_COLOR_INFO_FORMAT__SHIFT 0 -static inline uint32_t A2XX_RB_COLOR_INFO_FORMAT(enum a2xx_colorformatx val) -{ - return ((val) << A2XX_RB_COLOR_INFO_FORMAT__SHIFT) & A2XX_RB_COLOR_INFO_FORMAT__MASK; -} -#define A2XX_RB_COLOR_INFO_ROUND_MODE__MASK 0x00000030 -#define A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT 4 -static inline uint32_t A2XX_RB_COLOR_INFO_ROUND_MODE(uint32_t val) -{ - return ((val) << A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT) & A2XX_RB_COLOR_INFO_ROUND_MODE__MASK; -} -#define A2XX_RB_COLOR_INFO_LINEAR 0x00000040 -#define A2XX_RB_COLOR_INFO_ENDIAN__MASK 0x00000180 -#define A2XX_RB_COLOR_INFO_ENDIAN__SHIFT 7 -static inline uint32_t A2XX_RB_COLOR_INFO_ENDIAN(uint32_t val) -{ - return ((val) << A2XX_RB_COLOR_INFO_ENDIAN__SHIFT) & A2XX_RB_COLOR_INFO_ENDIAN__MASK; -} -#define A2XX_RB_COLOR_INFO_SWAP__MASK 0x00000600 -#define A2XX_RB_COLOR_INFO_SWAP__SHIFT 9 -static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val) -{ - return ((val) << A2XX_RB_COLOR_INFO_SWAP__SHIFT) & A2XX_RB_COLOR_INFO_SWAP__MASK; -} -#define A2XX_RB_COLOR_INFO_BASE__MASK 0xfffff000 -#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12 -static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK; -} - -#define REG_A2XX_RB_DEPTH_INFO 0x00002002 -#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001 -#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0 -static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val) -{ - return ((val) << A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK; -} -#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000 -#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12 -static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << 
A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK; -} - -#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005 - -#define REG_A2XX_COHER_DEST_BASE_0 0x00002006 - -#define REG_A2XX_PA_SC_SCREEN_SCISSOR_TL 0x0000200e -#define A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 -#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff -#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT 0 -static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_X(uint32_t val) -{ - return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK; -} -#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000 -#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16 -static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(uint32_t val) -{ - return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK; -} - -#define REG_A2XX_PA_SC_SCREEN_SCISSOR_BR 0x0000200f -#define A2XX_PA_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 -#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff -#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT 0 -static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_X(uint32_t val) -{ - return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK; -} -#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000 -#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16 -static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(uint32_t val) -{ - return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK; -} - -#define REG_A2XX_PA_SC_WINDOW_OFFSET 0x00002080 -#define A2XX_PA_SC_WINDOW_OFFSET_X__MASK 0x00007fff -#define A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0 -static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_X(int32_t val) -{ - return ((val) << A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_X__MASK; -} -#define A2XX_PA_SC_WINDOW_OFFSET_Y__MASK 0x7fff0000 -#define A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16 -static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_Y(int32_t val) -{ - return ((val) << A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_Y__MASK; -} -#define A2XX_PA_SC_WINDOW_OFFSET_DISABLE 0x80000000 - -#define REG_A2XX_PA_SC_WINDOW_SCISSOR_TL 0x00002081 -#define A2XX_PA_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 -#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff -#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT 0 -static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_X(uint32_t val) -{ - return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK; -} -#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000 -#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16 -static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_Y(uint32_t val) -{ - return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK; -} - -#define REG_A2XX_PA_SC_WINDOW_SCISSOR_BR 0x00002082 -#define A2XX_PA_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 -#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff -#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT 0 -static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_X(uint32_t val) -{ - return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK; -} -#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000 -#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16 -static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val) -{ - return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK; -} - -#define 
REG_A2XX_UNKNOWN_2010 0x00002010 - -#define REG_A2XX_VGT_MAX_VTX_INDX 0x00002100 - -#define REG_A2XX_VGT_MIN_VTX_INDX 0x00002101 - -#define REG_A2XX_VGT_INDX_OFFSET 0x00002102 - -#define REG_A2XX_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x00002103 - -#define REG_A2XX_RB_COLOR_MASK 0x00002104 -#define A2XX_RB_COLOR_MASK_WRITE_RED 0x00000001 -#define A2XX_RB_COLOR_MASK_WRITE_GREEN 0x00000002 -#define A2XX_RB_COLOR_MASK_WRITE_BLUE 0x00000004 -#define A2XX_RB_COLOR_MASK_WRITE_ALPHA 0x00000008 - -#define REG_A2XX_RB_BLEND_RED 0x00002105 - -#define REG_A2XX_RB_BLEND_GREEN 0x00002106 - -#define REG_A2XX_RB_BLEND_BLUE 0x00002107 - -#define REG_A2XX_RB_BLEND_ALPHA 0x00002108 - -#define REG_A2XX_RB_FOG_COLOR 0x00002109 -#define A2XX_RB_FOG_COLOR_FOG_RED__MASK 0x000000ff -#define A2XX_RB_FOG_COLOR_FOG_RED__SHIFT 0 -static inline uint32_t A2XX_RB_FOG_COLOR_FOG_RED(uint32_t val) -{ - return ((val) << A2XX_RB_FOG_COLOR_FOG_RED__SHIFT) & A2XX_RB_FOG_COLOR_FOG_RED__MASK; -} -#define A2XX_RB_FOG_COLOR_FOG_GREEN__MASK 0x0000ff00 -#define A2XX_RB_FOG_COLOR_FOG_GREEN__SHIFT 8 -static inline uint32_t A2XX_RB_FOG_COLOR_FOG_GREEN(uint32_t val) -{ - return ((val) << A2XX_RB_FOG_COLOR_FOG_GREEN__SHIFT) & A2XX_RB_FOG_COLOR_FOG_GREEN__MASK; -} -#define A2XX_RB_FOG_COLOR_FOG_BLUE__MASK 0x00ff0000 -#define A2XX_RB_FOG_COLOR_FOG_BLUE__SHIFT 16 -static inline uint32_t A2XX_RB_FOG_COLOR_FOG_BLUE(uint32_t val) -{ - return ((val) << A2XX_RB_FOG_COLOR_FOG_BLUE__SHIFT) & A2XX_RB_FOG_COLOR_FOG_BLUE__MASK; -} - -#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c -#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff -#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0 -static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val) -{ - return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK; -} -#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00 -#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8 -static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val) -{ - return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK; -} -#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000 -#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16 -static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val) -{ - return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; -} - -#define REG_A2XX_RB_STENCILREFMASK 0x0000210d -#define A2XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff -#define A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0 -static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILREF(uint32_t val) -{ - return ((val) << A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILREF__MASK; -} -#define A2XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00 -#define A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8 -static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val) -{ - return ((val) << A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILMASK__MASK; -} -#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000 -#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16 -static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val) -{ - return ((val) << A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK; -} - -#define REG_A2XX_RB_ALPHA_REF 0x0000210e - -#define REG_A2XX_PA_CL_VPORT_XSCALE 
0x0000210f -#define A2XX_PA_CL_VPORT_XSCALE__MASK 0xffffffff -#define A2XX_PA_CL_VPORT_XSCALE__SHIFT 0 -static inline uint32_t A2XX_PA_CL_VPORT_XSCALE(float val) -{ - return ((fui(val)) << A2XX_PA_CL_VPORT_XSCALE__SHIFT) & A2XX_PA_CL_VPORT_XSCALE__MASK; -} - -#define REG_A2XX_PA_CL_VPORT_XOFFSET 0x00002110 -#define A2XX_PA_CL_VPORT_XOFFSET__MASK 0xffffffff -#define A2XX_PA_CL_VPORT_XOFFSET__SHIFT 0 -static inline uint32_t A2XX_PA_CL_VPORT_XOFFSET(float val) -{ - return ((fui(val)) << A2XX_PA_CL_VPORT_XOFFSET__SHIFT) & A2XX_PA_CL_VPORT_XOFFSET__MASK; -} - -#define REG_A2XX_PA_CL_VPORT_YSCALE 0x00002111 -#define A2XX_PA_CL_VPORT_YSCALE__MASK 0xffffffff -#define A2XX_PA_CL_VPORT_YSCALE__SHIFT 0 -static inline uint32_t A2XX_PA_CL_VPORT_YSCALE(float val) -{ - return ((fui(val)) << A2XX_PA_CL_VPORT_YSCALE__SHIFT) & A2XX_PA_CL_VPORT_YSCALE__MASK; -} - -#define REG_A2XX_PA_CL_VPORT_YOFFSET 0x00002112 -#define A2XX_PA_CL_VPORT_YOFFSET__MASK 0xffffffff -#define A2XX_PA_CL_VPORT_YOFFSET__SHIFT 0 -static inline uint32_t A2XX_PA_CL_VPORT_YOFFSET(float val) -{ - return ((fui(val)) << A2XX_PA_CL_VPORT_YOFFSET__SHIFT) & A2XX_PA_CL_VPORT_YOFFSET__MASK; -} - -#define REG_A2XX_PA_CL_VPORT_ZSCALE 0x00002113 -#define A2XX_PA_CL_VPORT_ZSCALE__MASK 0xffffffff -#define A2XX_PA_CL_VPORT_ZSCALE__SHIFT 0 -static inline uint32_t A2XX_PA_CL_VPORT_ZSCALE(float val) -{ - return ((fui(val)) << A2XX_PA_CL_VPORT_ZSCALE__SHIFT) & A2XX_PA_CL_VPORT_ZSCALE__MASK; -} - -#define REG_A2XX_PA_CL_VPORT_ZOFFSET 0x00002114 -#define A2XX_PA_CL_VPORT_ZOFFSET__MASK 0xffffffff -#define A2XX_PA_CL_VPORT_ZOFFSET__SHIFT 0 -static inline uint32_t A2XX_PA_CL_VPORT_ZOFFSET(float val) -{ - return ((fui(val)) << A2XX_PA_CL_VPORT_ZOFFSET__SHIFT) & A2XX_PA_CL_VPORT_ZOFFSET__MASK; -} - -#define REG_A2XX_SQ_PROGRAM_CNTL 0x00002180 -#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK 0x000000ff -#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT 0 -static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_REGS(uint32_t val) -{ - return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK; -} -#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK 0x0000ff00 -#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT 8 -static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_REGS(uint32_t val) -{ - return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK; -} -#define A2XX_SQ_PROGRAM_CNTL_VS_RESOURCE 0x00010000 -#define A2XX_SQ_PROGRAM_CNTL_PS_RESOURCE 0x00020000 -#define A2XX_SQ_PROGRAM_CNTL_PARAM_GEN 0x00040000 -#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_PIX 0x00080000 -#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK 0x00f00000 -#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT 20 -static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT(uint32_t val) -{ - return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK; -} -#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK 0x07000000 -#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT 24 -static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE(enum a2xx_sq_ps_vtx_mode val) -{ - return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK; -} -#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK 0x78000000 -#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT 27 -static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE(uint32_t val) -{ - return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK; -} -#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_VTX 0x80000000 - -#define 
REG_A2XX_SQ_CONTEXT_MISC 0x00002181 -#define A2XX_SQ_CONTEXT_MISC_INST_PRED_OPTIMIZE 0x00000001 -#define A2XX_SQ_CONTEXT_MISC_SC_OUTPUT_SCREEN_XY 0x00000002 -#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK 0x0000000c -#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT 2 -static inline uint32_t A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(enum a2xx_sq_sample_cntl val) -{ - return ((val) << A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT) & A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK; -} -#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK 0x0000ff00 -#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT 8 -static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val) -{ - return ((val) << A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT) & A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK; -} -#define A2XX_SQ_CONTEXT_MISC_PERFCOUNTER_REF 0x00010000 -#define A2XX_SQ_CONTEXT_MISC_YEILD_OPTIMIZE 0x00020000 -#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000 - -#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182 -#define A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__MASK 0x0000ffff -#define A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__SHIFT 0 -static inline uint32_t A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE(uint32_t val) -{ - return ((val) << A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__SHIFT) & A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__MASK; -} -#define A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__MASK 0xffff0000 -#define A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__SHIFT 16 -static inline uint32_t A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN(uint32_t val) -{ - return ((val) << A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__SHIFT) & A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__MASK; -} - -#define REG_A2XX_SQ_WRAPPING_0 0x00002183 -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__MASK 0x0000000f -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__SHIFT 0 -static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_0(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__MASK; -} -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__MASK 0x000000f0 -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__SHIFT 4 -static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_1(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__MASK; -} -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__MASK 0x00000f00 -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__SHIFT 8 -static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_2(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__MASK; -} -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__MASK 0x0000f000 -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__SHIFT 12 -static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_3(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__MASK; -} -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__MASK 0x000f0000 -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__SHIFT 16 -static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_4(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__MASK; -} -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__MASK 0x00f00000 -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__SHIFT 20 -static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_5(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__MASK; -} -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__MASK 0x0f000000 -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__SHIFT 24 -static inline uint32_t 
A2XX_SQ_WRAPPING_0_PARAM_WRAP_6(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__MASK; -} -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__MASK 0xf0000000 -#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__SHIFT 28 -static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_7(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__MASK; -} - -#define REG_A2XX_SQ_WRAPPING_1 0x00002184 -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__MASK 0x0000000f -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__SHIFT 0 -static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_8(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__MASK; -} -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__MASK 0x000000f0 -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__SHIFT 4 -static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_9(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__MASK; -} -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__MASK 0x00000f00 -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__SHIFT 8 -static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_10(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__MASK; -} -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__MASK 0x0000f000 -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__SHIFT 12 -static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_11(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__MASK; -} -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__MASK 0x000f0000 -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__SHIFT 16 -static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_12(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__MASK; -} -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__MASK 0x00f00000 -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__SHIFT 20 -static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_13(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__MASK; -} -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__MASK 0x0f000000 -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__SHIFT 24 -static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_14(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__MASK; -} -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__MASK 0xf0000000 -#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__SHIFT 28 -static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_15(uint32_t val) -{ - return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__MASK; -} - -#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6 -#define A2XX_SQ_PS_PROGRAM_BASE__MASK 0x00000fff -#define A2XX_SQ_PS_PROGRAM_BASE__SHIFT 0 -static inline uint32_t A2XX_SQ_PS_PROGRAM_BASE(uint32_t val) -{ - return ((val) << A2XX_SQ_PS_PROGRAM_BASE__SHIFT) & A2XX_SQ_PS_PROGRAM_BASE__MASK; -} -#define A2XX_SQ_PS_PROGRAM_SIZE__MASK 0x00fff000 -#define A2XX_SQ_PS_PROGRAM_SIZE__SHIFT 12 -static inline uint32_t A2XX_SQ_PS_PROGRAM_SIZE(uint32_t val) -{ - return ((val) << A2XX_SQ_PS_PROGRAM_SIZE__SHIFT) & A2XX_SQ_PS_PROGRAM_SIZE__MASK; -} - -#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7 -#define A2XX_SQ_VS_PROGRAM_BASE__MASK 0x00000fff -#define A2XX_SQ_VS_PROGRAM_BASE__SHIFT 0 -static inline uint32_t A2XX_SQ_VS_PROGRAM_BASE(uint32_t val) -{ - return ((val) << 
A2XX_SQ_VS_PROGRAM_BASE__SHIFT) & A2XX_SQ_VS_PROGRAM_BASE__MASK; -} -#define A2XX_SQ_VS_PROGRAM_SIZE__MASK 0x00fff000 -#define A2XX_SQ_VS_PROGRAM_SIZE__SHIFT 12 -static inline uint32_t A2XX_SQ_VS_PROGRAM_SIZE(uint32_t val) -{ - return ((val) << A2XX_SQ_VS_PROGRAM_SIZE__SHIFT) & A2XX_SQ_VS_PROGRAM_SIZE__MASK; -} - -#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9 - -#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc -#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f -#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0 -static inline uint32_t A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val) -{ - return ((val) << A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK; -} -#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0 -#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6 -static inline uint32_t A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val) -{ - return ((val) << A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK; -} -#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600 -#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9 -static inline uint32_t A2XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val) -{ - return ((val) << A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK; -} -#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800 -#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11 -static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val) -{ - return ((val) << A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK; -} -#define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000 -#define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000 -#define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000 -#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000 -#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24 -static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val) -{ - return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK; -} - -#define REG_A2XX_VGT_IMMED_DATA 0x000021fd - -#define REG_A2XX_RB_DEPTHCONTROL 0x00002200 -#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001 -#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002 -#define A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE 0x00000004 -#define A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE 0x00000008 -#define A2XX_RB_DEPTHCONTROL_ZFUNC__MASK 0x00000070 -#define A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT 4 -static inline uint32_t A2XX_RB_DEPTHCONTROL_ZFUNC(enum adreno_compare_func val) -{ - return ((val) << A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_ZFUNC__MASK; -} -#define A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE 0x00000080 -#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK 0x00000700 -#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT 8 -static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC(enum adreno_compare_func val) -{ - return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK; -} -#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK 0x00003800 -#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT 11 -static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL(enum adreno_stencil_op val) -{ - return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK; -} -#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK 0x0001c000 -#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT 14 -static inline uint32_t 
A2XX_RB_DEPTHCONTROL_STENCILZPASS(enum adreno_stencil_op val) -{ - return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK; -} -#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK 0x000e0000 -#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT 17 -static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL(enum adreno_stencil_op val) -{ - return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK; -} -#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK 0x00700000 -#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT 20 -static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(enum adreno_compare_func val) -{ - return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK; -} -#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK 0x03800000 -#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT 23 -static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(enum adreno_stencil_op val) -{ - return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK; -} -#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK 0x1c000000 -#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT 26 -static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(enum adreno_stencil_op val) -{ - return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK; -} -#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK 0xe0000000 -#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT 29 -static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(enum adreno_stencil_op val) -{ - return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK; -} - -#define REG_A2XX_RB_BLEND_CONTROL 0x00002201 -#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK 0x0000001f -#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT 0 -static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend_factor val) -{ - return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK; -} -#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0 -#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5 -static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum a2xx_rb_blend_opcode val) -{ - return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK; -} -#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK 0x00001f00 -#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT 8 -static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(enum adreno_rb_blend_factor val) -{ - return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK; -} -#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK 0x001f0000 -#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT 16 -static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend_factor val) -{ - return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK; -} -#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000 -#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21 -static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum a2xx_rb_blend_opcode val) -{ - return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK; -} -#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK 0x1f000000 -#define 
A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT 24 -static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(enum adreno_rb_blend_factor val) -{ - return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK; -} -#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE_ENABLE 0x20000000 -#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE 0x40000000 - -#define REG_A2XX_RB_COLORCONTROL 0x00002202 -#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK 0x00000007 -#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT 0 -static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_FUNC(enum adreno_compare_func val) -{ - return ((val) << A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK; -} -#define A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE 0x00000008 -#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_ENABLE 0x00000010 -#define A2XX_RB_COLORCONTROL_BLEND_DISABLE 0x00000020 -#define A2XX_RB_COLORCONTROL_VOB_ENABLE 0x00000040 -#define A2XX_RB_COLORCONTROL_VS_EXPORTS_FOG 0x00000080 -#define A2XX_RB_COLORCONTROL_ROP_CODE__MASK 0x00000f00 -#define A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT 8 -static inline uint32_t A2XX_RB_COLORCONTROL_ROP_CODE(uint32_t val) -{ - return ((val) << A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT) & A2XX_RB_COLORCONTROL_ROP_CODE__MASK; -} -#define A2XX_RB_COLORCONTROL_DITHER_MODE__MASK 0x00003000 -#define A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT 12 -static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_MODE(enum adreno_rb_dither_mode val) -{ - return ((val) << A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_MODE__MASK; -} -#define A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK 0x0000c000 -#define A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT 14 -static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_TYPE(enum a2xx_rb_dither_type val) -{ - return ((val) << A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK; -} -#define A2XX_RB_COLORCONTROL_PIXEL_FOG 0x00010000 -#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK 0x03000000 -#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT 24 -static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0(uint32_t val) -{ - return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK; -} -#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK 0x0c000000 -#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT 26 -static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1(uint32_t val) -{ - return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK; -} -#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK 0x30000000 -#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT 28 -static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2(uint32_t val) -{ - return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK; -} -#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK 0xc0000000 -#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT 30 -static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3(uint32_t val) -{ - return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK; -} - -#define REG_A2XX_VGT_CURRENT_BIN_ID_MAX 0x00002203 -#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK 0x00000007 -#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT 0 -static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN(uint32_t val) -{ - 
return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK; -} -#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK 0x00000038 -#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT 3 -static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_ROW(uint32_t val) -{ - return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK; -} -#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK 0x000001c0 -#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT 6 -static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK(uint32_t val) -{ - return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK; -} - -#define REG_A2XX_PA_CL_CLIP_CNTL 0x00002204 -#define A2XX_PA_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000 -#define A2XX_PA_CL_CLIP_CNTL_BOUNDARY_EDGE_FLAG_ENA 0x00040000 -#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK 0x00080000 -#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT 19 -static inline uint32_t A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF(enum a2xx_dx_clip_space val) -{ - return ((val) << A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT) & A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK; -} -#define A2XX_PA_CL_CLIP_CNTL_DIS_CLIP_ERR_DETECT 0x00100000 -#define A2XX_PA_CL_CLIP_CNTL_VTX_KILL_OR 0x00200000 -#define A2XX_PA_CL_CLIP_CNTL_XY_NAN_RETAIN 0x00400000 -#define A2XX_PA_CL_CLIP_CNTL_Z_NAN_RETAIN 0x00800000 -#define A2XX_PA_CL_CLIP_CNTL_W_NAN_RETAIN 0x01000000 - -#define REG_A2XX_PA_SU_SC_MODE_CNTL 0x00002205 -#define A2XX_PA_SU_SC_MODE_CNTL_CULL_FRONT 0x00000001 -#define A2XX_PA_SU_SC_MODE_CNTL_CULL_BACK 0x00000002 -#define A2XX_PA_SU_SC_MODE_CNTL_FACE 0x00000004 -#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK 0x00000018 -#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT 3 -static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_POLYMODE(enum a2xx_pa_su_sc_polymode val) -{ - return ((val) << A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK; -} -#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK 0x000000e0 -#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT 5 -static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(enum adreno_pa_su_sc_draw val) -{ - return ((val) << A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK; -} -#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK 0x00000700 -#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT 8 -static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(enum adreno_pa_su_sc_draw val) -{ - return ((val) << A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK; -} -#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_FRONT_ENABLE 0x00000800 -#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_BACK_ENABLE 0x00001000 -#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_PARA_ENABLE 0x00002000 -#define A2XX_PA_SU_SC_MODE_CNTL_MSAA_ENABLE 0x00008000 -#define A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE 0x00010000 -#define A2XX_PA_SU_SC_MODE_CNTL_LINE_STIPPLE_ENABLE 0x00040000 -#define A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST 0x00080000 -#define A2XX_PA_SU_SC_MODE_CNTL_PERSP_CORR_DIS 0x00100000 -#define A2XX_PA_SU_SC_MODE_CNTL_MULTI_PRIM_IB_ENA 0x00200000 -#define A2XX_PA_SU_SC_MODE_CNTL_QUAD_ORDER_ENABLE 0x00800000 -#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_ALL_TRI 0x02000000 -#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_FIRST_TRI_NEW_STATE 0x04000000 -#define A2XX_PA_SU_SC_MODE_CNTL_CLAMPED_FACENESS 0x10000000 -#define 
A2XX_PA_SU_SC_MODE_CNTL_ZERO_AREA_FACENESS 0x20000000 -#define A2XX_PA_SU_SC_MODE_CNTL_FACE_KILL_ENABLE 0x40000000 -#define A2XX_PA_SU_SC_MODE_CNTL_FACE_WRITE_ENABLE 0x80000000 - -#define REG_A2XX_PA_CL_VTE_CNTL 0x00002206 -#define A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA 0x00000001 -#define A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA 0x00000002 -#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA 0x00000004 -#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA 0x00000008 -#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA 0x00000010 -#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA 0x00000020 -#define A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT 0x00000100 -#define A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT 0x00000200 -#define A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT 0x00000400 -#define A2XX_PA_CL_VTE_CNTL_PERFCOUNTER_REF 0x00000800 - -#define REG_A2XX_VGT_CURRENT_BIN_ID_MIN 0x00002207 -#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK 0x00000007 -#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT 0 -static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN(uint32_t val) -{ - return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK; -} -#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK 0x00000038 -#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT 3 -static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_ROW(uint32_t val) -{ - return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK; -} -#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK 0x000001c0 -#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT 6 -static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK(uint32_t val) -{ - return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK; -} - -#define REG_A2XX_RB_MODECONTROL 0x00002208 -#define A2XX_RB_MODECONTROL_EDRAM_MODE__MASK 0x00000007 -#define A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT 0 -static inline uint32_t A2XX_RB_MODECONTROL_EDRAM_MODE(enum a2xx_rb_edram_mode val) -{ - return ((val) << A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT) & A2XX_RB_MODECONTROL_EDRAM_MODE__MASK; -} - -#define REG_A2XX_A220_RB_LRZ_VSC_CONTROL 0x00002209 - -#define REG_A2XX_RB_SAMPLE_POS 0x0000220a - -#define REG_A2XX_CLEAR_COLOR 0x0000220b -#define A2XX_CLEAR_COLOR_RED__MASK 0x000000ff -#define A2XX_CLEAR_COLOR_RED__SHIFT 0 -static inline uint32_t A2XX_CLEAR_COLOR_RED(uint32_t val) -{ - return ((val) << A2XX_CLEAR_COLOR_RED__SHIFT) & A2XX_CLEAR_COLOR_RED__MASK; -} -#define A2XX_CLEAR_COLOR_GREEN__MASK 0x0000ff00 -#define A2XX_CLEAR_COLOR_GREEN__SHIFT 8 -static inline uint32_t A2XX_CLEAR_COLOR_GREEN(uint32_t val) -{ - return ((val) << A2XX_CLEAR_COLOR_GREEN__SHIFT) & A2XX_CLEAR_COLOR_GREEN__MASK; -} -#define A2XX_CLEAR_COLOR_BLUE__MASK 0x00ff0000 -#define A2XX_CLEAR_COLOR_BLUE__SHIFT 16 -static inline uint32_t A2XX_CLEAR_COLOR_BLUE(uint32_t val) -{ - return ((val) << A2XX_CLEAR_COLOR_BLUE__SHIFT) & A2XX_CLEAR_COLOR_BLUE__MASK; -} -#define A2XX_CLEAR_COLOR_ALPHA__MASK 0xff000000 -#define A2XX_CLEAR_COLOR_ALPHA__SHIFT 24 -static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val) -{ - return ((val) << A2XX_CLEAR_COLOR_ALPHA__SHIFT) & A2XX_CLEAR_COLOR_ALPHA__MASK; -} - -#define REG_A2XX_A220_GRAS_CONTROL 0x00002210 - -#define REG_A2XX_PA_SU_POINT_SIZE 0x00002280 -#define A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK 0x0000ffff -#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0 -static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val) -{ - return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & 
A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK; -} -#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000 -#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16 -static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val) -{ - return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK; -} - -#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281 -#define A2XX_PA_SU_POINT_MINMAX_MIN__MASK 0x0000ffff -#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0 -static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val) -{ - return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK; -} -#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000 -#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16 -static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val) -{ - return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK; -} - -#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282 -#define A2XX_PA_SU_LINE_CNTL_WIDTH__MASK 0x0000ffff -#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0 -static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val) -{ - return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK; -} - -#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283 -#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK 0x0000ffff -#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT 0 -static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN(uint32_t val) -{ - return ((val) << A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK; -} -#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK 0x00ff0000 -#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT 16 -static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT(uint32_t val) -{ - return ((val) << A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK; -} -#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK 0x10000000 -#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT 28 -static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER(enum a2xx_pa_sc_pattern_bit_order val) -{ - return ((val) << A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK; -} -#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK 0x60000000 -#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT 29 -static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_auto_reset_cntl val) -{ - return ((val) << A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK; -} - -#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293 -#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ENA 0x00000001 -#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__MASK 0x0000007e -#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__SHIFT 1 -static inline uint32_t A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID(uint32_t val) -{ - return ((val) << A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__SHIFT) & A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__MASK; -} -#define A2XX_PA_SC_VIZ_QUERY_KILL_PIX_POST_EARLY_Z 0x00000100 - -#define REG_A2XX_VGT_ENHANCE 0x00002294 - -#define REG_A2XX_PA_SC_LINE_CNTL 0x00002300 -#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK 0x0000ffff -#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT 0 -static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val) -{ - return ((val) << A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT) & A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK; -} -#define A2XX_PA_SC_LINE_CNTL_USE_BRES_CNTL 0x00000100 -#define 
A2XX_PA_SC_LINE_CNTL_EXPAND_LINE_WIDTH 0x00000200 -#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400 - -#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301 -#define A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__MASK 0x00000007 -#define A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__SHIFT 0 -static inline uint32_t A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES(uint32_t val) -{ - return ((val) << A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__SHIFT) & A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__MASK; -} -#define A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__MASK 0x0001e000 -#define A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__SHIFT 13 -static inline uint32_t A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST(uint32_t val) -{ - return ((val) << A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__SHIFT) & A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__MASK; -} - -#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302 -#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001 -#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT 0 -static inline uint32_t A2XX_PA_SU_VTX_CNTL_PIX_CENTER(enum a2xx_pa_pixcenter val) -{ - return ((val) << A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT) & A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK; -} -#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK 0x00000006 -#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT 1 -static inline uint32_t A2XX_PA_SU_VTX_CNTL_ROUND_MODE(enum a2xx_pa_roundmode val) -{ - return ((val) << A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK; -} -#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK 0x00000380 -#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT 7 -static inline uint32_t A2XX_PA_SU_VTX_CNTL_QUANT_MODE(enum a2xx_pa_quantmode val) -{ - return ((val) << A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK; -} - -#define REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ 0x00002303 -#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK 0xffffffff -#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT 0 -static inline uint32_t A2XX_PA_CL_GB_VERT_CLIP_ADJ(float val) -{ - return ((fui(val)) << A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK; -} - -#define REG_A2XX_PA_CL_GB_VERT_DISC_ADJ 0x00002304 -#define A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK 0xffffffff -#define A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT 0 -static inline uint32_t A2XX_PA_CL_GB_VERT_DISC_ADJ(float val) -{ - return ((fui(val)) << A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK; -} - -#define REG_A2XX_PA_CL_GB_HORZ_CLIP_ADJ 0x00002305 -#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK 0xffffffff -#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT 0 -static inline uint32_t A2XX_PA_CL_GB_HORZ_CLIP_ADJ(float val) -{ - return ((fui(val)) << A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK; -} - -#define REG_A2XX_PA_CL_GB_HORZ_DISC_ADJ 0x00002306 -#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK 0xffffffff -#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT 0 -static inline uint32_t A2XX_PA_CL_GB_HORZ_DISC_ADJ(float val) -{ - return ((fui(val)) << A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK; -} - -#define REG_A2XX_SQ_VS_CONST 0x00002307 -#define A2XX_SQ_VS_CONST_BASE__MASK 0x000001ff -#define A2XX_SQ_VS_CONST_BASE__SHIFT 0 -static inline uint32_t A2XX_SQ_VS_CONST_BASE(uint32_t val) -{ - return ((val) << A2XX_SQ_VS_CONST_BASE__SHIFT) & A2XX_SQ_VS_CONST_BASE__MASK; -} -#define A2XX_SQ_VS_CONST_SIZE__MASK 0x001ff000 -#define A2XX_SQ_VS_CONST_SIZE__SHIFT 12 -static inline uint32_t A2XX_SQ_VS_CONST_SIZE(uint32_t val) -{ - return ((val) << A2XX_SQ_VS_CONST_SIZE__SHIFT) & A2XX_SQ_VS_CONST_SIZE__MASK; -} - -#define REG_A2XX_SQ_PS_CONST 0x00002308 -#define 
A2XX_SQ_PS_CONST_BASE__MASK 0x000001ff -#define A2XX_SQ_PS_CONST_BASE__SHIFT 0 -static inline uint32_t A2XX_SQ_PS_CONST_BASE(uint32_t val) -{ - return ((val) << A2XX_SQ_PS_CONST_BASE__SHIFT) & A2XX_SQ_PS_CONST_BASE__MASK; -} -#define A2XX_SQ_PS_CONST_SIZE__MASK 0x001ff000 -#define A2XX_SQ_PS_CONST_SIZE__SHIFT 12 -static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val) -{ - return ((val) << A2XX_SQ_PS_CONST_SIZE__SHIFT) & A2XX_SQ_PS_CONST_SIZE__MASK; -} - -#define REG_A2XX_SQ_DEBUG_MISC_0 0x00002309 - -#define REG_A2XX_SQ_DEBUG_MISC_1 0x0000230a - -#define REG_A2XX_PA_SC_AA_MASK 0x00002312 - -#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316 -#define A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__MASK 0x00000007 -#define A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__SHIFT 0 -static inline uint32_t A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH(uint32_t val) -{ - return ((val) << A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__SHIFT) & A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__MASK; -} - -#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317 -#define A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__MASK 0x00000003 -#define A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__SHIFT 0 -static inline uint32_t A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST(uint32_t val) -{ - return ((val) << A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__SHIFT) & A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__MASK; -} - -#define REG_A2XX_RB_COPY_CONTROL 0x00002318 -#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007 -#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT 0 -static inline uint32_t A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT(enum a2xx_rb_copy_sample_select val) -{ - return ((val) << A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT) & A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK; -} -#define A2XX_RB_COPY_CONTROL_DEPTH_CLEAR_ENABLE 0x00000008 -#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK 0x000000f0 -#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT 4 -static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val) -{ - return ((val) << A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT) & A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK; -} - -#define REG_A2XX_RB_COPY_DEST_BASE 0x00002319 - -#define REG_A2XX_RB_COPY_DEST_PITCH 0x0000231a -#define A2XX_RB_COPY_DEST_PITCH__MASK 0xffffffff -#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0 -static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK; -} - -#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b -#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK 0x00000007 -#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT 0 -static inline uint32_t A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN(enum adreno_rb_surface_endian val) -{ - return ((val) << A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT) & A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK; -} -#define A2XX_RB_COPY_DEST_INFO_LINEAR 0x00000008 -#define A2XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000f0 -#define A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 4 -static inline uint32_t A2XX_RB_COPY_DEST_INFO_FORMAT(enum a2xx_colorformatx val) -{ - return ((val) << A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A2XX_RB_COPY_DEST_INFO_FORMAT__MASK; -} -#define A2XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300 -#define A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8 -static inline uint32_t A2XX_RB_COPY_DEST_INFO_SWAP(uint32_t val) -{ - return ((val) << A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A2XX_RB_COPY_DEST_INFO_SWAP__MASK; -} -#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00 -#define 
A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10 -static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val) -{ - return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK; -} -#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK 0x00003000 -#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT 12 -static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_TYPE(enum a2xx_rb_dither_type val) -{ - return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK; -} -#define A2XX_RB_COPY_DEST_INFO_WRITE_RED 0x00004000 -#define A2XX_RB_COPY_DEST_INFO_WRITE_GREEN 0x00008000 -#define A2XX_RB_COPY_DEST_INFO_WRITE_BLUE 0x00010000 -#define A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA 0x00020000 - -#define REG_A2XX_RB_COPY_DEST_OFFSET 0x0000231c -#define A2XX_RB_COPY_DEST_OFFSET_X__MASK 0x00001fff -#define A2XX_RB_COPY_DEST_OFFSET_X__SHIFT 0 -static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_X(uint32_t val) -{ - return ((val) << A2XX_RB_COPY_DEST_OFFSET_X__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_X__MASK; -} -#define A2XX_RB_COPY_DEST_OFFSET_Y__MASK 0x03ffe000 -#define A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT 13 -static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val) -{ - return ((val) << A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_Y__MASK; -} - -#define REG_A2XX_RB_DEPTH_CLEAR 0x0000231d - -#define REG_A2XX_RB_SAMPLE_COUNT_CTL 0x00002324 - -#define REG_A2XX_RB_COLOR_DEST_MASK 0x00002326 - -#define REG_A2XX_A225_GRAS_UCP0X 0x00002340 - -#define REG_A2XX_A225_GRAS_UCP5W 0x00002357 - -#define REG_A2XX_A225_GRAS_UCP_ENABLED 0x00002360 - -#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380 - -#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_OFFSET 0x00002381 - -#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_SCALE 0x00002382 - -#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383 - -#define REG_A2XX_SQ_CONSTANT_0 0x00004000 - -#define REG_A2XX_SQ_FETCH_0 0x00004800 - -#define REG_A2XX_SQ_CF_BOOLEANS 0x00004900 - -#define REG_A2XX_SQ_CF_LOOP 0x00004908 - -#define REG_A2XX_COHER_SIZE_PM4 0x00000a29 - -#define REG_A2XX_COHER_BASE_PM4 0x00000a2a - -#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b - -#define REG_A2XX_PA_SU_PERFCOUNTER0_SELECT 0x00000c88 - -#define REG_A2XX_PA_SU_PERFCOUNTER1_SELECT 0x00000c89 - -#define REG_A2XX_PA_SU_PERFCOUNTER2_SELECT 0x00000c8a - -#define REG_A2XX_PA_SU_PERFCOUNTER3_SELECT 0x00000c8b - -#define REG_A2XX_PA_SU_PERFCOUNTER0_LOW 0x00000c8c - -#define REG_A2XX_PA_SU_PERFCOUNTER0_HI 0x00000c8d - -#define REG_A2XX_PA_SU_PERFCOUNTER1_LOW 0x00000c8e - -#define REG_A2XX_PA_SU_PERFCOUNTER1_HI 0x00000c8f - -#define REG_A2XX_PA_SU_PERFCOUNTER2_LOW 0x00000c90 - -#define REG_A2XX_PA_SU_PERFCOUNTER2_HI 0x00000c91 - -#define REG_A2XX_PA_SU_PERFCOUNTER3_LOW 0x00000c92 - -#define REG_A2XX_PA_SU_PERFCOUNTER3_HI 0x00000c93 - -#define REG_A2XX_PA_SC_PERFCOUNTER0_SELECT 0x00000c98 - -#define REG_A2XX_PA_SC_PERFCOUNTER0_LOW 0x00000c99 - -#define REG_A2XX_PA_SC_PERFCOUNTER0_HI 0x00000c9a - -#define REG_A2XX_VGT_PERFCOUNTER0_SELECT 0x00000c48 - -#define REG_A2XX_VGT_PERFCOUNTER1_SELECT 0x00000c49 - -#define REG_A2XX_VGT_PERFCOUNTER2_SELECT 0x00000c4a - -#define REG_A2XX_VGT_PERFCOUNTER3_SELECT 0x00000c4b - -#define REG_A2XX_VGT_PERFCOUNTER0_LOW 0x00000c4c - -#define REG_A2XX_VGT_PERFCOUNTER1_LOW 0x00000c4e - -#define REG_A2XX_VGT_PERFCOUNTER2_LOW 0x00000c50 - -#define REG_A2XX_VGT_PERFCOUNTER3_LOW 0x00000c52 - -#define REG_A2XX_VGT_PERFCOUNTER0_HI 0x00000c4d - -#define REG_A2XX_VGT_PERFCOUNTER1_HI 0x00000c4f 
- -#define REG_A2XX_VGT_PERFCOUNTER2_HI 0x00000c51 - -#define REG_A2XX_VGT_PERFCOUNTER3_HI 0x00000c53 - -#define REG_A2XX_TCR_PERFCOUNTER0_SELECT 0x00000e05 - -#define REG_A2XX_TCR_PERFCOUNTER1_SELECT 0x00000e08 - -#define REG_A2XX_TCR_PERFCOUNTER0_HI 0x00000e06 - -#define REG_A2XX_TCR_PERFCOUNTER1_HI 0x00000e09 - -#define REG_A2XX_TCR_PERFCOUNTER0_LOW 0x00000e07 - -#define REG_A2XX_TCR_PERFCOUNTER1_LOW 0x00000e0a - -#define REG_A2XX_TP0_PERFCOUNTER0_SELECT 0x00000e1f - -#define REG_A2XX_TP0_PERFCOUNTER0_HI 0x00000e20 - -#define REG_A2XX_TP0_PERFCOUNTER0_LOW 0x00000e21 - -#define REG_A2XX_TP0_PERFCOUNTER1_SELECT 0x00000e22 - -#define REG_A2XX_TP0_PERFCOUNTER1_HI 0x00000e23 - -#define REG_A2XX_TP0_PERFCOUNTER1_LOW 0x00000e24 - -#define REG_A2XX_TCM_PERFCOUNTER0_SELECT 0x00000e54 - -#define REG_A2XX_TCM_PERFCOUNTER1_SELECT 0x00000e57 - -#define REG_A2XX_TCM_PERFCOUNTER0_HI 0x00000e55 - -#define REG_A2XX_TCM_PERFCOUNTER1_HI 0x00000e58 - -#define REG_A2XX_TCM_PERFCOUNTER0_LOW 0x00000e56 - -#define REG_A2XX_TCM_PERFCOUNTER1_LOW 0x00000e59 - -#define REG_A2XX_TCF_PERFCOUNTER0_SELECT 0x00000e5a - -#define REG_A2XX_TCF_PERFCOUNTER1_SELECT 0x00000e5d - -#define REG_A2XX_TCF_PERFCOUNTER2_SELECT 0x00000e60 - -#define REG_A2XX_TCF_PERFCOUNTER3_SELECT 0x00000e63 - -#define REG_A2XX_TCF_PERFCOUNTER4_SELECT 0x00000e66 - -#define REG_A2XX_TCF_PERFCOUNTER5_SELECT 0x00000e69 - -#define REG_A2XX_TCF_PERFCOUNTER6_SELECT 0x00000e6c - -#define REG_A2XX_TCF_PERFCOUNTER7_SELECT 0x00000e6f - -#define REG_A2XX_TCF_PERFCOUNTER8_SELECT 0x00000e72 - -#define REG_A2XX_TCF_PERFCOUNTER9_SELECT 0x00000e75 - -#define REG_A2XX_TCF_PERFCOUNTER10_SELECT 0x00000e78 - -#define REG_A2XX_TCF_PERFCOUNTER11_SELECT 0x00000e7b - -#define REG_A2XX_TCF_PERFCOUNTER0_HI 0x00000e5b - -#define REG_A2XX_TCF_PERFCOUNTER1_HI 0x00000e5e - -#define REG_A2XX_TCF_PERFCOUNTER2_HI 0x00000e61 - -#define REG_A2XX_TCF_PERFCOUNTER3_HI 0x00000e64 - -#define REG_A2XX_TCF_PERFCOUNTER4_HI 0x00000e67 - -#define REG_A2XX_TCF_PERFCOUNTER5_HI 0x00000e6a - -#define REG_A2XX_TCF_PERFCOUNTER6_HI 0x00000e6d - -#define REG_A2XX_TCF_PERFCOUNTER7_HI 0x00000e70 - -#define REG_A2XX_TCF_PERFCOUNTER8_HI 0x00000e73 - -#define REG_A2XX_TCF_PERFCOUNTER9_HI 0x00000e76 - -#define REG_A2XX_TCF_PERFCOUNTER10_HI 0x00000e79 - -#define REG_A2XX_TCF_PERFCOUNTER11_HI 0x00000e7c - -#define REG_A2XX_TCF_PERFCOUNTER0_LOW 0x00000e5c - -#define REG_A2XX_TCF_PERFCOUNTER1_LOW 0x00000e5f - -#define REG_A2XX_TCF_PERFCOUNTER2_LOW 0x00000e62 - -#define REG_A2XX_TCF_PERFCOUNTER3_LOW 0x00000e65 - -#define REG_A2XX_TCF_PERFCOUNTER4_LOW 0x00000e68 - -#define REG_A2XX_TCF_PERFCOUNTER5_LOW 0x00000e6b - -#define REG_A2XX_TCF_PERFCOUNTER6_LOW 0x00000e6e - -#define REG_A2XX_TCF_PERFCOUNTER7_LOW 0x00000e71 - -#define REG_A2XX_TCF_PERFCOUNTER8_LOW 0x00000e74 - -#define REG_A2XX_TCF_PERFCOUNTER9_LOW 0x00000e77 - -#define REG_A2XX_TCF_PERFCOUNTER10_LOW 0x00000e7a - -#define REG_A2XX_TCF_PERFCOUNTER11_LOW 0x00000e7d - -#define REG_A2XX_SQ_PERFCOUNTER0_SELECT 0x00000dc8 - -#define REG_A2XX_SQ_PERFCOUNTER1_SELECT 0x00000dc9 - -#define REG_A2XX_SQ_PERFCOUNTER2_SELECT 0x00000dca - -#define REG_A2XX_SQ_PERFCOUNTER3_SELECT 0x00000dcb - -#define REG_A2XX_SQ_PERFCOUNTER0_LOW 0x00000dcc - -#define REG_A2XX_SQ_PERFCOUNTER0_HI 0x00000dcd - -#define REG_A2XX_SQ_PERFCOUNTER1_LOW 0x00000dce - -#define REG_A2XX_SQ_PERFCOUNTER1_HI 0x00000dcf - -#define REG_A2XX_SQ_PERFCOUNTER2_LOW 0x00000dd0 - -#define REG_A2XX_SQ_PERFCOUNTER2_HI 0x00000dd1 - -#define REG_A2XX_SQ_PERFCOUNTER3_LOW 0x00000dd2 - -#define 
REG_A2XX_SQ_PERFCOUNTER3_HI 0x00000dd3 - -#define REG_A2XX_SX_PERFCOUNTER0_SELECT 0x00000dd4 - -#define REG_A2XX_SX_PERFCOUNTER0_LOW 0x00000dd8 - -#define REG_A2XX_SX_PERFCOUNTER0_HI 0x00000dd9 - -#define REG_A2XX_MH_PERFCOUNTER0_SELECT 0x00000a46 - -#define REG_A2XX_MH_PERFCOUNTER1_SELECT 0x00000a4a - -#define REG_A2XX_MH_PERFCOUNTER0_CONFIG 0x00000a47 - -#define REG_A2XX_MH_PERFCOUNTER1_CONFIG 0x00000a4b - -#define REG_A2XX_MH_PERFCOUNTER0_LOW 0x00000a48 - -#define REG_A2XX_MH_PERFCOUNTER1_LOW 0x00000a4c - -#define REG_A2XX_MH_PERFCOUNTER0_HI 0x00000a49 - -#define REG_A2XX_MH_PERFCOUNTER1_HI 0x00000a4d - -#define REG_A2XX_RB_PERFCOUNTER0_SELECT 0x00000f04 - -#define REG_A2XX_RB_PERFCOUNTER1_SELECT 0x00000f05 - -#define REG_A2XX_RB_PERFCOUNTER2_SELECT 0x00000f06 - -#define REG_A2XX_RB_PERFCOUNTER3_SELECT 0x00000f07 - -#define REG_A2XX_RB_PERFCOUNTER0_LOW 0x00000f08 - -#define REG_A2XX_RB_PERFCOUNTER0_HI 0x00000f09 - -#define REG_A2XX_RB_PERFCOUNTER1_LOW 0x00000f0a - -#define REG_A2XX_RB_PERFCOUNTER1_HI 0x00000f0b - -#define REG_A2XX_RB_PERFCOUNTER2_LOW 0x00000f0c - -#define REG_A2XX_RB_PERFCOUNTER2_HI 0x00000f0d - -#define REG_A2XX_RB_PERFCOUNTER3_LOW 0x00000f0e - -#define REG_A2XX_RB_PERFCOUNTER3_HI 0x00000f0f - -#define REG_A2XX_SQ_TEX_0 0x00000000 -#define A2XX_SQ_TEX_0_TYPE__MASK 0x00000003 -#define A2XX_SQ_TEX_0_TYPE__SHIFT 0 -static inline uint32_t A2XX_SQ_TEX_0_TYPE(enum sq_tex_type val) -{ - return ((val) << A2XX_SQ_TEX_0_TYPE__SHIFT) & A2XX_SQ_TEX_0_TYPE__MASK; -} -#define A2XX_SQ_TEX_0_SIGN_X__MASK 0x0000000c -#define A2XX_SQ_TEX_0_SIGN_X__SHIFT 2 -static inline uint32_t A2XX_SQ_TEX_0_SIGN_X(enum sq_tex_sign val) -{ - return ((val) << A2XX_SQ_TEX_0_SIGN_X__SHIFT) & A2XX_SQ_TEX_0_SIGN_X__MASK; -} -#define A2XX_SQ_TEX_0_SIGN_Y__MASK 0x00000030 -#define A2XX_SQ_TEX_0_SIGN_Y__SHIFT 4 -static inline uint32_t A2XX_SQ_TEX_0_SIGN_Y(enum sq_tex_sign val) -{ - return ((val) << A2XX_SQ_TEX_0_SIGN_Y__SHIFT) & A2XX_SQ_TEX_0_SIGN_Y__MASK; -} -#define A2XX_SQ_TEX_0_SIGN_Z__MASK 0x000000c0 -#define A2XX_SQ_TEX_0_SIGN_Z__SHIFT 6 -static inline uint32_t A2XX_SQ_TEX_0_SIGN_Z(enum sq_tex_sign val) -{ - return ((val) << A2XX_SQ_TEX_0_SIGN_Z__SHIFT) & A2XX_SQ_TEX_0_SIGN_Z__MASK; -} -#define A2XX_SQ_TEX_0_SIGN_W__MASK 0x00000300 -#define A2XX_SQ_TEX_0_SIGN_W__SHIFT 8 -static inline uint32_t A2XX_SQ_TEX_0_SIGN_W(enum sq_tex_sign val) -{ - return ((val) << A2XX_SQ_TEX_0_SIGN_W__SHIFT) & A2XX_SQ_TEX_0_SIGN_W__MASK; -} -#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00 -#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10 -static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val) -{ - return ((val) << A2XX_SQ_TEX_0_CLAMP_X__SHIFT) & A2XX_SQ_TEX_0_CLAMP_X__MASK; -} -#define A2XX_SQ_TEX_0_CLAMP_Y__MASK 0x0000e000 -#define A2XX_SQ_TEX_0_CLAMP_Y__SHIFT 13 -static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Y(enum sq_tex_clamp val) -{ - return ((val) << A2XX_SQ_TEX_0_CLAMP_Y__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Y__MASK; -} -#define A2XX_SQ_TEX_0_CLAMP_Z__MASK 0x00070000 -#define A2XX_SQ_TEX_0_CLAMP_Z__SHIFT 16 -static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val) -{ - return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK; -} -#define A2XX_SQ_TEX_0_PITCH__MASK 0x7fc00000 -#define A2XX_SQ_TEX_0_PITCH__SHIFT 22 -static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK; -} -#define A2XX_SQ_TEX_0_TILED 0x80000000 - -#define REG_A2XX_SQ_TEX_1 0x00000001 -#define A2XX_SQ_TEX_1_FORMAT__MASK 
0x0000003f -#define A2XX_SQ_TEX_1_FORMAT__SHIFT 0 -static inline uint32_t A2XX_SQ_TEX_1_FORMAT(enum a2xx_sq_surfaceformat val) -{ - return ((val) << A2XX_SQ_TEX_1_FORMAT__SHIFT) & A2XX_SQ_TEX_1_FORMAT__MASK; -} -#define A2XX_SQ_TEX_1_ENDIANNESS__MASK 0x000000c0 -#define A2XX_SQ_TEX_1_ENDIANNESS__SHIFT 6 -static inline uint32_t A2XX_SQ_TEX_1_ENDIANNESS(enum sq_tex_endian val) -{ - return ((val) << A2XX_SQ_TEX_1_ENDIANNESS__SHIFT) & A2XX_SQ_TEX_1_ENDIANNESS__MASK; -} -#define A2XX_SQ_TEX_1_REQUEST_SIZE__MASK 0x00000300 -#define A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT 8 -static inline uint32_t A2XX_SQ_TEX_1_REQUEST_SIZE(uint32_t val) -{ - return ((val) << A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT) & A2XX_SQ_TEX_1_REQUEST_SIZE__MASK; -} -#define A2XX_SQ_TEX_1_STACKED 0x00000400 -#define A2XX_SQ_TEX_1_CLAMP_POLICY__MASK 0x00000800 -#define A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT 11 -static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val) -{ - return ((val) << A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT) & A2XX_SQ_TEX_1_CLAMP_POLICY__MASK; -} -#define A2XX_SQ_TEX_1_BASE_ADDRESS__MASK 0xfffff000 -#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT 12 -static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK; -} - -#define REG_A2XX_SQ_TEX_2 0x00000002 -#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff -#define A2XX_SQ_TEX_2_WIDTH__SHIFT 0 -static inline uint32_t A2XX_SQ_TEX_2_WIDTH(uint32_t val) -{ - return ((val) << A2XX_SQ_TEX_2_WIDTH__SHIFT) & A2XX_SQ_TEX_2_WIDTH__MASK; -} -#define A2XX_SQ_TEX_2_HEIGHT__MASK 0x03ffe000 -#define A2XX_SQ_TEX_2_HEIGHT__SHIFT 13 -static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val) -{ - return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK; -} -#define A2XX_SQ_TEX_2_DEPTH__MASK 0xfc000000 -#define A2XX_SQ_TEX_2_DEPTH__SHIFT 26 -static inline uint32_t A2XX_SQ_TEX_2_DEPTH(uint32_t val) -{ - return ((val) << A2XX_SQ_TEX_2_DEPTH__SHIFT) & A2XX_SQ_TEX_2_DEPTH__MASK; -} - -#define REG_A2XX_SQ_TEX_3 0x00000003 -#define A2XX_SQ_TEX_3_NUM_FORMAT__MASK 0x00000001 -#define A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT 0 -static inline uint32_t A2XX_SQ_TEX_3_NUM_FORMAT(enum sq_tex_num_format val) -{ - return ((val) << A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT) & A2XX_SQ_TEX_3_NUM_FORMAT__MASK; -} -#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e -#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1 -static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val) -{ - return ((val) << A2XX_SQ_TEX_3_SWIZ_X__SHIFT) & A2XX_SQ_TEX_3_SWIZ_X__MASK; -} -#define A2XX_SQ_TEX_3_SWIZ_Y__MASK 0x00000070 -#define A2XX_SQ_TEX_3_SWIZ_Y__SHIFT 4 -static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Y(enum sq_tex_swiz val) -{ - return ((val) << A2XX_SQ_TEX_3_SWIZ_Y__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Y__MASK; -} -#define A2XX_SQ_TEX_3_SWIZ_Z__MASK 0x00000380 -#define A2XX_SQ_TEX_3_SWIZ_Z__SHIFT 7 -static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Z(enum sq_tex_swiz val) -{ - return ((val) << A2XX_SQ_TEX_3_SWIZ_Z__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Z__MASK; -} -#define A2XX_SQ_TEX_3_SWIZ_W__MASK 0x00001c00 -#define A2XX_SQ_TEX_3_SWIZ_W__SHIFT 10 -static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val) -{ - return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK; -} -#define A2XX_SQ_TEX_3_EXP_ADJUST__MASK 0x0007e000 -#define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT 13 -static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(int32_t val) -{ - return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & 
A2XX_SQ_TEX_3_EXP_ADJUST__MASK; -} -#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000 -#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19 -static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val) -{ - return ((val) << A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK; -} -#define A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK 0x00600000 -#define A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT 21 -static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val) -{ - return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK; -} -#define A2XX_SQ_TEX_3_MIP_FILTER__MASK 0x01800000 -#define A2XX_SQ_TEX_3_MIP_FILTER__SHIFT 23 -static inline uint32_t A2XX_SQ_TEX_3_MIP_FILTER(enum sq_tex_filter val) -{ - return ((val) << A2XX_SQ_TEX_3_MIP_FILTER__SHIFT) & A2XX_SQ_TEX_3_MIP_FILTER__MASK; -} -#define A2XX_SQ_TEX_3_ANISO_FILTER__MASK 0x0e000000 -#define A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT 25 -static inline uint32_t A2XX_SQ_TEX_3_ANISO_FILTER(enum sq_tex_aniso_filter val) -{ - return ((val) << A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT) & A2XX_SQ_TEX_3_ANISO_FILTER__MASK; -} -#define A2XX_SQ_TEX_3_BORDER_SIZE__MASK 0x80000000 -#define A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT 31 -static inline uint32_t A2XX_SQ_TEX_3_BORDER_SIZE(uint32_t val) -{ - return ((val) << A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT) & A2XX_SQ_TEX_3_BORDER_SIZE__MASK; -} - -#define REG_A2XX_SQ_TEX_4 0x00000004 -#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK 0x00000001 -#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT 0 -static inline uint32_t A2XX_SQ_TEX_4_VOL_MAG_FILTER(enum sq_tex_filter val) -{ - return ((val) << A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK; -} -#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK 0x00000002 -#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT 1 -static inline uint32_t A2XX_SQ_TEX_4_VOL_MIN_FILTER(enum sq_tex_filter val) -{ - return ((val) << A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK; -} -#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK 0x0000003c -#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT 2 -static inline uint32_t A2XX_SQ_TEX_4_MIP_MIN_LEVEL(uint32_t val) -{ - return ((val) << A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK; -} -#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK 0x000003c0 -#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT 6 -static inline uint32_t A2XX_SQ_TEX_4_MIP_MAX_LEVEL(uint32_t val) -{ - return ((val) << A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK; -} -#define A2XX_SQ_TEX_4_MAX_ANISO_WALK 0x00000400 -#define A2XX_SQ_TEX_4_MIN_ANISO_WALK 0x00000800 -#define A2XX_SQ_TEX_4_LOD_BIAS__MASK 0x003ff000 -#define A2XX_SQ_TEX_4_LOD_BIAS__SHIFT 12 -static inline uint32_t A2XX_SQ_TEX_4_LOD_BIAS(float val) -{ - return ((((int32_t)(val * 32.0))) << A2XX_SQ_TEX_4_LOD_BIAS__SHIFT) & A2XX_SQ_TEX_4_LOD_BIAS__MASK; -} -#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK 0x07c00000 -#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT 22 -static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H(uint32_t val) -{ - return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK; -} -#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK 0xf8000000 -#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT 27 -static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V(uint32_t val) -{ - return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK; -} - -#define REG_A2XX_SQ_TEX_5 0x00000005 -#define A2XX_SQ_TEX_5_BORDER_COLOR__MASK 0x00000003 -#define 
A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT 0
-static inline uint32_t A2XX_SQ_TEX_5_BORDER_COLOR(enum sq_tex_border_color val)
-{
-	return ((val) << A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT) & A2XX_SQ_TEX_5_BORDER_COLOR__MASK;
-}
-#define A2XX_SQ_TEX_5_FORCE_BCW_MAX 0x00000004
-#define A2XX_SQ_TEX_5_TRI_CLAMP__MASK 0x00000018
-#define A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT 3
-static inline uint32_t A2XX_SQ_TEX_5_TRI_CLAMP(uint32_t val)
-{
-	return ((val) << A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT) & A2XX_SQ_TEX_5_TRI_CLAMP__MASK;
-}
-#define A2XX_SQ_TEX_5_ANISO_BIAS__MASK 0x000001e0
-#define A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT 5
-static inline uint32_t A2XX_SQ_TEX_5_ANISO_BIAS(float val)
-{
-	return ((((int32_t)(val * 1.0))) << A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT) & A2XX_SQ_TEX_5_ANISO_BIAS__MASK;
-}
-#define A2XX_SQ_TEX_5_DIMENSION__MASK 0x00000600
-#define A2XX_SQ_TEX_5_DIMENSION__SHIFT 9
-static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension val)
-{
-	return ((val) << A2XX_SQ_TEX_5_DIMENSION__SHIFT) & A2XX_SQ_TEX_5_DIMENSION__MASK;
-}
-#define A2XX_SQ_TEX_5_PACKED_MIPS 0x00000800
-#define A2XX_SQ_TEX_5_MIP_ADDRESS__MASK 0xfffff000
-#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT 12
-static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
-{
-	assert(!(val & 0xfff));
-	return (((val >> 12)) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
-}
-
-#ifdef __cplusplus
-#endif
-
-#endif /* A2XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 0d8133f317..0dc255ddf5 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -113,7 +113,7 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
 	uint32_t *ptr, len;
 	int i, ret;
-	msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+	a2xx_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
 	DBG("%s", gpu->name);
@@ -469,7 +469,7 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
 static struct msm_gem_address_space *
 a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
 {
-	struct msm_mmu *mmu = msm_gpummu_new(&pdev->dev, gpu);
+	struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu);
 	struct msm_gem_address_space *aspace;
 	aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
index 161a075f94..53702f1999 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
@@ -19,4 +19,8 @@ struct a2xx_gpu {
 };
 #define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
+struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu);
+void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+		dma_addr_t *tran_error);
+
 #endif /* __A2XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
new file mode 100644
index 0000000000..39641551ee
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+#include "adreno_gpu.h"
+#include "a2xx_gpu.h"
+
+#include "a2xx.xml.h"
+
+struct a2xx_gpummu {
+	struct msm_mmu base;
+	struct msm_gpu *gpu;
+	dma_addr_t pt_base;
+	uint32_t *table;
+};
+#define to_a2xx_gpummu(x) container_of(x, struct a2xx_gpummu, base)
+
+#define GPUMMU_VA_START SZ_16M
+#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
+#define GPUMMU_PAGE_SIZE SZ_4K
+#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
+
+static void a2xx_gpummu_detach(struct msm_mmu *mmu)
+{
+}
+
+static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
+		struct sg_table *sgt, size_t len, int prot)
+{
+	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
+	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+	struct sg_dma_page_iter dma_iter;
+	unsigned prot_bits = 0;
+
+	if (prot & IOMMU_WRITE)
+		prot_bits |= 1;
+	if (prot & IOMMU_READ)
+		prot_bits |= 2;
+
+	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
+		int i;
+
+		for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
+			gpummu->table[idx++] = (addr + i) | prot_bits;
+	}
+
+	/* we can improve by deferring flush for multiple map() */
+	gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+	return 0;
+}
+
+static int a2xx_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
+{
+	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
+	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+	unsigned i;
+
+	for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
+		gpummu->table[idx] = 0;
+
+	gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+	return 0;
+}
+
+static void a2xx_gpummu_resume_translation(struct msm_mmu *mmu)
+{
+}
+
+static void a2xx_gpummu_destroy(struct msm_mmu *mmu)
+{
+	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
+
+	dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
+		DMA_ATTR_FORCE_CONTIGUOUS);
+
+	kfree(gpummu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+		.detach = a2xx_gpummu_detach,
+		.map = a2xx_gpummu_map,
+		.unmap = a2xx_gpummu_unmap,
+		.destroy = a2xx_gpummu_destroy,
+		.resume_translation = a2xx_gpummu_resume_translation,
+};
+
+struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu)
+{
+	struct a2xx_gpummu *gpummu;
+
+	gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
+	if (!gpummu)
+		return ERR_PTR(-ENOMEM);
+
+	gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
+		GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
+	if (!gpummu->table) {
+		kfree(gpummu);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	gpummu->gpu = gpu;
+	msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);
+
+	return &gpummu->base;
+}
+
+void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+		dma_addr_t *tran_error)
+{
+	dma_addr_t base = to_a2xx_gpummu(mmu)->pt_base;
+
+	*pt_base = base;
+	*tran_error = base + TABLE_SIZE; /* 32-byte aligned */
+}
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
deleted file mode 100644
index 5edd740ad3..0000000000
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ /dev/null
@@ -1,3268 +0,0 @@
-#ifndef A3XX_XML
-#define A3XX_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
- -This file was generated by the rules-ng-ng gen_header.py tool in this git repository: -http://gitlab.freedesktop.org/mesa/mesa/ -git clone https://gitlab.freedesktop.org/mesa/mesa.git - -The rules-ng-ng source files this header was generated from are: - -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84323 bytes, from Wed Aug 23 10:39:39 2023) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024) - -Copyright (C) 2013-2024 by the following authors: -- Rob Clark Rob Clark -- Ilia Mirkin Ilia Mirkin - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -*/ - -#ifdef __KERNEL__ -#include -#define assert(x) BUG_ON(!(x)) -#else -#include -#endif - -#ifdef __cplusplus -#define __struct_cast(X) -#else -#define __struct_cast(X) (struct X) -#endif - -enum a3xx_tile_mode { - LINEAR = 0, - TILE_4X4 = 1, - TILE_32X32 = 2, - TILE_4X2 = 3, -}; - -enum a3xx_state_block_id { - HLSQ_BLOCK_ID_TP_TEX = 2, - HLSQ_BLOCK_ID_TP_MIPMAP = 3, - HLSQ_BLOCK_ID_SP_VS = 4, - HLSQ_BLOCK_ID_SP_FS = 6, -}; - -enum a3xx_cache_opcode { - INVALIDATE = 1, -}; - -enum a3xx_vtx_fmt { - VFMT_32_FLOAT = 0, - VFMT_32_32_FLOAT = 1, - VFMT_32_32_32_FLOAT = 2, - VFMT_32_32_32_32_FLOAT = 3, - VFMT_16_FLOAT = 4, - VFMT_16_16_FLOAT = 5, - VFMT_16_16_16_FLOAT = 6, - VFMT_16_16_16_16_FLOAT = 7, - VFMT_32_FIXED = 8, - VFMT_32_32_FIXED = 9, - VFMT_32_32_32_FIXED = 10, - VFMT_32_32_32_32_FIXED = 11, - VFMT_16_SINT = 16, - VFMT_16_16_SINT = 17, - VFMT_16_16_16_SINT = 18, - VFMT_16_16_16_16_SINT = 19, - VFMT_16_UINT = 20, - VFMT_16_16_UINT = 21, - VFMT_16_16_16_UINT = 22, - VFMT_16_16_16_16_UINT = 23, - VFMT_16_SNORM = 24, - VFMT_16_16_SNORM = 25, - VFMT_16_16_16_SNORM = 26, - VFMT_16_16_16_16_SNORM = 27, - VFMT_16_UNORM = 28, - VFMT_16_16_UNORM = 29, - VFMT_16_16_16_UNORM = 30, - VFMT_16_16_16_16_UNORM = 31, - VFMT_32_UINT = 32, - VFMT_32_32_UINT = 33, - VFMT_32_32_32_UINT = 34, - VFMT_32_32_32_32_UINT = 35, - VFMT_32_SINT = 36, - VFMT_32_32_SINT = 37, - VFMT_32_32_32_SINT = 38, - VFMT_32_32_32_32_SINT = 39, - VFMT_8_UINT = 40, - VFMT_8_8_UINT = 41, - VFMT_8_8_8_UINT = 42, - VFMT_8_8_8_8_UINT = 43, - VFMT_8_UNORM = 44, - VFMT_8_8_UNORM = 45, - VFMT_8_8_8_UNORM = 46, - VFMT_8_8_8_8_UNORM = 47, - VFMT_8_SINT = 48, - VFMT_8_8_SINT = 49, - VFMT_8_8_8_SINT = 50, - VFMT_8_8_8_8_SINT = 51, - VFMT_8_SNORM = 52, - VFMT_8_8_SNORM = 53, - VFMT_8_8_8_SNORM = 54, - VFMT_8_8_8_8_SNORM = 55, - VFMT_10_10_10_2_UINT = 56, - VFMT_10_10_10_2_UNORM = 57, - VFMT_10_10_10_2_SINT = 58, - VFMT_10_10_10_2_SNORM = 59, - VFMT_2_10_10_10_UINT = 60, - VFMT_2_10_10_10_UNORM = 61, - VFMT_2_10_10_10_SINT = 62, - VFMT_2_10_10_10_SNORM = 63, - VFMT_NONE = 255, -}; - -enum a3xx_tex_fmt { - TFMT_5_6_5_UNORM = 4, - TFMT_5_5_5_1_UNORM = 5, - TFMT_4_4_4_4_UNORM = 7, - TFMT_Z16_UNORM = 9, - TFMT_X8Z24_UNORM = 10, - TFMT_Z32_FLOAT = 11, - TFMT_UV_64X32 = 16, - TFMT_VU_64X32 = 17, - TFMT_Y_64X32 = 18, - TFMT_NV12_64X32 = 19, - TFMT_UV_LINEAR = 20, - TFMT_VU_LINEAR = 21, - TFMT_Y_LINEAR = 22, - TFMT_NV12_LINEAR = 23, - TFMT_I420_Y = 24, - TFMT_I420_U = 26, - TFMT_I420_V = 27, - TFMT_ATC_RGB = 32, - TFMT_ATC_RGBA_EXPLICIT = 33, - TFMT_ETC1 = 34, - TFMT_ATC_RGBA_INTERPOLATED = 35, - TFMT_DXT1 = 36, - TFMT_DXT3 = 37, - TFMT_DXT5 = 38, - TFMT_2_10_10_10_UNORM = 40, - TFMT_10_10_10_2_UNORM = 41, - TFMT_9_9_9_E5_FLOAT = 42, - TFMT_11_11_10_FLOAT = 43, - TFMT_A8_UNORM = 44, - TFMT_L8_UNORM = 45, - TFMT_L8_A8_UNORM = 47, - TFMT_8_UNORM = 48, - TFMT_8_8_UNORM = 49, - TFMT_8_8_8_UNORM = 50, - TFMT_8_8_8_8_UNORM = 51, - TFMT_8_SNORM = 52, - TFMT_8_8_SNORM = 53, - TFMT_8_8_8_SNORM = 54, - TFMT_8_8_8_8_SNORM = 55, - TFMT_8_UINT = 56, - TFMT_8_8_UINT = 57, - TFMT_8_8_8_UINT = 58, - TFMT_8_8_8_8_UINT = 59, - TFMT_8_SINT = 60, - TFMT_8_8_SINT = 61, - TFMT_8_8_8_SINT = 62, - TFMT_8_8_8_8_SINT = 63, - TFMT_16_FLOAT = 64, - TFMT_16_16_FLOAT = 65, - TFMT_16_16_16_16_FLOAT = 67, - TFMT_16_UINT = 68, - TFMT_16_16_UINT = 69, - TFMT_16_16_16_16_UINT = 71, - TFMT_16_SINT = 72, - TFMT_16_16_SINT = 73, - TFMT_16_16_16_16_SINT = 75, - TFMT_16_UNORM = 76, - TFMT_16_16_UNORM = 77, - TFMT_16_16_16_16_UNORM = 79, - TFMT_16_SNORM = 80, - TFMT_16_16_SNORM = 81, 
- TFMT_16_16_16_16_SNORM = 83, - TFMT_32_FLOAT = 84, - TFMT_32_32_FLOAT = 85, - TFMT_32_32_32_32_FLOAT = 87, - TFMT_32_UINT = 88, - TFMT_32_32_UINT = 89, - TFMT_32_32_32_32_UINT = 91, - TFMT_32_SINT = 92, - TFMT_32_32_SINT = 93, - TFMT_32_32_32_32_SINT = 95, - TFMT_2_10_10_10_UINT = 96, - TFMT_10_10_10_2_UINT = 97, - TFMT_ETC2_RG11_SNORM = 112, - TFMT_ETC2_RG11_UNORM = 113, - TFMT_ETC2_R11_SNORM = 114, - TFMT_ETC2_R11_UNORM = 115, - TFMT_ETC2_RGBA8 = 116, - TFMT_ETC2_RGB8A1 = 117, - TFMT_ETC2_RGB8 = 118, - TFMT_NONE = 255, -}; - -enum a3xx_color_fmt { - RB_R5G6B5_UNORM = 0, - RB_R5G5B5A1_UNORM = 1, - RB_R4G4B4A4_UNORM = 3, - RB_R8G8B8_UNORM = 4, - RB_R8G8B8A8_UNORM = 8, - RB_R8G8B8A8_SNORM = 9, - RB_R8G8B8A8_UINT = 10, - RB_R8G8B8A8_SINT = 11, - RB_R8G8_UNORM = 12, - RB_R8G8_SNORM = 13, - RB_R8G8_UINT = 14, - RB_R8G8_SINT = 15, - RB_R10G10B10A2_UNORM = 16, - RB_A2R10G10B10_UNORM = 17, - RB_R10G10B10A2_UINT = 18, - RB_A2R10G10B10_UINT = 19, - RB_A8_UNORM = 20, - RB_R8_UNORM = 21, - RB_R16_FLOAT = 24, - RB_R16G16_FLOAT = 25, - RB_R16G16B16A16_FLOAT = 27, - RB_R11G11B10_FLOAT = 28, - RB_R16_SNORM = 32, - RB_R16G16_SNORM = 33, - RB_R16G16B16A16_SNORM = 35, - RB_R16_UNORM = 36, - RB_R16G16_UNORM = 37, - RB_R16G16B16A16_UNORM = 39, - RB_R16_SINT = 40, - RB_R16G16_SINT = 41, - RB_R16G16B16A16_SINT = 43, - RB_R16_UINT = 44, - RB_R16G16_UINT = 45, - RB_R16G16B16A16_UINT = 47, - RB_R32_FLOAT = 48, - RB_R32G32_FLOAT = 49, - RB_R32G32B32A32_FLOAT = 51, - RB_R32_SINT = 52, - RB_R32G32_SINT = 53, - RB_R32G32B32A32_SINT = 55, - RB_R32_UINT = 56, - RB_R32G32_UINT = 57, - RB_R32G32B32A32_UINT = 59, - RB_NONE = 255, -}; - -enum a3xx_cp_perfcounter_select { - CP_ALWAYS_COUNT = 0, - CP_AHB_PFPTRANS_WAIT = 3, - CP_AHB_NRTTRANS_WAIT = 6, - CP_CSF_NRT_READ_WAIT = 8, - CP_CSF_I1_FIFO_FULL = 9, - CP_CSF_I2_FIFO_FULL = 10, - CP_CSF_ST_FIFO_FULL = 11, - CP_RESERVED_12 = 12, - CP_CSF_RING_ROQ_FULL = 13, - CP_CSF_I1_ROQ_FULL = 14, - CP_CSF_I2_ROQ_FULL = 15, - CP_CSF_ST_ROQ_FULL = 16, - CP_RESERVED_17 = 17, - CP_MIU_TAG_MEM_FULL = 18, - CP_MIU_NRT_WRITE_STALLED = 22, - CP_MIU_NRT_READ_STALLED = 23, - CP_ME_REGS_RB_DONE_FIFO_FULL = 26, - CP_ME_REGS_VS_EVENT_FIFO_FULL = 27, - CP_ME_REGS_PS_EVENT_FIFO_FULL = 28, - CP_ME_REGS_CF_EVENT_FIFO_FULL = 29, - CP_ME_MICRO_RB_STARVED = 30, - CP_AHB_RBBM_DWORD_SENT = 40, - CP_ME_BUSY_CLOCKS = 41, - CP_ME_WAIT_CONTEXT_AVAIL = 42, - CP_PFP_TYPE0_PACKET = 43, - CP_PFP_TYPE3_PACKET = 44, - CP_CSF_RB_WPTR_NEQ_RPTR = 45, - CP_CSF_I1_SIZE_NEQ_ZERO = 46, - CP_CSF_I2_SIZE_NEQ_ZERO = 47, - CP_CSF_RBI1I2_FETCHING = 48, -}; - -enum a3xx_gras_tse_perfcounter_select { - GRAS_TSEPERF_INPUT_PRIM = 0, - GRAS_TSEPERF_INPUT_NULL_PRIM = 1, - GRAS_TSEPERF_TRIVAL_REJ_PRIM = 2, - GRAS_TSEPERF_CLIPPED_PRIM = 3, - GRAS_TSEPERF_NEW_PRIM = 4, - GRAS_TSEPERF_ZERO_AREA_PRIM = 5, - GRAS_TSEPERF_FACENESS_CULLED_PRIM = 6, - GRAS_TSEPERF_ZERO_PIXEL_PRIM = 7, - GRAS_TSEPERF_OUTPUT_NULL_PRIM = 8, - GRAS_TSEPERF_OUTPUT_VISIBLE_PRIM = 9, - GRAS_TSEPERF_PRE_CLIP_PRIM = 10, - GRAS_TSEPERF_POST_CLIP_PRIM = 11, - GRAS_TSEPERF_WORKING_CYCLES = 12, - GRAS_TSEPERF_PC_STARVE = 13, - GRAS_TSERASPERF_STALL = 14, -}; - -enum a3xx_gras_ras_perfcounter_select { - GRAS_RASPERF_16X16_TILES = 0, - GRAS_RASPERF_8X8_TILES = 1, - GRAS_RASPERF_4X4_TILES = 2, - GRAS_RASPERF_WORKING_CYCLES = 3, - GRAS_RASPERF_STALL_CYCLES_BY_RB = 4, - GRAS_RASPERF_STALL_CYCLES_BY_VSC = 5, - GRAS_RASPERF_STARVE_CYCLES_BY_TSE = 6, -}; - -enum a3xx_hlsq_perfcounter_select { - HLSQ_PERF_SP_VS_CONSTANT = 0, - HLSQ_PERF_SP_VS_INSTRUCTIONS = 1, - 
HLSQ_PERF_SP_FS_CONSTANT = 2, - HLSQ_PERF_SP_FS_INSTRUCTIONS = 3, - HLSQ_PERF_TP_STATE = 4, - HLSQ_PERF_QUADS = 5, - HLSQ_PERF_PIXELS = 6, - HLSQ_PERF_VERTICES = 7, - HLSQ_PERF_FS8_THREADS = 8, - HLSQ_PERF_FS16_THREADS = 9, - HLSQ_PERF_FS32_THREADS = 10, - HLSQ_PERF_VS8_THREADS = 11, - HLSQ_PERF_VS16_THREADS = 12, - HLSQ_PERF_SP_VS_DATA_BYTES = 13, - HLSQ_PERF_SP_FS_DATA_BYTES = 14, - HLSQ_PERF_ACTIVE_CYCLES = 15, - HLSQ_PERF_STALL_CYCLES_SP_STATE = 16, - HLSQ_PERF_STALL_CYCLES_SP_VS = 17, - HLSQ_PERF_STALL_CYCLES_SP_FS = 18, - HLSQ_PERF_STALL_CYCLES_UCHE = 19, - HLSQ_PERF_RBBM_LOAD_CYCLES = 20, - HLSQ_PERF_DI_TO_VS_START_SP0 = 21, - HLSQ_PERF_DI_TO_FS_START_SP0 = 22, - HLSQ_PERF_VS_START_TO_DONE_SP0 = 23, - HLSQ_PERF_FS_START_TO_DONE_SP0 = 24, - HLSQ_PERF_SP_STATE_COPY_CYCLES_VS = 25, - HLSQ_PERF_SP_STATE_COPY_CYCLES_FS = 26, - HLSQ_PERF_UCHE_LATENCY_CYCLES = 27, - HLSQ_PERF_UCHE_LATENCY_COUNT = 28, -}; - -enum a3xx_pc_perfcounter_select { - PC_PCPERF_VISIBILITY_STREAMS = 0, - PC_PCPERF_TOTAL_INSTANCES = 1, - PC_PCPERF_PRIMITIVES_PC_VPC = 2, - PC_PCPERF_PRIMITIVES_KILLED_BY_VS = 3, - PC_PCPERF_PRIMITIVES_VISIBLE_BY_VS = 4, - PC_PCPERF_DRAWCALLS_KILLED_BY_VS = 5, - PC_PCPERF_DRAWCALLS_VISIBLE_BY_VS = 6, - PC_PCPERF_VERTICES_TO_VFD = 7, - PC_PCPERF_REUSED_VERTICES = 8, - PC_PCPERF_CYCLES_STALLED_BY_VFD = 9, - PC_PCPERF_CYCLES_STALLED_BY_TSE = 10, - PC_PCPERF_CYCLES_STALLED_BY_VBIF = 11, - PC_PCPERF_CYCLES_IS_WORKING = 12, -}; - -enum a3xx_rb_perfcounter_select { - RB_RBPERF_ACTIVE_CYCLES_ANY = 0, - RB_RBPERF_ACTIVE_CYCLES_ALL = 1, - RB_RBPERF_STARVE_CYCLES_BY_SP = 2, - RB_RBPERF_STARVE_CYCLES_BY_RAS = 3, - RB_RBPERF_STARVE_CYCLES_BY_MARB = 4, - RB_RBPERF_STALL_CYCLES_BY_MARB = 5, - RB_RBPERF_STALL_CYCLES_BY_HLSQ = 6, - RB_RBPERF_RB_MARB_DATA = 7, - RB_RBPERF_SP_RB_QUAD = 8, - RB_RBPERF_RAS_EARLY_Z_QUADS = 9, - RB_RBPERF_GMEM_CH0_READ = 10, - RB_RBPERF_GMEM_CH1_READ = 11, - RB_RBPERF_GMEM_CH0_WRITE = 12, - RB_RBPERF_GMEM_CH1_WRITE = 13, - RB_RBPERF_CP_CONTEXT_DONE = 14, - RB_RBPERF_CP_CACHE_FLUSH = 15, - RB_RBPERF_CP_ZPASS_DONE = 16, -}; - -enum a3xx_rbbm_perfcounter_select { - RBBM_ALAWYS_ON = 0, - RBBM_VBIF_BUSY = 1, - RBBM_TSE_BUSY = 2, - RBBM_RAS_BUSY = 3, - RBBM_PC_DCALL_BUSY = 4, - RBBM_PC_VSD_BUSY = 5, - RBBM_VFD_BUSY = 6, - RBBM_VPC_BUSY = 7, - RBBM_UCHE_BUSY = 8, - RBBM_VSC_BUSY = 9, - RBBM_HLSQ_BUSY = 10, - RBBM_ANY_RB_BUSY = 11, - RBBM_ANY_TEX_BUSY = 12, - RBBM_ANY_USP_BUSY = 13, - RBBM_ANY_MARB_BUSY = 14, - RBBM_ANY_ARB_BUSY = 15, - RBBM_AHB_STATUS_BUSY = 16, - RBBM_AHB_STATUS_STALLED = 17, - RBBM_AHB_STATUS_TXFR = 18, - RBBM_AHB_STATUS_TXFR_SPLIT = 19, - RBBM_AHB_STATUS_TXFR_ERROR = 20, - RBBM_AHB_STATUS_LONG_STALL = 21, - RBBM_RBBM_STATUS_MASKED = 22, -}; - -enum a3xx_sp_perfcounter_select { - SP_LM_LOAD_INSTRUCTIONS = 0, - SP_LM_STORE_INSTRUCTIONS = 1, - SP_LM_ATOMICS = 2, - SP_UCHE_LOAD_INSTRUCTIONS = 3, - SP_UCHE_STORE_INSTRUCTIONS = 4, - SP_UCHE_ATOMICS = 5, - SP_VS_TEX_INSTRUCTIONS = 6, - SP_VS_CFLOW_INSTRUCTIONS = 7, - SP_VS_EFU_INSTRUCTIONS = 8, - SP_VS_FULL_ALU_INSTRUCTIONS = 9, - SP_VS_HALF_ALU_INSTRUCTIONS = 10, - SP_FS_TEX_INSTRUCTIONS = 11, - SP_FS_CFLOW_INSTRUCTIONS = 12, - SP_FS_EFU_INSTRUCTIONS = 13, - SP_FS_FULL_ALU_INSTRUCTIONS = 14, - SP_FS_HALF_ALU_INSTRUCTIONS = 15, - SP_FS_BARY_INSTRUCTIONS = 16, - SP_VS_INSTRUCTIONS = 17, - SP_FS_INSTRUCTIONS = 18, - SP_ADDR_LOCK_COUNT = 19, - SP_UCHE_READ_TRANS = 20, - SP_UCHE_WRITE_TRANS = 21, - SP_EXPORT_VPC_TRANS = 22, - SP_EXPORT_RB_TRANS = 23, - SP_PIXELS_KILLED = 24, - SP_ICL1_REQUESTS = 25, - SP_ICL1_MISSES = 26, - 
SP_ICL0_REQUESTS = 27, - SP_ICL0_MISSES = 28, - SP_ALU_ACTIVE_CYCLES = 29, - SP_EFU_ACTIVE_CYCLES = 30, - SP_STALL_CYCLES_BY_VPC = 31, - SP_STALL_CYCLES_BY_TP = 32, - SP_STALL_CYCLES_BY_UCHE = 33, - SP_STALL_CYCLES_BY_RB = 34, - SP_ACTIVE_CYCLES_ANY = 35, - SP_ACTIVE_CYCLES_ALL = 36, -}; - -enum a3xx_tp_perfcounter_select { - TPL1_TPPERF_L1_REQUESTS = 0, - TPL1_TPPERF_TP0_L1_REQUESTS = 1, - TPL1_TPPERF_TP0_L1_MISSES = 2, - TPL1_TPPERF_TP1_L1_REQUESTS = 3, - TPL1_TPPERF_TP1_L1_MISSES = 4, - TPL1_TPPERF_TP2_L1_REQUESTS = 5, - TPL1_TPPERF_TP2_L1_MISSES = 6, - TPL1_TPPERF_TP3_L1_REQUESTS = 7, - TPL1_TPPERF_TP3_L1_MISSES = 8, - TPL1_TPPERF_OUTPUT_TEXELS_POINT = 9, - TPL1_TPPERF_OUTPUT_TEXELS_BILINEAR = 10, - TPL1_TPPERF_OUTPUT_TEXELS_MIP = 11, - TPL1_TPPERF_OUTPUT_TEXELS_ANISO = 12, - TPL1_TPPERF_BILINEAR_OPS = 13, - TPL1_TPPERF_QUADSQUADS_OFFSET = 14, - TPL1_TPPERF_QUADQUADS_SHADOW = 15, - TPL1_TPPERF_QUADS_ARRAY = 16, - TPL1_TPPERF_QUADS_PROJECTION = 17, - TPL1_TPPERF_QUADS_GRADIENT = 18, - TPL1_TPPERF_QUADS_1D2D = 19, - TPL1_TPPERF_QUADS_3DCUBE = 20, - TPL1_TPPERF_ZERO_LOD = 21, - TPL1_TPPERF_OUTPUT_TEXELS = 22, - TPL1_TPPERF_ACTIVE_CYCLES_ANY = 23, - TPL1_TPPERF_ACTIVE_CYCLES_ALL = 24, - TPL1_TPPERF_STALL_CYCLES_BY_ARB = 25, - TPL1_TPPERF_LATENCY = 26, - TPL1_TPPERF_LATENCY_TRANS = 27, -}; - -enum a3xx_vfd_perfcounter_select { - VFD_PERF_UCHE_BYTE_FETCHED = 0, - VFD_PERF_UCHE_TRANS = 1, - VFD_PERF_VPC_BYPASS_COMPONENTS = 2, - VFD_PERF_FETCH_INSTRUCTIONS = 3, - VFD_PERF_DECODE_INSTRUCTIONS = 4, - VFD_PERF_ACTIVE_CYCLES = 5, - VFD_PERF_STALL_CYCLES_UCHE = 6, - VFD_PERF_STALL_CYCLES_HLSQ = 7, - VFD_PERF_STALL_CYCLES_VPC_BYPASS = 8, - VFD_PERF_STALL_CYCLES_VPC_ALLOC = 9, -}; - -enum a3xx_vpc_perfcounter_select { - VPC_PERF_SP_LM_PRIMITIVES = 0, - VPC_PERF_COMPONENTS_FROM_SP = 1, - VPC_PERF_SP_LM_COMPONENTS = 2, - VPC_PERF_ACTIVE_CYCLES = 3, - VPC_PERF_STALL_CYCLES_LM = 4, - VPC_PERF_STALL_CYCLES_RAS = 5, -}; - -enum a3xx_uche_perfcounter_select { - UCHE_UCHEPERF_VBIF_READ_BEATS_TP = 0, - UCHE_UCHEPERF_VBIF_READ_BEATS_VFD = 1, - UCHE_UCHEPERF_VBIF_READ_BEATS_HLSQ = 2, - UCHE_UCHEPERF_VBIF_READ_BEATS_MARB = 3, - UCHE_UCHEPERF_VBIF_READ_BEATS_SP = 4, - UCHE_UCHEPERF_READ_REQUESTS_TP = 8, - UCHE_UCHEPERF_READ_REQUESTS_VFD = 9, - UCHE_UCHEPERF_READ_REQUESTS_HLSQ = 10, - UCHE_UCHEPERF_READ_REQUESTS_MARB = 11, - UCHE_UCHEPERF_READ_REQUESTS_SP = 12, - UCHE_UCHEPERF_WRITE_REQUESTS_MARB = 13, - UCHE_UCHEPERF_WRITE_REQUESTS_SP = 14, - UCHE_UCHEPERF_TAG_CHECK_FAILS = 15, - UCHE_UCHEPERF_EVICTS = 16, - UCHE_UCHEPERF_FLUSHES = 17, - UCHE_UCHEPERF_VBIF_LATENCY_CYCLES = 18, - UCHE_UCHEPERF_VBIF_LATENCY_SAMPLES = 19, - UCHE_UCHEPERF_ACTIVE_CYCLES = 20, -}; - -enum a3xx_intp_mode { - SMOOTH = 0, - FLAT = 1, - ZERO = 2, - ONE = 3, -}; - -enum a3xx_repl_mode { - S = 1, - T = 2, - ONE_T = 3, -}; - -enum a3xx_tex_filter { - A3XX_TEX_NEAREST = 0, - A3XX_TEX_LINEAR = 1, - A3XX_TEX_ANISO = 2, -}; - -enum a3xx_tex_clamp { - A3XX_TEX_REPEAT = 0, - A3XX_TEX_CLAMP_TO_EDGE = 1, - A3XX_TEX_MIRROR_REPEAT = 2, - A3XX_TEX_CLAMP_TO_BORDER = 3, - A3XX_TEX_MIRROR_CLAMP = 4, -}; - -enum a3xx_tex_aniso { - A3XX_TEX_ANISO_1 = 0, - A3XX_TEX_ANISO_2 = 1, - A3XX_TEX_ANISO_4 = 2, - A3XX_TEX_ANISO_8 = 3, - A3XX_TEX_ANISO_16 = 4, -}; - -enum a3xx_tex_swiz { - A3XX_TEX_X = 0, - A3XX_TEX_Y = 1, - A3XX_TEX_Z = 2, - A3XX_TEX_W = 3, - A3XX_TEX_ZERO = 4, - A3XX_TEX_ONE = 5, -}; - -enum a3xx_tex_type { - A3XX_TEX_1D = 0, - A3XX_TEX_2D = 1, - A3XX_TEX_CUBE = 2, - A3XX_TEX_3D = 3, -}; - -enum a3xx_tex_msaa { - A3XX_TPL1_MSAA1X = 0, - 
A3XX_TPL1_MSAA2X = 1, - A3XX_TPL1_MSAA4X = 2, - A3XX_TPL1_MSAA8X = 3, -}; - -#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001 -#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002 -#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004 -#define A3XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008 -#define A3XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010 -#define A3XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020 -#define A3XX_INT0_VFD_ERROR 0x00000040 -#define A3XX_INT0_CP_SW_INT 0x00000080 -#define A3XX_INT0_CP_T0_PACKET_IN_IB 0x00000100 -#define A3XX_INT0_CP_OPCODE_ERROR 0x00000200 -#define A3XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400 -#define A3XX_INT0_CP_HW_FAULT 0x00000800 -#define A3XX_INT0_CP_DMA 0x00001000 -#define A3XX_INT0_CP_IB2_INT 0x00002000 -#define A3XX_INT0_CP_IB1_INT 0x00004000 -#define A3XX_INT0_CP_RB_INT 0x00008000 -#define A3XX_INT0_CP_REG_PROTECT_FAULT 0x00010000 -#define A3XX_INT0_CP_RB_DONE_TS 0x00020000 -#define A3XX_INT0_CP_VS_DONE_TS 0x00040000 -#define A3XX_INT0_CP_PS_DONE_TS 0x00080000 -#define A3XX_INT0_CACHE_FLUSH_TS 0x00100000 -#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000 -#define A3XX_INT0_MISC_HANG_DETECT 0x01000000 -#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000 - -#define REG_A3XX_RBBM_HW_VERSION 0x00000000 - -#define REG_A3XX_RBBM_HW_RELEASE 0x00000001 - -#define REG_A3XX_RBBM_HW_CONFIGURATION 0x00000002 - -#define REG_A3XX_RBBM_CLOCK_CTL 0x00000010 - -#define REG_A3XX_RBBM_SP_HYST_CNT 0x00000012 - -#define REG_A3XX_RBBM_SW_RESET_CMD 0x00000018 - -#define REG_A3XX_RBBM_AHB_CTL0 0x00000020 - -#define REG_A3XX_RBBM_AHB_CTL1 0x00000021 - -#define REG_A3XX_RBBM_AHB_CMD 0x00000022 - -#define REG_A3XX_RBBM_AHB_ERROR_STATUS 0x00000027 - -#define REG_A3XX_RBBM_GPR0_CTL 0x0000002e - -#define REG_A3XX_RBBM_STATUS 0x00000030 -#define A3XX_RBBM_STATUS_HI_BUSY 0x00000001 -#define A3XX_RBBM_STATUS_CP_ME_BUSY 0x00000002 -#define A3XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004 -#define A3XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000 -#define A3XX_RBBM_STATUS_VBIF_BUSY 0x00008000 -#define A3XX_RBBM_STATUS_TSE_BUSY 0x00010000 -#define A3XX_RBBM_STATUS_RAS_BUSY 0x00020000 -#define A3XX_RBBM_STATUS_RB_BUSY 0x00040000 -#define A3XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000 -#define A3XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000 -#define A3XX_RBBM_STATUS_VFD_BUSY 0x00200000 -#define A3XX_RBBM_STATUS_VPC_BUSY 0x00400000 -#define A3XX_RBBM_STATUS_UCHE_BUSY 0x00800000 -#define A3XX_RBBM_STATUS_SP_BUSY 0x01000000 -#define A3XX_RBBM_STATUS_TPL1_BUSY 0x02000000 -#define A3XX_RBBM_STATUS_MARB_BUSY 0x04000000 -#define A3XX_RBBM_STATUS_VSC_BUSY 0x08000000 -#define A3XX_RBBM_STATUS_ARB_BUSY 0x10000000 -#define A3XX_RBBM_STATUS_HLSQ_BUSY 0x20000000 -#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000 -#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000 - -#define REG_A3XX_RBBM_NQWAIT_UNTIL 0x00000040 - -#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033 - -#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050 - -#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x00000051 - -#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x00000054 - -#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x00000057 - -#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a - -#define REG_A3XX_RBBM_INT_SET_CMD 0x00000060 -#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061 -#define REG_A3XX_RBBM_INT_0_MASK 0x00000063 -#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064 -#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080 -#define A3XX_RBBM_PERFCTR_CTL_ENABLE 0x00000001 - -#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081 - -#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD1 0x00000082 - -#define 
REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000084 - -#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000085 - -#define REG_A3XX_RBBM_PERFCOUNTER0_SELECT 0x00000086 - -#define REG_A3XX_RBBM_PERFCOUNTER1_SELECT 0x00000087 - -#define REG_A3XX_RBBM_GPU_BUSY_MASKED 0x00000088 - -#define REG_A3XX_RBBM_PERFCTR_CP_0_LO 0x00000090 - -#define REG_A3XX_RBBM_PERFCTR_CP_0_HI 0x00000091 - -#define REG_A3XX_RBBM_PERFCTR_RBBM_0_LO 0x00000092 - -#define REG_A3XX_RBBM_PERFCTR_RBBM_0_HI 0x00000093 - -#define REG_A3XX_RBBM_PERFCTR_RBBM_1_LO 0x00000094 - -#define REG_A3XX_RBBM_PERFCTR_RBBM_1_HI 0x00000095 - -#define REG_A3XX_RBBM_PERFCTR_PC_0_LO 0x00000096 - -#define REG_A3XX_RBBM_PERFCTR_PC_0_HI 0x00000097 - -#define REG_A3XX_RBBM_PERFCTR_PC_1_LO 0x00000098 - -#define REG_A3XX_RBBM_PERFCTR_PC_1_HI 0x00000099 - -#define REG_A3XX_RBBM_PERFCTR_PC_2_LO 0x0000009a - -#define REG_A3XX_RBBM_PERFCTR_PC_2_HI 0x0000009b - -#define REG_A3XX_RBBM_PERFCTR_PC_3_LO 0x0000009c - -#define REG_A3XX_RBBM_PERFCTR_PC_3_HI 0x0000009d - -#define REG_A3XX_RBBM_PERFCTR_VFD_0_LO 0x0000009e - -#define REG_A3XX_RBBM_PERFCTR_VFD_0_HI 0x0000009f - -#define REG_A3XX_RBBM_PERFCTR_VFD_1_LO 0x000000a0 - -#define REG_A3XX_RBBM_PERFCTR_VFD_1_HI 0x000000a1 - -#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000a2 - -#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000a3 - -#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000a4 - -#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000a5 - -#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000a6 - -#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000a7 - -#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000a8 - -#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000a9 - -#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000aa - -#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000ab - -#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000ac - -#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000ad - -#define REG_A3XX_RBBM_PERFCTR_VPC_0_LO 0x000000ae - -#define REG_A3XX_RBBM_PERFCTR_VPC_0_HI 0x000000af - -#define REG_A3XX_RBBM_PERFCTR_VPC_1_LO 0x000000b0 - -#define REG_A3XX_RBBM_PERFCTR_VPC_1_HI 0x000000b1 - -#define REG_A3XX_RBBM_PERFCTR_TSE_0_LO 0x000000b2 - -#define REG_A3XX_RBBM_PERFCTR_TSE_0_HI 0x000000b3 - -#define REG_A3XX_RBBM_PERFCTR_TSE_1_LO 0x000000b4 - -#define REG_A3XX_RBBM_PERFCTR_TSE_1_HI 0x000000b5 - -#define REG_A3XX_RBBM_PERFCTR_RAS_0_LO 0x000000b6 - -#define REG_A3XX_RBBM_PERFCTR_RAS_0_HI 0x000000b7 - -#define REG_A3XX_RBBM_PERFCTR_RAS_1_LO 0x000000b8 - -#define REG_A3XX_RBBM_PERFCTR_RAS_1_HI 0x000000b9 - -#define REG_A3XX_RBBM_PERFCTR_UCHE_0_LO 0x000000ba - -#define REG_A3XX_RBBM_PERFCTR_UCHE_0_HI 0x000000bb - -#define REG_A3XX_RBBM_PERFCTR_UCHE_1_LO 0x000000bc - -#define REG_A3XX_RBBM_PERFCTR_UCHE_1_HI 0x000000bd - -#define REG_A3XX_RBBM_PERFCTR_UCHE_2_LO 0x000000be - -#define REG_A3XX_RBBM_PERFCTR_UCHE_2_HI 0x000000bf - -#define REG_A3XX_RBBM_PERFCTR_UCHE_3_LO 0x000000c0 - -#define REG_A3XX_RBBM_PERFCTR_UCHE_3_HI 0x000000c1 - -#define REG_A3XX_RBBM_PERFCTR_UCHE_4_LO 0x000000c2 - -#define REG_A3XX_RBBM_PERFCTR_UCHE_4_HI 0x000000c3 - -#define REG_A3XX_RBBM_PERFCTR_UCHE_5_LO 0x000000c4 - -#define REG_A3XX_RBBM_PERFCTR_UCHE_5_HI 0x000000c5 - -#define REG_A3XX_RBBM_PERFCTR_TP_0_LO 0x000000c6 - -#define REG_A3XX_RBBM_PERFCTR_TP_0_HI 0x000000c7 - -#define REG_A3XX_RBBM_PERFCTR_TP_1_LO 0x000000c8 - -#define REG_A3XX_RBBM_PERFCTR_TP_1_HI 0x000000c9 - -#define REG_A3XX_RBBM_PERFCTR_TP_2_LO 0x000000ca - -#define REG_A3XX_RBBM_PERFCTR_TP_2_HI 0x000000cb - -#define REG_A3XX_RBBM_PERFCTR_TP_3_LO 0x000000cc - -#define REG_A3XX_RBBM_PERFCTR_TP_3_HI 
0x000000cd - -#define REG_A3XX_RBBM_PERFCTR_TP_4_LO 0x000000ce - -#define REG_A3XX_RBBM_PERFCTR_TP_4_HI 0x000000cf - -#define REG_A3XX_RBBM_PERFCTR_TP_5_LO 0x000000d0 - -#define REG_A3XX_RBBM_PERFCTR_TP_5_HI 0x000000d1 - -#define REG_A3XX_RBBM_PERFCTR_SP_0_LO 0x000000d2 - -#define REG_A3XX_RBBM_PERFCTR_SP_0_HI 0x000000d3 - -#define REG_A3XX_RBBM_PERFCTR_SP_1_LO 0x000000d4 - -#define REG_A3XX_RBBM_PERFCTR_SP_1_HI 0x000000d5 - -#define REG_A3XX_RBBM_PERFCTR_SP_2_LO 0x000000d6 - -#define REG_A3XX_RBBM_PERFCTR_SP_2_HI 0x000000d7 - -#define REG_A3XX_RBBM_PERFCTR_SP_3_LO 0x000000d8 - -#define REG_A3XX_RBBM_PERFCTR_SP_3_HI 0x000000d9 - -#define REG_A3XX_RBBM_PERFCTR_SP_4_LO 0x000000da - -#define REG_A3XX_RBBM_PERFCTR_SP_4_HI 0x000000db - -#define REG_A3XX_RBBM_PERFCTR_SP_5_LO 0x000000dc - -#define REG_A3XX_RBBM_PERFCTR_SP_5_HI 0x000000dd - -#define REG_A3XX_RBBM_PERFCTR_SP_6_LO 0x000000de - -#define REG_A3XX_RBBM_PERFCTR_SP_6_HI 0x000000df - -#define REG_A3XX_RBBM_PERFCTR_SP_7_LO 0x000000e0 - -#define REG_A3XX_RBBM_PERFCTR_SP_7_HI 0x000000e1 - -#define REG_A3XX_RBBM_PERFCTR_RB_0_LO 0x000000e2 - -#define REG_A3XX_RBBM_PERFCTR_RB_0_HI 0x000000e3 - -#define REG_A3XX_RBBM_PERFCTR_RB_1_LO 0x000000e4 - -#define REG_A3XX_RBBM_PERFCTR_RB_1_HI 0x000000e5 - -#define REG_A3XX_RBBM_PERFCTR_PWR_0_LO 0x000000ea - -#define REG_A3XX_RBBM_PERFCTR_PWR_0_HI 0x000000eb - -#define REG_A3XX_RBBM_PERFCTR_PWR_1_LO 0x000000ec - -#define REG_A3XX_RBBM_PERFCTR_PWR_1_HI 0x000000ed - -#define REG_A3XX_RBBM_RBBM_CTL 0x00000100 - -#define REG_A3XX_RBBM_DEBUG_BUS_CTL 0x00000111 - -#define REG_A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x00000112 - -#define REG_A3XX_CP_PFP_UCODE_ADDR 0x000001c9 - -#define REG_A3XX_CP_PFP_UCODE_DATA 0x000001ca - -#define REG_A3XX_CP_ROQ_ADDR 0x000001cc - -#define REG_A3XX_CP_ROQ_DATA 0x000001cd - -#define REG_A3XX_CP_MERCIU_ADDR 0x000001d1 - -#define REG_A3XX_CP_MERCIU_DATA 0x000001d2 - -#define REG_A3XX_CP_MERCIU_DATA2 0x000001d3 - -#define REG_A3XX_CP_MEQ_ADDR 0x000001da - -#define REG_A3XX_CP_MEQ_DATA 0x000001db - -#define REG_A3XX_CP_WFI_PEND_CTR 0x000001f5 - -#define REG_A3XX_RBBM_PM_OVERRIDE2 0x0000039d - -#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445 - -#define REG_A3XX_CP_HW_FAULT 0x0000045c - -#define REG_A3XX_CP_PROTECT_CTRL 0x0000045e - -#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f - -#define REG_A3XX_CP_PROTECT(i0) (0x00000460 + 0x1*(i0)) - -static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; } - -#define REG_A3XX_CP_AHB_FAULT 0x0000054d - -#define REG_A3XX_SQ_GPR_MANAGEMENT 0x00000d00 - -#define REG_A3XX_SQ_INST_STORE_MANAGMENT 0x00000d02 - -#define REG_A3XX_TP0_CHICKEN 0x00000e1e - -#define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22 - -#define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23 - -#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040 -#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000 -#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTER 0x00002000 -#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTROID 0x00004000 -#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTROID 0x00008000 -#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000 -#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000 -#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000 -#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000 -#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000 -#define A3XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000 -#define A3XX_GRAS_CL_CLIP_CNTL_ZCOORD 0x00800000 -#define A3XX_GRAS_CL_CLIP_CNTL_WCOORD 0x01000000 -#define 
A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE 0x02000000 -#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK 0x1c000000 -#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT 26 -static inline uint32_t A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES(uint32_t val) -{ - return ((val) << A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT) & A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK; -} - -#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044 -#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff -#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0 -static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val) -{ - return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK; -} -#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00 -#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10 -static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val) -{ - return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK; -} - -#define REG_A3XX_GRAS_CL_VPORT_XOFFSET 0x00002048 -#define A3XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff -#define A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0 -static inline uint32_t A3XX_GRAS_CL_VPORT_XOFFSET(float val) -{ - return ((fui(val)) << A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_XOFFSET__MASK; -} - -#define REG_A3XX_GRAS_CL_VPORT_XSCALE 0x00002049 -#define A3XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff -#define A3XX_GRAS_CL_VPORT_XSCALE__SHIFT 0 -static inline uint32_t A3XX_GRAS_CL_VPORT_XSCALE(float val) -{ - return ((fui(val)) << A3XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_XSCALE__MASK; -} - -#define REG_A3XX_GRAS_CL_VPORT_YOFFSET 0x0000204a -#define A3XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff -#define A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0 -static inline uint32_t A3XX_GRAS_CL_VPORT_YOFFSET(float val) -{ - return ((fui(val)) << A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_YOFFSET__MASK; -} - -#define REG_A3XX_GRAS_CL_VPORT_YSCALE 0x0000204b -#define A3XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff -#define A3XX_GRAS_CL_VPORT_YSCALE__SHIFT 0 -static inline uint32_t A3XX_GRAS_CL_VPORT_YSCALE(float val) -{ - return ((fui(val)) << A3XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_YSCALE__MASK; -} - -#define REG_A3XX_GRAS_CL_VPORT_ZOFFSET 0x0000204c -#define A3XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff -#define A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0 -static inline uint32_t A3XX_GRAS_CL_VPORT_ZOFFSET(float val) -{ - return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_ZOFFSET__MASK; -} - -#define REG_A3XX_GRAS_CL_VPORT_ZSCALE 0x0000204d -#define A3XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff -#define A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0 -static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val) -{ - return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_ZSCALE__MASK; -} - -#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068 -#define A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff -#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0 -static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val) -{ - return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK; -} -#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000 -#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16 -static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val) -{ - return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK; -} - -#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069 -#define 
A3XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff -#define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0 -static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val) -{ - return ((((int32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK; -} - -#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c -#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff -#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0 -static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val) -{ - return ((((int32_t)(val * 1048576.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK; -} - -#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d -#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff -#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0 -static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val) -{ - return ((((int32_t)(val * 64.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK; -} - -#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070 -#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001 -#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002 -#define A3XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004 -#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8 -#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3 -static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val) -{ - return ((((int32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK; -} -#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800 - -#define REG_A3XX_GRAS_SC_CONTROL 0x00002072 -#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x000000f0 -#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 4 -static inline uint32_t A3XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val) -{ - return ((val) << A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK; -} -#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000f00 -#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 8 -static inline uint32_t A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK; -} -#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000 -#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12 -static inline uint32_t A3XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val) -{ - return ((val) << A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK; -} - -#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x00002074 -#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 -#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff -#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0 -static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val) -{ - return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK; -} -#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000 -#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16 -static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val) -{ - return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK; -} - -#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x00002075 -#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 -#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff -#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0 -static inline uint32_t 
A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val) -{ - return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK; -} -#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000 -#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16 -static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val) -{ - return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK; -} - -#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x00002079 -#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 -#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff -#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0 -static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val) -{ - return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK; -} -#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000 -#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16 -static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val) -{ - return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK; -} - -#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000207a -#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 -#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff -#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0 -static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val) -{ - return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK; -} -#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000 -#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16 -static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val) -{ - return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK; -} - -#define REG_A3XX_RB_MODE_CONTROL 0x000020c0 -#define A3XX_RB_MODE_CONTROL_GMEM_BYPASS 0x00000080 -#define A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK 0x00000700 -#define A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT 8 -static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode val) -{ - return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK; -} -#define A3XX_RB_MODE_CONTROL_MRT__MASK 0x00003000 -#define A3XX_RB_MODE_CONTROL_MRT__SHIFT 12 -static inline uint32_t A3XX_RB_MODE_CONTROL_MRT(uint32_t val) -{ - return ((val) << A3XX_RB_MODE_CONTROL_MRT__SHIFT) & A3XX_RB_MODE_CONTROL_MRT__MASK; -} -#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000 -#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000 - -#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1 -#define A3XX_RB_RENDER_CONTROL_DUAL_COLOR_IN_ENABLE 0x00000001 -#define A3XX_RB_RENDER_CONTROL_YUV_IN_ENABLE 0x00000002 -#define A3XX_RB_RENDER_CONTROL_COV_VALUE_INPUT_ENABLE 0x00000004 -#define A3XX_RB_RENDER_CONTROL_FACENESS 0x00000008 -#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0 -#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4 -static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK; -} -#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000 -#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000 -#define A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK 0x0003c000 -#define A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT 14 -static inline uint32_t A3XX_RB_RENDER_CONTROL_COORD_MASK(uint32_t val) -{ - 
return ((val) << A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT) & A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK; -} -#define A3XX_RB_RENDER_CONTROL_I_CLAMP_ENABLE 0x00080000 -#define A3XX_RB_RENDER_CONTROL_COV_VALUE_OUTPUT_ENABLE 0x00100000 -#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000 -#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000 -#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24 -static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val) -{ - return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK; -} -#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_COVERAGE 0x40000000 -#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_ONE 0x80000000 - -#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2 -#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400 -#define A3XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000f000 -#define A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 12 -static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLES__MASK; -} -#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK 0xffff0000 -#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT 16 -static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val) -{ - return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK; -} - -#define REG_A3XX_RB_ALPHA_REF 0x000020c3 -#define A3XX_RB_ALPHA_REF_UINT__MASK 0x0000ff00 -#define A3XX_RB_ALPHA_REF_UINT__SHIFT 8 -static inline uint32_t A3XX_RB_ALPHA_REF_UINT(uint32_t val) -{ - return ((val) << A3XX_RB_ALPHA_REF_UINT__SHIFT) & A3XX_RB_ALPHA_REF_UINT__MASK; -} -#define A3XX_RB_ALPHA_REF_FLOAT__MASK 0xffff0000 -#define A3XX_RB_ALPHA_REF_FLOAT__SHIFT 16 -static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val) -{ - return ((_mesa_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK; -} - -#define REG_A3XX_RB_MRT(i0) (0x000020c4 + 0x4*(i0)) - -static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; } -#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008 -#define A3XX_RB_MRT_CONTROL_BLEND 0x00000010 -#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020 -#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00 -#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8 -static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val) -{ - return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK; -} -#define A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK 0x00003000 -#define A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT 12 -static inline uint32_t A3XX_RB_MRT_CONTROL_DITHER_MODE(enum adreno_rb_dither_mode val) -{ - return ((val) << A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT) & A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK; -} -#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000 -#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24 -static inline uint32_t A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val) -{ - return ((val) << A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK; -} - -static inline uint32_t REG_A3XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020c5 + 0x4*i0; } -#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f -#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a3xx_color_fmt val) -{ - return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK; -} -#define 
A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0 -#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6 -static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a3xx_tile_mode val) -{ - return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK; -} -#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00000c00 -#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 10 -static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; -} -#define A3XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00004000 -#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000 -#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17 -static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK; -} - -static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; } -#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK 0xfffffff0 -#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4 -static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK; -} - -static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; } -#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f -#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0 -static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK; -} -#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0 -#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5 -static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) -{ - return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK; -} -#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00 -#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8 -static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK; -} -#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000 -#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16 -static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK; -} -#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000 -#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21 -static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) -{ - return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK; -} -#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000 -#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24 -static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val) -{ 
- return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK; -} -#define A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE 0x20000000 - -#define REG_A3XX_RB_BLEND_RED 0x000020e4 -#define A3XX_RB_BLEND_RED_UINT__MASK 0x000000ff -#define A3XX_RB_BLEND_RED_UINT__SHIFT 0 -static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val) -{ - return ((val) << A3XX_RB_BLEND_RED_UINT__SHIFT) & A3XX_RB_BLEND_RED_UINT__MASK; -} -#define A3XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000 -#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16 -static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val) -{ - return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK; -} - -#define REG_A3XX_RB_BLEND_GREEN 0x000020e5 -#define A3XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff -#define A3XX_RB_BLEND_GREEN_UINT__SHIFT 0 -static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val) -{ - return ((val) << A3XX_RB_BLEND_GREEN_UINT__SHIFT) & A3XX_RB_BLEND_GREEN_UINT__MASK; -} -#define A3XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000 -#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16 -static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val) -{ - return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK; -} - -#define REG_A3XX_RB_BLEND_BLUE 0x000020e6 -#define A3XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff -#define A3XX_RB_BLEND_BLUE_UINT__SHIFT 0 -static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val) -{ - return ((val) << A3XX_RB_BLEND_BLUE_UINT__SHIFT) & A3XX_RB_BLEND_BLUE_UINT__MASK; -} -#define A3XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000 -#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16 -static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val) -{ - return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK; -} - -#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7 -#define A3XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff -#define A3XX_RB_BLEND_ALPHA_UINT__SHIFT 0 -static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val) -{ - return ((val) << A3XX_RB_BLEND_ALPHA_UINT__SHIFT) & A3XX_RB_BLEND_ALPHA_UINT__MASK; -} -#define A3XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000 -#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16 -static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val) -{ - return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK; -} - -#define REG_A3XX_RB_CLEAR_COLOR_DW0 0x000020e8 - -#define REG_A3XX_RB_CLEAR_COLOR_DW1 0x000020e9 - -#define REG_A3XX_RB_CLEAR_COLOR_DW2 0x000020ea - -#define REG_A3XX_RB_CLEAR_COLOR_DW3 0x000020eb - -#define REG_A3XX_RB_COPY_CONTROL 0x000020ec -#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003 -#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0 -static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val) -{ - return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK; -} -#define A3XX_RB_COPY_CONTROL_DEPTHCLEAR 0x00000008 -#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070 -#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4 -static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val) -{ - return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK; -} -#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080 -#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00 -#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8 -static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val) -{ - 
return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK; -} -#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000 -#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000 -#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14 -static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val) -{ - assert(!(val & 0x3fff)); - return (((val >> 14)) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK; -} - -#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed -#define A3XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0 -#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4 -static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK; -} - -#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee -#define A3XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff -#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0 -static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK; -} - -#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef -#define A3XX_RB_COPY_DEST_INFO_TILE__MASK 0x00000003 -#define A3XX_RB_COPY_DEST_INFO_TILE__SHIFT 0 -static inline uint32_t A3XX_RB_COPY_DEST_INFO_TILE(enum a3xx_tile_mode val) -{ - return ((val) << A3XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A3XX_RB_COPY_DEST_INFO_TILE__MASK; -} -#define A3XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc -#define A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2 -static inline uint32_t A3XX_RB_COPY_DEST_INFO_FORMAT(enum a3xx_color_fmt val) -{ - return ((val) << A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A3XX_RB_COPY_DEST_INFO_FORMAT__MASK; -} -#define A3XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300 -#define A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8 -static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK; -} -#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00 -#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10 -static inline uint32_t A3XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val) -{ - return ((val) << A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK; -} -#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000 -#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14 -static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val) -{ - return ((val) << A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK; -} -#define A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000 -#define A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18 -static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val) -{ - return ((val) << A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK; -} - -#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100 -#define A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001 -#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x00000002 -#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004 -#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008 -#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070 -#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4 -static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val) -{ - return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK; -} 
-#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080 -#define A3XX_RB_DEPTH_CONTROL_Z_READ_ENABLE 0x80000000 - -#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101 - -#define REG_A3XX_RB_DEPTH_INFO 0x00002102 -#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003 -#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0 -static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val) -{ - return ((val) << A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK; -} -#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff800 -#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11 -static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK; -} - -#define REG_A3XX_RB_DEPTH_PITCH 0x00002103 -#define A3XX_RB_DEPTH_PITCH__MASK 0xffffffff -#define A3XX_RB_DEPTH_PITCH__SHIFT 0 -static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val) -{ - assert(!(val & 0x7)); - return (((val >> 3)) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK; -} - -#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104 -#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001 -#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002 -#define A3XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004 -#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700 -#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8 -static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val) -{ - return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC__MASK; -} -#define A3XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800 -#define A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11 -static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val) -{ - return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL__MASK; -} -#define A3XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000 -#define A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14 -static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val) -{ - return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS__MASK; -} -#define A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000 -#define A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17 -static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val) -{ - return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK; -} -#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000 -#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20 -static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val) -{ - return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK; -} -#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000 -#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23 -static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val) -{ - return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK; -} -#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000 -#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26 -static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val) -{ - return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK; -} -#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000 -#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29 -static inline uint32_t 
A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val) -{ - return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK; -} - -#define REG_A3XX_RB_STENCIL_CLEAR 0x00002105 - -#define REG_A3XX_RB_STENCIL_INFO 0x00002106 -#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff800 -#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 11 -static inline uint32_t A3XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK; -} - -#define REG_A3XX_RB_STENCIL_PITCH 0x00002107 -#define A3XX_RB_STENCIL_PITCH__MASK 0xffffffff -#define A3XX_RB_STENCIL_PITCH__SHIFT 0 -static inline uint32_t A3XX_RB_STENCIL_PITCH(uint32_t val) -{ - assert(!(val & 0x7)); - return (((val >> 3)) << A3XX_RB_STENCIL_PITCH__SHIFT) & A3XX_RB_STENCIL_PITCH__MASK; -} - -#define REG_A3XX_RB_STENCILREFMASK 0x00002108 -#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff -#define A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0 -static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILREF(uint32_t val) -{ - return ((val) << A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILREF__MASK; -} -#define A3XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00 -#define A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8 -static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val) -{ - return ((val) << A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILMASK__MASK; -} -#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000 -#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16 -static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val) -{ - return ((val) << A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK; -} - -#define REG_A3XX_RB_STENCILREFMASK_BF 0x00002109 -#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff -#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0 -static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val) -{ - return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK; -} -#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00 -#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8 -static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val) -{ - return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK; -} -#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000 -#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16 -static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val) -{ - return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; -} - -#define REG_A3XX_RB_LRZ_VSC_CONTROL 0x0000210c -#define A3XX_RB_LRZ_VSC_CONTROL_BINNING_ENABLE 0x00000002 - -#define REG_A3XX_RB_WINDOW_OFFSET 0x0000210e -#define A3XX_RB_WINDOW_OFFSET_X__MASK 0x0000ffff -#define A3XX_RB_WINDOW_OFFSET_X__SHIFT 0 -static inline uint32_t A3XX_RB_WINDOW_OFFSET_X(uint32_t val) -{ - return ((val) << A3XX_RB_WINDOW_OFFSET_X__SHIFT) & A3XX_RB_WINDOW_OFFSET_X__MASK; -} -#define A3XX_RB_WINDOW_OFFSET_Y__MASK 0xffff0000 -#define A3XX_RB_WINDOW_OFFSET_Y__SHIFT 16 -static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val) -{ - return ((val) << A3XX_RB_WINDOW_OFFSET_Y__SHIFT) & A3XX_RB_WINDOW_OFFSET_Y__MASK; -} - -#define 
REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110 -#define A3XX_RB_SAMPLE_COUNT_CONTROL_RESET 0x00000001 -#define A3XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002 - -#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111 - -#define REG_A3XX_RB_Z_CLAMP_MIN 0x00002114 - -#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115 - -#define REG_A3XX_VGT_BIN_BASE 0x000021e1 - -#define REG_A3XX_VGT_BIN_SIZE 0x000021e2 - -#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4 -#define A3XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000 -#define A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16 -static inline uint32_t A3XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val) -{ - return ((val) << A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A3XX_PC_VSTREAM_CONTROL_SIZE__MASK; -} -#define A3XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000 -#define A3XX_PC_VSTREAM_CONTROL_N__SHIFT 22 -static inline uint32_t A3XX_PC_VSTREAM_CONTROL_N(uint32_t val) -{ - return ((val) << A3XX_PC_VSTREAM_CONTROL_N__SHIFT) & A3XX_PC_VSTREAM_CONTROL_N__MASK; -} - -#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea - -#define REG_A3XX_PC_PRIM_VTX_CNTL 0x000021ec -#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK 0x0000001f -#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT 0 -static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(uint32_t val) -{ - return ((val) << A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK; -} -#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x000000e0 -#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 5 -static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val) -{ - return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK; -} -#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000700 -#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT 8 -static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val) -{ - return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK; -} -#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_ENABLE 0x00001000 -#define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000 -#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000 -#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000 - -#define REG_A3XX_PC_RESTART_INDEX 0x000021ed - -#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200 -#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000030 -#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4 -static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK; -} -#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040 -#define A3XX_HLSQ_CONTROL_0_REG_COMPUTEMODE 0x00000100 -#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200 -#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400 -#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK 0x00fff000 -#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT 12 -static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK; -} -#define A3XX_HLSQ_CONTROL_0_REG_FSONLYTEX 0x02000000 -#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000 -#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000 -#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27 -static inline uint32_t 
A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK; -} -#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000 -#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000 -#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000 -#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000 - -#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201 -#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x000000c0 -#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6 -static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK; -} -#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100 -#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK 0x00ff0000 -#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT 16 -static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK; -} -#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK 0xff000000 -#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT 24 -static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK; -} - -#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202 -#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK 0x000003fc -#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT 2 -static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK; -} -#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK 0x03fc0000 -#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT 18 -static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK; -} -#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000 -#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26 -static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK; -} - -#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203 -#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK 0x000000ff -#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT 0 -static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK; -} -#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK 0x0000ff00 -#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT 8 -static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK; -} -#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK 0x00ff0000 -#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT 16 -static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT) & 
A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK; -} -#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK 0xff000000 -#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT 24 -static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK; -} - -#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204 -#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff -#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0 -static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val) -{ - return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK; -} -#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000 -#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12 -static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val) -{ - return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK; -} -#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000 -#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24 -static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val) -{ - return ((val) << A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK; -} - -#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205 -#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff -#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0 -static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val) -{ - return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK; -} -#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000 -#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12 -static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val) -{ - return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK; -} -#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000 -#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24 -static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val) -{ - return ((val) << A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK; -} - -#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206 -#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff -#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0 -static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK; -} -#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000 -#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16 -static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK; -} - -#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207 -#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff -#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0 -static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & 
A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK; -} -#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000 -#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16 -static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK; -} - -#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a -#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK 0x00000003 -#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT 0 -static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK; -} -#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK 0x00000ffc -#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT 2 -static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK; -} -#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK 0x003ff000 -#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT 12 -static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK; -} -#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK 0xffc00000 -#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT 22 -static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2(uint32_t val) -{ - return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK; -} - -#define REG_A3XX_HLSQ_CL_GLOBAL_WORK(i0) (0x0000220b + 0x2*(i0)) - -static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_SIZE(uint32_t i0) { return 0x0000220b + 0x2*i0; } - -static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_OFFSET(uint32_t i0) { return 0x0000220c + 0x2*i0; } - -#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211 - -#define REG_A3XX_HLSQ_CL_CONTROL_1_REG 0x00002212 - -#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214 - -#define REG_A3XX_HLSQ_CL_KERNEL_GROUP(i0) (0x00002215 + 0x1*(i0)) - -static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP_RATIO(uint32_t i0) { return 0x00002215 + 0x1*i0; } - -#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216 - -#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217 - -#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a - -#define REG_A3XX_VFD_CONTROL_0 0x00002240 -#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x0003ffff -#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0 -static inline uint32_t A3XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val) -{ - return ((val) << A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK; -} -#define A3XX_VFD_CONTROL_0_PACKETSIZE__MASK 0x003c0000 -#define A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT 18 -static inline uint32_t A3XX_VFD_CONTROL_0_PACKETSIZE(uint32_t val) -{ - return ((val) << A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT) & A3XX_VFD_CONTROL_0_PACKETSIZE__MASK; -} -#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x07c00000 -#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 22 -static inline uint32_t A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val) -{ - return ((val) << A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK; -} -#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xf8000000 -#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 27 -static inline uint32_t 
A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val) -{ - return ((val) << A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK; -} - -#define REG_A3XX_VFD_CONTROL_1 0x00002241 -#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000000f -#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0 -static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val) -{ - return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK; -} -#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK 0x000000f0 -#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT 4 -static inline uint32_t A3XX_VFD_CONTROL_1_MAXTHRESHOLD(uint32_t val) -{ - return ((val) << A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK; -} -#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK 0x00000f00 -#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT 8 -static inline uint32_t A3XX_VFD_CONTROL_1_MINTHRESHOLD(uint32_t val) -{ - return ((val) << A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK; -} -#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000 -#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16 -static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val) -{ - return ((val) << A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A3XX_VFD_CONTROL_1_REGID4VTX__MASK; -} -#define A3XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000 -#define A3XX_VFD_CONTROL_1_REGID4INST__SHIFT 24 -static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val) -{ - return ((val) << A3XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A3XX_VFD_CONTROL_1_REGID4INST__MASK; -} - -#define REG_A3XX_VFD_INDEX_MIN 0x00002242 - -#define REG_A3XX_VFD_INDEX_MAX 0x00002243 - -#define REG_A3XX_VFD_INSTANCEID_OFFSET 0x00002244 - -#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245 - -#define REG_A3XX_VFD_FETCH(i0) (0x00002246 + 0x2*(i0)) - -static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; } -#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f -#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0 -static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val) -{ - return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK; -} -#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0000ff80 -#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7 -static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val) -{ - return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK; -} -#define A3XX_VFD_FETCH_INSTR_0_INSTANCED 0x00010000 -#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000 -#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000 -#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18 -static inline uint32_t A3XX_VFD_FETCH_INSTR_0_INDEXCODE(uint32_t val) -{ - return ((val) << A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK; -} -#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000 -#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24 -static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val) -{ - return ((val) << A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK; -} - -static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; } - -#define REG_A3XX_VFD_DECODE(i0) (0x00002266 + 0x1*(i0)) - -static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; } -#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f -#define 
A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0 -static inline uint32_t A3XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val) -{ - return ((val) << A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK; -} -#define A3XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010 -#define A3XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0 -#define A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6 -static inline uint32_t A3XX_VFD_DECODE_INSTR_FORMAT(enum a3xx_vtx_fmt val) -{ - return ((val) << A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A3XX_VFD_DECODE_INSTR_FORMAT__MASK; -} -#define A3XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000 -#define A3XX_VFD_DECODE_INSTR_REGID__SHIFT 12 -static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val) -{ - return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK; -} -#define A3XX_VFD_DECODE_INSTR_INT 0x00100000 -#define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000 -#define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22 -static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A3XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A3XX_VFD_DECODE_INSTR_SWAP__MASK; -} -#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000 -#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24 -static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val) -{ - return ((val) << A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK; -} -#define A3XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000 -#define A3XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000 - -#define REG_A3XX_VFD_VS_THREADING_THRESHOLD 0x0000227e -#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK 0x0000000f -#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT 0 -static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(uint32_t val) -{ - return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK; -} -#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK 0x0000ff00 -#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT 8 -static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val) -{ - return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK; -} - -#define REG_A3XX_VPC_ATTR 0x00002280 -#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff -#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0 -static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val) -{ - return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK; -} -#define A3XX_VPC_ATTR_PSIZE 0x00000200 -#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000 -#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12 -static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val) -{ - return ((val) << A3XX_VPC_ATTR_THRDASSIGN__SHIFT) & A3XX_VPC_ATTR_THRDASSIGN__MASK; -} -#define A3XX_VPC_ATTR_LMSIZE__MASK 0xf0000000 -#define A3XX_VPC_ATTR_LMSIZE__SHIFT 28 -static inline uint32_t A3XX_VPC_ATTR_LMSIZE(uint32_t val) -{ - return ((val) << A3XX_VPC_ATTR_LMSIZE__SHIFT) & A3XX_VPC_ATTR_LMSIZE__MASK; -} - -#define REG_A3XX_VPC_PACK 0x00002281 -#define A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00 -#define A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8 -static inline uint32_t A3XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val) -{ - return ((val) << A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK; -} -#define A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000 -#define A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16 -static inline uint32_t 
A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val) -{ - return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK; -} - -#define REG_A3XX_VPC_VARYING_INTERP(i0) (0x00002282 + 0x1*(i0)) - -static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; } -#define A3XX_VPC_VARYING_INTERP_MODE_C0__MASK 0x00000003 -#define A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT 0 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C0(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C0__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_C1__MASK 0x0000000c -#define A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT 2 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C1(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C1__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_C2__MASK 0x00000030 -#define A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT 4 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C2(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C2__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_C3__MASK 0x000000c0 -#define A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT 6 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C3(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C3__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_C4__MASK 0x00000300 -#define A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT 8 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C4(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C4__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_C5__MASK 0x00000c00 -#define A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT 10 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C5(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C5__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_C6__MASK 0x00003000 -#define A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT 12 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C6(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C6__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_C7__MASK 0x0000c000 -#define A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT 14 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C7(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C7__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_C8__MASK 0x00030000 -#define A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT 16 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C8(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C8__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_C9__MASK 0x000c0000 -#define A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT 18 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C9(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C9__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_CA__MASK 0x00300000 -#define A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT 20 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CA(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT) & 
A3XX_VPC_VARYING_INTERP_MODE_CA__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_CB__MASK 0x00c00000 -#define A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT 22 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CB(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CB__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_CC__MASK 0x03000000 -#define A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT 24 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CC(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CC__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_CD__MASK 0x0c000000 -#define A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT 26 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CD(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CD__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_CE__MASK 0x30000000 -#define A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT 28 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CE(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CE__MASK; -} -#define A3XX_VPC_VARYING_INTERP_MODE_CF__MASK 0xc0000000 -#define A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT 30 -static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val) -{ - return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CF__MASK; -} - -#define REG_A3XX_VPC_VARYING_PS_REPL(i0) (0x00002286 + 0x1*(i0)) - -static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; } -#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK 0x00000003 -#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT 0 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C0(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK 0x0000000c -#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT 2 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C1(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK 0x00000030 -#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT 4 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C2(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK 0x000000c0 -#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT 6 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C3(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK 0x00000300 -#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT 8 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C4(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK 0x00000c00 -#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT 10 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C5(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK 0x00003000 
-#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT 12 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C6(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK 0x0000c000 -#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT 14 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C7(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK 0x00030000 -#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT 16 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C8(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK 0x000c0000 -#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT 18 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C9(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK 0x00300000 -#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT 20 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CA(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK 0x00c00000 -#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT 22 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CB(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK 0x03000000 -#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT 24 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CC(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK 0x0c000000 -#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT 26 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CD(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK 0x30000000 -#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT 28 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CE(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK; -} -#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK 0xc0000000 -#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT 30 -static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CF(enum a3xx_repl_mode val) -{ - return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK; -} - -#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a - -#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x0000228b - -#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0 -#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000 -#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x00040000 -#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18 -static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val) -{ - return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK; -} -#define A3XX_SP_SP_CTRL_REG_BINNING 0x00080000 -#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000 -#define 
A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20 -static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val) -{ - return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK; -} -#define A3XX_SP_SP_CTRL_REG_L0MODE__MASK 0x00c00000 -#define A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT 22 -static inline uint32_t A3XX_SP_SP_CTRL_REG_L0MODE(uint32_t val) -{ - return ((val) << A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT) & A3XX_SP_SP_CTRL_REG_L0MODE__MASK; -} - -#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4 -#define A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001 -#define A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0 -static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) -{ - return ((val) << A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK; -} -#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002 -#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1 -static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val) -{ - return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK; -} -#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004 -#define A3XX_SP_VS_CTRL_REG0_ALUSCHMODE 0x00000008 -#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 -#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 -static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 -#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 -static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000 -#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20 -static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK; -} -#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000 -#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000 -#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24 -static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val) -{ - return ((val) << A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG0_LENGTH__MASK; -} - -#define REG_A3XX_SP_VS_CTRL_REG1 0x000022c5 -#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff -#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0 -static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val) -{ - return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK; -} -#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00 -#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10 -static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val) -{ - return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK; -} -#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000 -#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24 -static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val) -{ - return ((val) << A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK; -} - -#define REG_A3XX_SP_VS_PARAM_REG 0x000022c6 -#define A3XX_SP_VS_PARAM_REG_POSREGID__MASK 
0x000000ff -#define A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0 -static inline uint32_t A3XX_SP_VS_PARAM_REG_POSREGID(uint32_t val) -{ - return ((val) << A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_POSREGID__MASK; -} -#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00 -#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8 -static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val) -{ - return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK; -} -#define A3XX_SP_VS_PARAM_REG_POS2DMODE 0x00010000 -#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0x01f00000 -#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20 -static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val) -{ - return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK; -} - -#define REG_A3XX_SP_VS_OUT(i0) (0x000022c7 + 0x1*(i0)) - -static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; } -#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff -#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0 -static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val) -{ - return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK; -} -#define A3XX_SP_VS_OUT_REG_A_HALF 0x00000100 -#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00 -#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9 -static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val) -{ - return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK; -} -#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000 -#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16 -static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val) -{ - return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK; -} -#define A3XX_SP_VS_OUT_REG_B_HALF 0x01000000 -#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000 -#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25 -static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val) -{ - return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK; -} - -#define REG_A3XX_SP_VS_VPC_DST(i0) (0x000022d0 + 0x1*(i0)) - -static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; } -#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x0000007f -#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0 -static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val) -{ - return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK; -} -#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x00007f00 -#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8 -static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val) -{ - return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK; -} -#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x007f0000 -#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16 -static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val) -{ - return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK; -} -#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0x7f000000 -#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24 -static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val) -{ - return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK; -} - -#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4 -#define 
A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff -#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0 -static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val) -{ - return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK; -} -#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 -#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 -static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK; -} -#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000 -#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25 -static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; -} - -#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5 - -#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6 -#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff -#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0 -static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val) -{ - assert(!(val & 0x7f)); - return (((val >> 7)) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK; -} -#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00 -#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8 -static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val) -{ - return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK; -} -#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7 -#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f -#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0 -static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val) -{ - return ((val) << A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK; -} -#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0 -#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5 -static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK; -} - -#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8 - -#define REG_A3XX_SP_VS_LENGTH_REG 0x000022df -#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff -#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT 0 -static inline uint32_t A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(uint32_t val) -{ - return ((val) << A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK; -} - -#define REG_A3XX_SP_FS_CTRL_REG0 0x000022e0 -#define A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001 -#define A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0 -static inline 
uint32_t A3XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) -{ - return ((val) << A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK; -} -#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002 -#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1 -static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val) -{ - return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK; -} -#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004 -#define A3XX_SP_FS_CTRL_REG0_ALUSCHMODE 0x00000008 -#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 -#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 -static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 -#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 -static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A3XX_SP_FS_CTRL_REG0_FSBYPASSENABLE 0x00020000 -#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP 0x00040000 -#define A3XX_SP_FS_CTRL_REG0_OUTORDERED 0x00080000 -#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000 -#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20 -static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK; -} -#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000 -#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000 -#define A3XX_SP_FS_CTRL_REG0_COMPUTEMODE 0x00800000 -#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000 -#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24 -static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val) -{ - return ((val) << A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG0_LENGTH__MASK; -} - -#define REG_A3XX_SP_FS_CTRL_REG1 0x000022e1 -#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff -#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0 -static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val) -{ - return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK; -} -#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00 -#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10 -static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val) -{ - return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK; -} -#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x00f00000 -#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 20 -static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val) -{ - return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK; -} -#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x7f000000 -#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24 -static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val) -{ - return ((val) << A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT) & A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK; -} - -#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2 -#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff -#define 
A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0 -static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val) -{ - return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK; -} -#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 -#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 -static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK; -} -#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000 -#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25 -static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; -} - -#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3 - -#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4 -#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff -#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0 -static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val) -{ - return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK; -} -#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00 -#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8 -static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val) -{ - return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK; -} -#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5 -#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f -#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0 -static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val) -{ - return ((val) << A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK; -} -#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0 -#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5 -static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK; -} - -#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6 - -#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x000022e8 - -#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9 - -#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec -#define A3XX_SP_FS_OUTPUT_REG_MRT__MASK 0x00000003 -#define A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0 -static inline uint32_t A3XX_SP_FS_OUTPUT_REG_MRT(uint32_t val) -{ - return ((val) << A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A3XX_SP_FS_OUTPUT_REG_MRT__MASK; -} -#define A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080 -#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00 -#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8 -static inline uint32_t 
A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val) -{ - return ((val) << A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK; -} - -#define REG_A3XX_SP_FS_MRT(i0) (0x000022f0 + 0x1*(i0)) - -static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; } -#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff -#define A3XX_SP_FS_MRT_REG_REGID__SHIFT 0 -static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val) -{ - return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK; -} -#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100 -#define A3XX_SP_FS_MRT_REG_SINT 0x00000400 -#define A3XX_SP_FS_MRT_REG_UINT 0x00000800 - -#define REG_A3XX_SP_FS_IMAGE_OUTPUT(i0) (0x000022f4 + 0x1*(i0)) - -static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; } -#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f -#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT 0 -static inline uint32_t A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT(enum a3xx_color_fmt val) -{ - return ((val) << A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT) & A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK; -} - -#define REG_A3XX_SP_FS_LENGTH_REG 0x000022ff -#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff -#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT 0 -static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val) -{ - return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK; -} - -#define REG_A3XX_PA_SC_AA_CONFIG 0x00002301 - -#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340 -#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff -#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0 -static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val) -{ - return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK; -} -#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00 -#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8 -static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val) -{ - return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK; -} -#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000 -#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT 16 -static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(uint32_t val) -{ - return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK; -} - -#define REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002341 - -#define REG_A3XX_TPL1_TP_FS_TEX_OFFSET 0x00002342 -#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff -#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0 -static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val) -{ - return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK; -} -#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00 -#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8 -static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val) -{ - return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK; -} -#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000 -#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT 16 -static inline uint32_t 
A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val) -{ - return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK; -} - -#define REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x00002343 - -#define REG_A3XX_VBIF_CLKON 0x00003001 - -#define REG_A3XX_VBIF_FIXED_SORT_EN 0x0000300c - -#define REG_A3XX_VBIF_FIXED_SORT_SEL0 0x0000300d - -#define REG_A3XX_VBIF_FIXED_SORT_SEL1 0x0000300e - -#define REG_A3XX_VBIF_ABIT_SORT 0x0000301c - -#define REG_A3XX_VBIF_ABIT_SORT_CONF 0x0000301d - -#define REG_A3XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a - -#define REG_A3XX_VBIF_IN_RD_LIM_CONF0 0x0000302c - -#define REG_A3XX_VBIF_IN_RD_LIM_CONF1 0x0000302d - -#define REG_A3XX_VBIF_IN_WR_LIM_CONF0 0x00003030 - -#define REG_A3XX_VBIF_IN_WR_LIM_CONF1 0x00003031 - -#define REG_A3XX_VBIF_OUT_RD_LIM_CONF0 0x00003034 - -#define REG_A3XX_VBIF_OUT_WR_LIM_CONF0 0x00003035 - -#define REG_A3XX_VBIF_DDR_OUT_MAX_BURST 0x00003036 - -#define REG_A3XX_VBIF_ARB_CTL 0x0000303c - -#define REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049 - -#define REG_A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x00003058 - -#define REG_A3XX_VBIF_OUT_AXI_AOOO_EN 0x0000305e - -#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f - -#define REG_A3XX_VBIF_PERF_CNT_EN 0x00003070 -#define A3XX_VBIF_PERF_CNT_EN_CNT0 0x00000001 -#define A3XX_VBIF_PERF_CNT_EN_CNT1 0x00000002 -#define A3XX_VBIF_PERF_CNT_EN_PWRCNT0 0x00000004 -#define A3XX_VBIF_PERF_CNT_EN_PWRCNT1 0x00000008 -#define A3XX_VBIF_PERF_CNT_EN_PWRCNT2 0x00000010 - -#define REG_A3XX_VBIF_PERF_CNT_CLR 0x00003071 -#define A3XX_VBIF_PERF_CNT_CLR_CNT0 0x00000001 -#define A3XX_VBIF_PERF_CNT_CLR_CNT1 0x00000002 -#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT0 0x00000004 -#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT1 0x00000008 -#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT2 0x00000010 - -#define REG_A3XX_VBIF_PERF_CNT_SEL 0x00003072 - -#define REG_A3XX_VBIF_PERF_CNT0_LO 0x00003073 - -#define REG_A3XX_VBIF_PERF_CNT0_HI 0x00003074 - -#define REG_A3XX_VBIF_PERF_CNT1_LO 0x00003075 - -#define REG_A3XX_VBIF_PERF_CNT1_HI 0x00003076 - -#define REG_A3XX_VBIF_PERF_PWR_CNT0_LO 0x00003077 - -#define REG_A3XX_VBIF_PERF_PWR_CNT0_HI 0x00003078 - -#define REG_A3XX_VBIF_PERF_PWR_CNT1_LO 0x00003079 - -#define REG_A3XX_VBIF_PERF_PWR_CNT1_HI 0x0000307a - -#define REG_A3XX_VBIF_PERF_PWR_CNT2_LO 0x0000307b - -#define REG_A3XX_VBIF_PERF_PWR_CNT2_HI 0x0000307c - -#define REG_A3XX_VSC_BIN_SIZE 0x00000c01 -#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f -#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0 -static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK; -} -#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0 -#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5 -static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK; -} - -#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02 - -#define REG_A3XX_VSC_PIPE(i0) (0x00000c06 + 0x3*(i0)) - -static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; } -#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff -#define A3XX_VSC_PIPE_CONFIG_X__SHIFT 0 -static inline uint32_t A3XX_VSC_PIPE_CONFIG_X(uint32_t val) -{ - return ((val) << A3XX_VSC_PIPE_CONFIG_X__SHIFT) & A3XX_VSC_PIPE_CONFIG_X__MASK; -} -#define A3XX_VSC_PIPE_CONFIG_Y__MASK 0x000ffc00 -#define A3XX_VSC_PIPE_CONFIG_Y__SHIFT 10 -static inline uint32_t 
A3XX_VSC_PIPE_CONFIG_Y(uint32_t val) -{ - return ((val) << A3XX_VSC_PIPE_CONFIG_Y__SHIFT) & A3XX_VSC_PIPE_CONFIG_Y__MASK; -} -#define A3XX_VSC_PIPE_CONFIG_W__MASK 0x00f00000 -#define A3XX_VSC_PIPE_CONFIG_W__SHIFT 20 -static inline uint32_t A3XX_VSC_PIPE_CONFIG_W(uint32_t val) -{ - return ((val) << A3XX_VSC_PIPE_CONFIG_W__SHIFT) & A3XX_VSC_PIPE_CONFIG_W__MASK; -} -#define A3XX_VSC_PIPE_CONFIG_H__MASK 0x0f000000 -#define A3XX_VSC_PIPE_CONFIG_H__SHIFT 24 -static inline uint32_t A3XX_VSC_PIPE_CONFIG_H(uint32_t val) -{ - return ((val) << A3XX_VSC_PIPE_CONFIG_H__SHIFT) & A3XX_VSC_PIPE_CONFIG_H__MASK; -} - -static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; } - -static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; } - -#define REG_A3XX_VSC_BIN_CONTROL 0x00000c3c -#define A3XX_VSC_BIN_CONTROL_BINNING_ENABLE 0x00000001 - -#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d - -#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48 - -#define REG_A3XX_PC_PERFCOUNTER1_SELECT 0x00000c49 - -#define REG_A3XX_PC_PERFCOUNTER2_SELECT 0x00000c4a - -#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b - -#define REG_A3XX_GRAS_TSE_DEBUG_ECO 0x00000c81 - -#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88 - -#define REG_A3XX_GRAS_PERFCOUNTER1_SELECT 0x00000c89 - -#define REG_A3XX_GRAS_PERFCOUNTER2_SELECT 0x00000c8a - -#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b - -#define REG_A3XX_GRAS_CL_USER_PLANE(i0) (0x00000ca0 + 0x4*(i0)) - -static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; } - -static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Y(uint32_t i0) { return 0x00000ca1 + 0x4*i0; } - -static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Z(uint32_t i0) { return 0x00000ca2 + 0x4*i0; } - -static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x00000ca3 + 0x4*i0; } - -#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0 - -#define REG_A3XX_RB_DEBUG_ECO_CONTROLS_ADDR 0x00000cc1 - -#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6 - -#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7 - -#define REG_A3XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0 -#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff -#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0 -static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val) -{ - return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK; -} -#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x0fffc000 -#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 14 -static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val) -{ - return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK; -} - -#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00 - -#define REG_A3XX_HLSQ_PERFCOUNTER1_SELECT 0x00000e01 - -#define REG_A3XX_HLSQ_PERFCOUNTER2_SELECT 0x00000e02 - -#define REG_A3XX_HLSQ_PERFCOUNTER3_SELECT 0x00000e03 - -#define REG_A3XX_HLSQ_PERFCOUNTER4_SELECT 0x00000e04 - -#define REG_A3XX_HLSQ_PERFCOUNTER5_SELECT 0x00000e05 - -#define REG_A3XX_UNKNOWN_0E43 0x00000e43 - -#define REG_A3XX_VFD_PERFCOUNTER0_SELECT 0x00000e44 - -#define REG_A3XX_VFD_PERFCOUNTER1_SELECT 0x00000e45 - -#define REG_A3XX_VPC_VPC_DEBUG_RAM_SEL 0x00000e61 - -#define REG_A3XX_VPC_VPC_DEBUG_RAM_READ 0x00000e62 - -#define REG_A3XX_VPC_PERFCOUNTER0_SELECT 0x00000e64 - -#define REG_A3XX_VPC_PERFCOUNTER1_SELECT 0x00000e65 - -#define 
REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG 0x00000e82 - -#define REG_A3XX_UCHE_PERFCOUNTER0_SELECT 0x00000e84 - -#define REG_A3XX_UCHE_PERFCOUNTER1_SELECT 0x00000e85 - -#define REG_A3XX_UCHE_PERFCOUNTER2_SELECT 0x00000e86 - -#define REG_A3XX_UCHE_PERFCOUNTER3_SELECT 0x00000e87 - -#define REG_A3XX_UCHE_PERFCOUNTER4_SELECT 0x00000e88 - -#define REG_A3XX_UCHE_PERFCOUNTER5_SELECT 0x00000e89 - -#define REG_A3XX_UCHE_CACHE_INVALIDATE0_REG 0x00000ea0 -#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK 0x0fffffff -#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT 0 -static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(uint32_t val) -{ - return ((val) << A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK; -} - -#define REG_A3XX_UCHE_CACHE_INVALIDATE1_REG 0x00000ea1 -#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK 0x0fffffff -#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT 0 -static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(uint32_t val) -{ - return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK; -} -#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK 0x30000000 -#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT 28 -static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_opcode val) -{ - return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK; -} -#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000 - -#define REG_A3XX_UNKNOWN_0EA6 0x00000ea6 - -#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4 - -#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5 - -#define REG_A3XX_SP_PERFCOUNTER2_SELECT 0x00000ec6 - -#define REG_A3XX_SP_PERFCOUNTER3_SELECT 0x00000ec7 - -#define REG_A3XX_SP_PERFCOUNTER4_SELECT 0x00000ec8 - -#define REG_A3XX_SP_PERFCOUNTER5_SELECT 0x00000ec9 - -#define REG_A3XX_SP_PERFCOUNTER6_SELECT 0x00000eca - -#define REG_A3XX_SP_PERFCOUNTER7_SELECT 0x00000ecb - -#define REG_A3XX_UNKNOWN_0EE0 0x00000ee0 - -#define REG_A3XX_UNKNOWN_0F03 0x00000f03 - -#define REG_A3XX_TP_PERFCOUNTER0_SELECT 0x00000f04 - -#define REG_A3XX_TP_PERFCOUNTER1_SELECT 0x00000f05 - -#define REG_A3XX_TP_PERFCOUNTER2_SELECT 0x00000f06 - -#define REG_A3XX_TP_PERFCOUNTER3_SELECT 0x00000f07 - -#define REG_A3XX_TP_PERFCOUNTER4_SELECT 0x00000f08 - -#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09 - -#define REG_A3XX_VGT_CL_INITIATOR 0x000021f0 - -#define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9 - -#define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc -#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f -#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0 -static inline uint32_t A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val) -{ - return ((val) << A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK; -} -#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0 -#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6 -static inline uint32_t A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val) -{ - return ((val) << A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK; -} -#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600 -#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9 -static inline uint32_t A3XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val) -{ - return ((val) << A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK; -} -#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800 
-#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11 -static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val) -{ - return ((val) << A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK; -} -#define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000 -#define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000 -#define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000 -#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000 -#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24 -static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val) -{ - return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK; -} - -#define REG_A3XX_VGT_IMMED_DATA 0x000021fd - -#define REG_A3XX_TEX_SAMP_0 0x00000000 -#define A3XX_TEX_SAMP_0_CLAMPENABLE 0x00000001 -#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002 -#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c -#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2 -static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val) -{ - return ((val) << A3XX_TEX_SAMP_0_XY_MAG__SHIFT) & A3XX_TEX_SAMP_0_XY_MAG__MASK; -} -#define A3XX_TEX_SAMP_0_XY_MIN__MASK 0x00000030 -#define A3XX_TEX_SAMP_0_XY_MIN__SHIFT 4 -static inline uint32_t A3XX_TEX_SAMP_0_XY_MIN(enum a3xx_tex_filter val) -{ - return ((val) << A3XX_TEX_SAMP_0_XY_MIN__SHIFT) & A3XX_TEX_SAMP_0_XY_MIN__MASK; -} -#define A3XX_TEX_SAMP_0_WRAP_S__MASK 0x000001c0 -#define A3XX_TEX_SAMP_0_WRAP_S__SHIFT 6 -static inline uint32_t A3XX_TEX_SAMP_0_WRAP_S(enum a3xx_tex_clamp val) -{ - return ((val) << A3XX_TEX_SAMP_0_WRAP_S__SHIFT) & A3XX_TEX_SAMP_0_WRAP_S__MASK; -} -#define A3XX_TEX_SAMP_0_WRAP_T__MASK 0x00000e00 -#define A3XX_TEX_SAMP_0_WRAP_T__SHIFT 9 -static inline uint32_t A3XX_TEX_SAMP_0_WRAP_T(enum a3xx_tex_clamp val) -{ - return ((val) << A3XX_TEX_SAMP_0_WRAP_T__SHIFT) & A3XX_TEX_SAMP_0_WRAP_T__MASK; -} -#define A3XX_TEX_SAMP_0_WRAP_R__MASK 0x00007000 -#define A3XX_TEX_SAMP_0_WRAP_R__SHIFT 12 -static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val) -{ - return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK; -} -#define A3XX_TEX_SAMP_0_ANISO__MASK 0x00038000 -#define A3XX_TEX_SAMP_0_ANISO__SHIFT 15 -static inline uint32_t A3XX_TEX_SAMP_0_ANISO(enum a3xx_tex_aniso val) -{ - return ((val) << A3XX_TEX_SAMP_0_ANISO__SHIFT) & A3XX_TEX_SAMP_0_ANISO__MASK; -} -#define A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK 0x00700000 -#define A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT 20 -static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val) -{ - return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK; -} -#define A3XX_TEX_SAMP_0_CUBEMAPSEAMLESSFILTOFF 0x01000000 -#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000 - -#define REG_A3XX_TEX_SAMP_1 0x00000001 -#define A3XX_TEX_SAMP_1_LOD_BIAS__MASK 0x000007ff -#define A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT 0 -static inline uint32_t A3XX_TEX_SAMP_1_LOD_BIAS(float val) -{ - return ((((int32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT) & A3XX_TEX_SAMP_1_LOD_BIAS__MASK; -} -#define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000 -#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12 -static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val) -{ - return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK; -} -#define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000 -#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22 -static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val) -{ 
- return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK; -} - -#define REG_A3XX_TEX_CONST_0 0x00000000 -#define A3XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003 -#define A3XX_TEX_CONST_0_TILE_MODE__SHIFT 0 -static inline uint32_t A3XX_TEX_CONST_0_TILE_MODE(enum a3xx_tile_mode val) -{ - return ((val) << A3XX_TEX_CONST_0_TILE_MODE__SHIFT) & A3XX_TEX_CONST_0_TILE_MODE__MASK; -} -#define A3XX_TEX_CONST_0_SRGB 0x00000004 -#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070 -#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4 -static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val) -{ - return ((val) << A3XX_TEX_CONST_0_SWIZ_X__SHIFT) & A3XX_TEX_CONST_0_SWIZ_X__MASK; -} -#define A3XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380 -#define A3XX_TEX_CONST_0_SWIZ_Y__SHIFT 7 -static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Y(enum a3xx_tex_swiz val) -{ - return ((val) << A3XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Y__MASK; -} -#define A3XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00 -#define A3XX_TEX_CONST_0_SWIZ_Z__SHIFT 10 -static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Z(enum a3xx_tex_swiz val) -{ - return ((val) << A3XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Z__MASK; -} -#define A3XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000 -#define A3XX_TEX_CONST_0_SWIZ_W__SHIFT 13 -static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val) -{ - return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK; -} -#define A3XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000 -#define A3XX_TEX_CONST_0_MIPLVLS__SHIFT 16 -static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val) -{ - return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK; -} -#define A3XX_TEX_CONST_0_MSAATEX__MASK 0x00300000 -#define A3XX_TEX_CONST_0_MSAATEX__SHIFT 20 -static inline uint32_t A3XX_TEX_CONST_0_MSAATEX(enum a3xx_tex_msaa val) -{ - return ((val) << A3XX_TEX_CONST_0_MSAATEX__SHIFT) & A3XX_TEX_CONST_0_MSAATEX__MASK; -} -#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000 -#define A3XX_TEX_CONST_0_FMT__SHIFT 22 -static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val) -{ - return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK; -} -#define A3XX_TEX_CONST_0_NOCONVERT 0x20000000 -#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000 -#define A3XX_TEX_CONST_0_TYPE__SHIFT 30 -static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val) -{ - return ((val) << A3XX_TEX_CONST_0_TYPE__SHIFT) & A3XX_TEX_CONST_0_TYPE__MASK; -} - -#define REG_A3XX_TEX_CONST_1 0x00000001 -#define A3XX_TEX_CONST_1_HEIGHT__MASK 0x00003fff -#define A3XX_TEX_CONST_1_HEIGHT__SHIFT 0 -static inline uint32_t A3XX_TEX_CONST_1_HEIGHT(uint32_t val) -{ - return ((val) << A3XX_TEX_CONST_1_HEIGHT__SHIFT) & A3XX_TEX_CONST_1_HEIGHT__MASK; -} -#define A3XX_TEX_CONST_1_WIDTH__MASK 0x0fffc000 -#define A3XX_TEX_CONST_1_WIDTH__SHIFT 14 -static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val) -{ - return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK; -} -#define A3XX_TEX_CONST_1_PITCHALIGN__MASK 0xf0000000 -#define A3XX_TEX_CONST_1_PITCHALIGN__SHIFT 28 -static inline uint32_t A3XX_TEX_CONST_1_PITCHALIGN(uint32_t val) -{ - return ((val) << A3XX_TEX_CONST_1_PITCHALIGN__SHIFT) & A3XX_TEX_CONST_1_PITCHALIGN__MASK; -} - -#define REG_A3XX_TEX_CONST_2 0x00000002 -#define A3XX_TEX_CONST_2_INDX__MASK 0x000001ff -#define A3XX_TEX_CONST_2_INDX__SHIFT 0 -static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val) -{ - return ((val) << 
A3XX_TEX_CONST_2_INDX__SHIFT) & A3XX_TEX_CONST_2_INDX__MASK; -} -#define A3XX_TEX_CONST_2_PITCH__MASK 0x3ffff000 -#define A3XX_TEX_CONST_2_PITCH__SHIFT 12 -static inline uint32_t A3XX_TEX_CONST_2_PITCH(uint32_t val) -{ - return ((val) << A3XX_TEX_CONST_2_PITCH__SHIFT) & A3XX_TEX_CONST_2_PITCH__MASK; -} -#define A3XX_TEX_CONST_2_SWAP__MASK 0xc0000000 -#define A3XX_TEX_CONST_2_SWAP__SHIFT 30 -static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A3XX_TEX_CONST_2_SWAP__SHIFT) & A3XX_TEX_CONST_2_SWAP__MASK; -} - -#define REG_A3XX_TEX_CONST_3 0x00000003 -#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0001ffff -#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0 -static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK; -} -#define A3XX_TEX_CONST_3_DEPTH__MASK 0x0ffe0000 -#define A3XX_TEX_CONST_3_DEPTH__SHIFT 17 -static inline uint32_t A3XX_TEX_CONST_3_DEPTH(uint32_t val) -{ - return ((val) << A3XX_TEX_CONST_3_DEPTH__SHIFT) & A3XX_TEX_CONST_3_DEPTH__MASK; -} -#define A3XX_TEX_CONST_3_LAYERSZ2__MASK 0xf0000000 -#define A3XX_TEX_CONST_3_LAYERSZ2__SHIFT 28 -static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ2(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK; -} - -#ifdef __cplusplus -#endif - -#endif /* A3XX_XML */ diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h deleted file mode 100644 index 103a416a78..0000000000 --- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h +++ /dev/null @@ -1,4379 +0,0 @@ -#ifndef A4XX_XML -#define A4XX_XML - -/* Autogenerated file, DO NOT EDIT manually! - -This file was generated by the rules-ng-ng gen_header.py tool in this git repository: -http://gitlab.freedesktop.org/mesa/mesa/ -git clone https://gitlab.freedesktop.org/mesa/mesa.git - -The rules-ng-ng source files this header was generated from are: - -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from Fri Jun 2 14:59:26 2023) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024) - -Copyright (C) 2013-2024 by the following authors: -- Rob Clark <robdclark@gmail.com> Rob Clark -- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -*/ - -#ifdef __KERNEL__ -#include <linux/bug.h> -#define assert(x) BUG_ON(!(x)) -#else -#include <assert.h> -#endif - -#ifdef __cplusplus -#define __struct_cast(X) -#else -#define __struct_cast(X) (struct X) -#endif - -enum a4xx_color_fmt { - RB4_A8_UNORM = 1, - RB4_R8_UNORM = 2, - RB4_R8_SNORM = 3, - RB4_R8_UINT = 4, - RB4_R8_SINT = 5, - RB4_R4G4B4A4_UNORM = 8, - RB4_R5G5B5A1_UNORM = 10, - RB4_R5G6B5_UNORM = 14, - RB4_R8G8_UNORM = 15, - RB4_R8G8_SNORM = 16, - RB4_R8G8_UINT = 17, - RB4_R8G8_SINT = 18, - RB4_R16_UNORM = 19, - RB4_R16_SNORM = 20, - RB4_R16_FLOAT = 21, - RB4_R16_UINT = 22, - RB4_R16_SINT = 23, - RB4_R8G8B8_UNORM = 25, - RB4_R8G8B8A8_UNORM = 26, - RB4_R8G8B8A8_SNORM = 28, - RB4_R8G8B8A8_UINT = 29, - RB4_R8G8B8A8_SINT = 30, - RB4_R10G10B10A2_UNORM = 31, - RB4_R10G10B10A2_UINT = 34, - RB4_R11G11B10_FLOAT = 39, - RB4_R16G16_UNORM = 40, - RB4_R16G16_SNORM = 41, - RB4_R16G16_FLOAT = 42, - RB4_R16G16_UINT = 43, - RB4_R16G16_SINT = 44, - RB4_R32_FLOAT = 45, - RB4_R32_UINT = 46, - RB4_R32_SINT = 47, - RB4_R16G16B16A16_UNORM = 52, - RB4_R16G16B16A16_SNORM = 53, - RB4_R16G16B16A16_FLOAT = 54, - RB4_R16G16B16A16_UINT = 55, - RB4_R16G16B16A16_SINT = 56, - RB4_R32G32_FLOAT = 57, - RB4_R32G32_UINT = 58, - RB4_R32G32_SINT = 59, - RB4_R32G32B32A32_FLOAT = 60, - RB4_R32G32B32A32_UINT = 61, - RB4_R32G32B32A32_SINT = 62, - RB4_NONE = 255, -}; - -enum a4xx_tile_mode { - TILE4_LINEAR = 0, - TILE4_2 = 2, - TILE4_3 = 3, -}; - -enum a4xx_vtx_fmt { - VFMT4_32_FLOAT = 1, - VFMT4_32_32_FLOAT = 2, - VFMT4_32_32_32_FLOAT = 3, - VFMT4_32_32_32_32_FLOAT = 4, - VFMT4_16_FLOAT = 5, - VFMT4_16_16_FLOAT = 6, - VFMT4_16_16_16_FLOAT = 7, - VFMT4_16_16_16_16_FLOAT = 8, - VFMT4_32_FIXED = 9, - VFMT4_32_32_FIXED = 10, - VFMT4_32_32_32_FIXED = 11, - VFMT4_32_32_32_32_FIXED = 12, - VFMT4_11_11_10_FLOAT = 13, - VFMT4_16_SINT = 16, - VFMT4_16_16_SINT = 17, - VFMT4_16_16_16_SINT = 18, - VFMT4_16_16_16_16_SINT = 19, - VFMT4_16_UINT = 20, - VFMT4_16_16_UINT = 21, - VFMT4_16_16_16_UINT = 22, - VFMT4_16_16_16_16_UINT = 23, - VFMT4_16_SNORM = 24, - VFMT4_16_16_SNORM = 25, - VFMT4_16_16_16_SNORM = 26, - VFMT4_16_16_16_16_SNORM = 27, - VFMT4_16_UNORM = 28, - VFMT4_16_16_UNORM = 29, - VFMT4_16_16_16_UNORM = 30, - VFMT4_16_16_16_16_UNORM = 31, - VFMT4_32_UINT = 32, - VFMT4_32_32_UINT = 33, - VFMT4_32_32_32_UINT = 34, - VFMT4_32_32_32_32_UINT = 35, - VFMT4_32_SINT = 36, - VFMT4_32_32_SINT = 37, - VFMT4_32_32_32_SINT = 38, - VFMT4_32_32_32_32_SINT = 39, - VFMT4_8_UINT = 40, - VFMT4_8_8_UINT = 41, - VFMT4_8_8_8_UINT = 42, - VFMT4_8_8_8_8_UINT = 43, - VFMT4_8_UNORM = 44, - VFMT4_8_8_UNORM = 45, - VFMT4_8_8_8_UNORM = 46, - VFMT4_8_8_8_8_UNORM = 47, - VFMT4_8_SINT = 48, - VFMT4_8_8_SINT = 49, - VFMT4_8_8_8_SINT = 50, - VFMT4_8_8_8_8_SINT = 51, - VFMT4_8_SNORM = 52, - VFMT4_8_8_SNORM = 53, - VFMT4_8_8_8_SNORM = 54, - VFMT4_8_8_8_8_SNORM = 55, - VFMT4_10_10_10_2_UINT = 56, - VFMT4_10_10_10_2_UNORM = 57, - VFMT4_10_10_10_2_SINT = 58, - VFMT4_10_10_10_2_SNORM = 59, - VFMT4_2_10_10_10_UINT = 60, - VFMT4_2_10_10_10_UNORM = 61, - VFMT4_2_10_10_10_SINT = 62, - VFMT4_2_10_10_10_SNORM = 63, - VFMT4_NONE = 255, -}; - -enum a4xx_tex_fmt { - TFMT4_A8_UNORM = 3, - TFMT4_8_UNORM = 4, - TFMT4_8_SNORM = 5, - TFMT4_8_UINT = 6, - TFMT4_8_SINT = 7, - TFMT4_4_4_4_4_UNORM = 8, - TFMT4_5_5_5_1_UNORM = 9, - 
TFMT4_5_6_5_UNORM = 11, - TFMT4_L8_A8_UNORM = 13, - TFMT4_8_8_UNORM = 14, - TFMT4_8_8_SNORM = 15, - TFMT4_8_8_UINT = 16, - TFMT4_8_8_SINT = 17, - TFMT4_16_UNORM = 18, - TFMT4_16_SNORM = 19, - TFMT4_16_FLOAT = 20, - TFMT4_16_UINT = 21, - TFMT4_16_SINT = 22, - TFMT4_8_8_8_8_UNORM = 28, - TFMT4_8_8_8_8_SNORM = 29, - TFMT4_8_8_8_8_UINT = 30, - TFMT4_8_8_8_8_SINT = 31, - TFMT4_9_9_9_E5_FLOAT = 32, - TFMT4_10_10_10_2_UNORM = 33, - TFMT4_10_10_10_2_UINT = 34, - TFMT4_11_11_10_FLOAT = 37, - TFMT4_16_16_UNORM = 38, - TFMT4_16_16_SNORM = 39, - TFMT4_16_16_FLOAT = 40, - TFMT4_16_16_UINT = 41, - TFMT4_16_16_SINT = 42, - TFMT4_32_FLOAT = 43, - TFMT4_32_UINT = 44, - TFMT4_32_SINT = 45, - TFMT4_16_16_16_16_UNORM = 51, - TFMT4_16_16_16_16_SNORM = 52, - TFMT4_16_16_16_16_FLOAT = 53, - TFMT4_16_16_16_16_UINT = 54, - TFMT4_16_16_16_16_SINT = 55, - TFMT4_32_32_FLOAT = 56, - TFMT4_32_32_UINT = 57, - TFMT4_32_32_SINT = 58, - TFMT4_32_32_32_FLOAT = 59, - TFMT4_32_32_32_UINT = 60, - TFMT4_32_32_32_SINT = 61, - TFMT4_32_32_32_32_FLOAT = 63, - TFMT4_32_32_32_32_UINT = 64, - TFMT4_32_32_32_32_SINT = 65, - TFMT4_X8Z24_UNORM = 71, - TFMT4_DXT1 = 86, - TFMT4_DXT3 = 87, - TFMT4_DXT5 = 88, - TFMT4_RGTC1_UNORM = 90, - TFMT4_RGTC1_SNORM = 91, - TFMT4_RGTC2_UNORM = 94, - TFMT4_RGTC2_SNORM = 95, - TFMT4_BPTC_UFLOAT = 97, - TFMT4_BPTC_FLOAT = 98, - TFMT4_BPTC = 99, - TFMT4_ATC_RGB = 100, - TFMT4_ATC_RGBA_EXPLICIT = 101, - TFMT4_ATC_RGBA_INTERPOLATED = 102, - TFMT4_ETC2_RG11_UNORM = 103, - TFMT4_ETC2_RG11_SNORM = 104, - TFMT4_ETC2_R11_UNORM = 105, - TFMT4_ETC2_R11_SNORM = 106, - TFMT4_ETC1 = 107, - TFMT4_ETC2_RGB8 = 108, - TFMT4_ETC2_RGBA8 = 109, - TFMT4_ETC2_RGB8A1 = 110, - TFMT4_ASTC_4x4 = 111, - TFMT4_ASTC_5x4 = 112, - TFMT4_ASTC_5x5 = 113, - TFMT4_ASTC_6x5 = 114, - TFMT4_ASTC_6x6 = 115, - TFMT4_ASTC_8x5 = 116, - TFMT4_ASTC_8x6 = 117, - TFMT4_ASTC_8x8 = 118, - TFMT4_ASTC_10x5 = 119, - TFMT4_ASTC_10x6 = 120, - TFMT4_ASTC_10x8 = 121, - TFMT4_ASTC_10x10 = 122, - TFMT4_ASTC_12x10 = 123, - TFMT4_ASTC_12x12 = 124, - TFMT4_NONE = 255, -}; - -enum a4xx_depth_format { - DEPTH4_NONE = 0, - DEPTH4_16 = 1, - DEPTH4_24_8 = 2, - DEPTH4_32 = 3, -}; - -enum a4xx_ccu_perfcounter_select { - CCU_BUSY_CYCLES = 0, - CCU_RB_DEPTH_RETURN_STALL = 2, - CCU_RB_COLOR_RETURN_STALL = 3, - CCU_DEPTH_BLOCKS = 6, - CCU_COLOR_BLOCKS = 7, - CCU_DEPTH_BLOCK_HIT = 8, - CCU_COLOR_BLOCK_HIT = 9, - CCU_DEPTH_FLAG1_COUNT = 10, - CCU_DEPTH_FLAG2_COUNT = 11, - CCU_DEPTH_FLAG3_COUNT = 12, - CCU_DEPTH_FLAG4_COUNT = 13, - CCU_COLOR_FLAG1_COUNT = 14, - CCU_COLOR_FLAG2_COUNT = 15, - CCU_COLOR_FLAG3_COUNT = 16, - CCU_COLOR_FLAG4_COUNT = 17, - CCU_PARTIAL_BLOCK_READ = 18, -}; - -enum a4xx_cp_perfcounter_select { - CP_ALWAYS_COUNT = 0, - CP_BUSY = 1, - CP_PFP_IDLE = 2, - CP_PFP_BUSY_WORKING = 3, - CP_PFP_STALL_CYCLES_ANY = 4, - CP_PFP_STARVE_CYCLES_ANY = 5, - CP_PFP_STARVED_PER_LOAD_ADDR = 6, - CP_PFP_STALLED_PER_STORE_ADDR = 7, - CP_PFP_PC_PROFILE = 8, - CP_PFP_MATCH_PM4_PKT_PROFILE = 9, - CP_PFP_COND_INDIRECT_DISCARDED = 10, - CP_LONG_RESUMPTIONS = 11, - CP_RESUME_CYCLES = 12, - CP_RESUME_TO_BOUNDARY_CYCLES = 13, - CP_LONG_PREEMPTIONS = 14, - CP_PREEMPT_CYCLES = 15, - CP_PREEMPT_TO_BOUNDARY_CYCLES = 16, - CP_ME_FIFO_EMPTY_PFP_IDLE = 17, - CP_ME_FIFO_EMPTY_PFP_BUSY = 18, - CP_ME_FIFO_NOT_EMPTY_NOT_FULL = 19, - CP_ME_FIFO_FULL_ME_BUSY = 20, - CP_ME_FIFO_FULL_ME_NON_WORKING = 21, - CP_ME_WAITING_FOR_PACKETS = 22, - CP_ME_BUSY_WORKING = 23, - CP_ME_STARVE_CYCLES_ANY = 24, - CP_ME_STARVE_CYCLES_PER_PROFILE = 25, - CP_ME_STALL_CYCLES_PER_PROFILE = 26, - CP_ME_PC_PROFILE = 27, 
- CP_RCIU_FIFO_EMPTY = 28, - CP_RCIU_FIFO_NOT_EMPTY_NOT_FULL = 29, - CP_RCIU_FIFO_FULL = 30, - CP_RCIU_FIFO_FULL_NO_CONTEXT = 31, - CP_RCIU_FIFO_FULL_AHB_MASTER = 32, - CP_RCIU_FIFO_FULL_OTHER = 33, - CP_AHB_IDLE = 34, - CP_AHB_STALL_ON_GRANT_NO_SPLIT = 35, - CP_AHB_STALL_ON_GRANT_SPLIT = 36, - CP_AHB_STALL_ON_GRANT_SPLIT_PROFILE = 37, - CP_AHB_BUSY_WORKING = 38, - CP_AHB_BUSY_STALL_ON_HRDY = 39, - CP_AHB_BUSY_STALL_ON_HRDY_PROFILE = 40, -}; - -enum a4xx_gras_ras_perfcounter_select { - RAS_SUPER_TILES = 0, - RAS_8X8_TILES = 1, - RAS_4X4_TILES = 2, - RAS_BUSY_CYCLES = 3, - RAS_STALL_CYCLES_BY_RB = 4, - RAS_STALL_CYCLES_BY_VSC = 5, - RAS_STARVE_CYCLES_BY_TSE = 6, - RAS_SUPERTILE_CYCLES = 7, - RAS_TILE_CYCLES = 8, - RAS_FULLY_COVERED_SUPER_TILES = 9, - RAS_FULLY_COVERED_8X8_TILES = 10, - RAS_4X4_PRIM = 11, - RAS_8X4_4X8_PRIM = 12, - RAS_8X8_PRIM = 13, -}; - -enum a4xx_gras_tse_perfcounter_select { - TSE_INPUT_PRIM = 0, - TSE_INPUT_NULL_PRIM = 1, - TSE_TRIVAL_REJ_PRIM = 2, - TSE_CLIPPED_PRIM = 3, - TSE_NEW_PRIM = 4, - TSE_ZERO_AREA_PRIM = 5, - TSE_FACENESS_CULLED_PRIM = 6, - TSE_ZERO_PIXEL_PRIM = 7, - TSE_OUTPUT_NULL_PRIM = 8, - TSE_OUTPUT_VISIBLE_PRIM = 9, - TSE_PRE_CLIP_PRIM = 10, - TSE_POST_CLIP_PRIM = 11, - TSE_BUSY_CYCLES = 12, - TSE_PC_STARVE = 13, - TSE_RAS_STALL = 14, - TSE_STALL_BARYPLANE_FIFO_FULL = 15, - TSE_STALL_ZPLANE_FIFO_FULL = 16, -}; - -enum a4xx_hlsq_perfcounter_select { - HLSQ_SP_VS_STAGE_CONSTANT = 0, - HLSQ_SP_VS_STAGE_INSTRUCTIONS = 1, - HLSQ_SP_FS_STAGE_CONSTANT = 2, - HLSQ_SP_FS_STAGE_INSTRUCTIONS = 3, - HLSQ_TP_STATE = 4, - HLSQ_QUADS = 5, - HLSQ_PIXELS = 6, - HLSQ_VERTICES = 7, - HLSQ_SP_VS_STAGE_DATA_BYTES = 13, - HLSQ_SP_FS_STAGE_DATA_BYTES = 14, - HLSQ_BUSY_CYCLES = 15, - HLSQ_STALL_CYCLES_SP_STATE = 16, - HLSQ_STALL_CYCLES_SP_VS_STAGE = 17, - HLSQ_STALL_CYCLES_SP_FS_STAGE = 18, - HLSQ_STALL_CYCLES_UCHE = 19, - HLSQ_RBBM_LOAD_CYCLES = 20, - HLSQ_DI_TO_VS_START_SP = 21, - HLSQ_DI_TO_FS_START_SP = 22, - HLSQ_VS_STAGE_START_TO_DONE_SP = 23, - HLSQ_FS_STAGE_START_TO_DONE_SP = 24, - HLSQ_SP_STATE_COPY_CYCLES_VS_STAGE = 25, - HLSQ_SP_STATE_COPY_CYCLES_FS_STAGE = 26, - HLSQ_UCHE_LATENCY_CYCLES = 27, - HLSQ_UCHE_LATENCY_COUNT = 28, - HLSQ_STARVE_CYCLES_VFD = 29, -}; - -enum a4xx_pc_perfcounter_select { - PC_VIS_STREAMS_LOADED = 0, - PC_VPC_PRIMITIVES = 2, - PC_DEAD_PRIM = 3, - PC_LIVE_PRIM = 4, - PC_DEAD_DRAWCALLS = 5, - PC_LIVE_DRAWCALLS = 6, - PC_VERTEX_MISSES = 7, - PC_STALL_CYCLES_VFD = 9, - PC_STALL_CYCLES_TSE = 10, - PC_STALL_CYCLES_UCHE = 11, - PC_WORKING_CYCLES = 12, - PC_IA_VERTICES = 13, - PC_GS_PRIMITIVES = 14, - PC_HS_INVOCATIONS = 15, - PC_DS_INVOCATIONS = 16, - PC_DS_PRIMITIVES = 17, - PC_STARVE_CYCLES_FOR_INDEX = 20, - PC_STARVE_CYCLES_FOR_TESS_FACTOR = 21, - PC_STARVE_CYCLES_FOR_VIZ_STREAM = 22, - PC_STALL_CYCLES_TESS = 23, - PC_STARVE_CYCLES_FOR_POSITION = 24, - PC_MODE0_DRAWCALL = 25, - PC_MODE1_DRAWCALL = 26, - PC_MODE2_DRAWCALL = 27, - PC_MODE3_DRAWCALL = 28, - PC_MODE4_DRAWCALL = 29, - PC_PREDICATED_DEAD_DRAWCALL = 30, - PC_STALL_CYCLES_BY_TSE_ONLY = 31, - PC_STALL_CYCLES_BY_VPC_ONLY = 32, - PC_VPC_POS_DATA_TRANSACTION = 33, - PC_BUSY_CYCLES = 34, - PC_STARVE_CYCLES_DI = 35, - PC_STALL_CYCLES_VPC = 36, - TESS_WORKING_CYCLES = 37, - TESS_NUM_CYCLES_SETUP_WORKING = 38, - TESS_NUM_CYCLES_PTGEN_WORKING = 39, - TESS_NUM_CYCLES_CONNGEN_WORKING = 40, - TESS_BUSY_CYCLES = 41, - TESS_STARVE_CYCLES_PC = 42, - TESS_STALL_CYCLES_PC = 43, -}; - -enum a4xx_pwr_perfcounter_select { - PWR_CORE_CLOCK_CYCLES = 0, - PWR_BUSY_CLOCK_CYCLES = 1, -}; - -enum 
a4xx_rb_perfcounter_select { - RB_BUSY_CYCLES = 0, - RB_BUSY_CYCLES_BINNING = 1, - RB_BUSY_CYCLES_RENDERING = 2, - RB_BUSY_CYCLES_RESOLVE = 3, - RB_STARVE_CYCLES_BY_SP = 4, - RB_STARVE_CYCLES_BY_RAS = 5, - RB_STARVE_CYCLES_BY_MARB = 6, - RB_STALL_CYCLES_BY_MARB = 7, - RB_STALL_CYCLES_BY_HLSQ = 8, - RB_RB_RB_MARB_DATA = 9, - RB_SP_RB_QUAD = 10, - RB_RAS_RB_Z_QUADS = 11, - RB_GMEM_CH0_READ = 12, - RB_GMEM_CH1_READ = 13, - RB_GMEM_CH0_WRITE = 14, - RB_GMEM_CH1_WRITE = 15, - RB_CP_CONTEXT_DONE = 16, - RB_CP_CACHE_FLUSH = 17, - RB_CP_ZPASS_DONE = 18, - RB_STALL_FIFO0_FULL = 19, - RB_STALL_FIFO1_FULL = 20, - RB_STALL_FIFO2_FULL = 21, - RB_STALL_FIFO3_FULL = 22, - RB_RB_HLSQ_TRANSACTIONS = 23, - RB_Z_READ = 24, - RB_Z_WRITE = 25, - RB_C_READ = 26, - RB_C_WRITE = 27, - RB_C_READ_LATENCY = 28, - RB_Z_READ_LATENCY = 29, - RB_STALL_BY_UCHE = 30, - RB_MARB_UCHE_TRANSACTIONS = 31, - RB_CACHE_STALL_MISS = 32, - RB_CACHE_STALL_FIFO_FULL = 33, - RB_8BIT_BLENDER_UNITS_ACTIVE = 34, - RB_16BIT_BLENDER_UNITS_ACTIVE = 35, - RB_SAMPLER_UNITS_ACTIVE = 36, - RB_TOTAL_PASS = 38, - RB_Z_PASS = 39, - RB_Z_FAIL = 40, - RB_S_FAIL = 41, - RB_POWER0 = 42, - RB_POWER1 = 43, - RB_POWER2 = 44, - RB_POWER3 = 45, - RB_POWER4 = 46, - RB_POWER5 = 47, - RB_POWER6 = 48, - RB_POWER7 = 49, -}; - -enum a4xx_rbbm_perfcounter_select { - RBBM_ALWAYS_ON = 0, - RBBM_VBIF_BUSY = 1, - RBBM_TSE_BUSY = 2, - RBBM_RAS_BUSY = 3, - RBBM_PC_DCALL_BUSY = 4, - RBBM_PC_VSD_BUSY = 5, - RBBM_VFD_BUSY = 6, - RBBM_VPC_BUSY = 7, - RBBM_UCHE_BUSY = 8, - RBBM_VSC_BUSY = 9, - RBBM_HLSQ_BUSY = 10, - RBBM_ANY_RB_BUSY = 11, - RBBM_ANY_TPL1_BUSY = 12, - RBBM_ANY_SP_BUSY = 13, - RBBM_ANY_MARB_BUSY = 14, - RBBM_ANY_ARB_BUSY = 15, - RBBM_AHB_STATUS_BUSY = 16, - RBBM_AHB_STATUS_STALLED = 17, - RBBM_AHB_STATUS_TXFR = 18, - RBBM_AHB_STATUS_TXFR_SPLIT = 19, - RBBM_AHB_STATUS_TXFR_ERROR = 20, - RBBM_AHB_STATUS_LONG_STALL = 21, - RBBM_STATUS_MASKED = 22, - RBBM_CP_BUSY_GFX_CORE_IDLE = 23, - RBBM_TESS_BUSY = 24, - RBBM_COM_BUSY = 25, - RBBM_DCOM_BUSY = 32, - RBBM_ANY_CCU_BUSY = 33, - RBBM_DPM_BUSY = 34, -}; - -enum a4xx_sp_perfcounter_select { - SP_LM_LOAD_INSTRUCTIONS = 0, - SP_LM_STORE_INSTRUCTIONS = 1, - SP_LM_ATOMICS = 2, - SP_GM_LOAD_INSTRUCTIONS = 3, - SP_GM_STORE_INSTRUCTIONS = 4, - SP_GM_ATOMICS = 5, - SP_VS_STAGE_TEX_INSTRUCTIONS = 6, - SP_VS_STAGE_CFLOW_INSTRUCTIONS = 7, - SP_VS_STAGE_EFU_INSTRUCTIONS = 8, - SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 9, - SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 10, - SP_FS_STAGE_TEX_INSTRUCTIONS = 11, - SP_FS_STAGE_CFLOW_INSTRUCTIONS = 12, - SP_FS_STAGE_EFU_INSTRUCTIONS = 13, - SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 14, - SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 15, - SP_VS_INSTRUCTIONS = 17, - SP_FS_INSTRUCTIONS = 18, - SP_ADDR_LOCK_COUNT = 19, - SP_UCHE_READ_TRANS = 20, - SP_UCHE_WRITE_TRANS = 21, - SP_EXPORT_VPC_TRANS = 22, - SP_EXPORT_RB_TRANS = 23, - SP_PIXELS_KILLED = 24, - SP_ICL1_REQUESTS = 25, - SP_ICL1_MISSES = 26, - SP_ICL0_REQUESTS = 27, - SP_ICL0_MISSES = 28, - SP_ALU_WORKING_CYCLES = 29, - SP_EFU_WORKING_CYCLES = 30, - SP_STALL_CYCLES_BY_VPC = 31, - SP_STALL_CYCLES_BY_TP = 32, - SP_STALL_CYCLES_BY_UCHE = 33, - SP_STALL_CYCLES_BY_RB = 34, - SP_BUSY_CYCLES = 35, - SP_HS_INSTRUCTIONS = 36, - SP_DS_INSTRUCTIONS = 37, - SP_GS_INSTRUCTIONS = 38, - SP_CS_INSTRUCTIONS = 39, - SP_SCHEDULER_NON_WORKING = 40, - SP_WAVE_CONTEXTS = 41, - SP_WAVE_CONTEXT_CYCLES = 42, - SP_POWER0 = 43, - SP_POWER1 = 44, - SP_POWER2 = 45, - SP_POWER3 = 46, - SP_POWER4 = 47, - SP_POWER5 = 48, - SP_POWER6 = 49, - SP_POWER7 = 50, - SP_POWER8 = 51, - SP_POWER9 = 52, - 
SP_POWER10 = 53, - SP_POWER11 = 54, - SP_POWER12 = 55, - SP_POWER13 = 56, - SP_POWER14 = 57, - SP_POWER15 = 58, -}; - -enum a4xx_tp_perfcounter_select { - TP_L1_REQUESTS = 0, - TP_L1_MISSES = 1, - TP_QUADS_OFFSET = 8, - TP_QUAD_SHADOW = 9, - TP_QUADS_ARRAY = 10, - TP_QUADS_GRADIENT = 11, - TP_QUADS_1D2D = 12, - TP_QUADS_3DCUBE = 13, - TP_BUSY_CYCLES = 16, - TP_STALL_CYCLES_BY_ARB = 17, - TP_STATE_CACHE_REQUESTS = 20, - TP_STATE_CACHE_MISSES = 21, - TP_POWER0 = 22, - TP_POWER1 = 23, - TP_POWER2 = 24, - TP_POWER3 = 25, - TP_POWER4 = 26, - TP_POWER5 = 27, - TP_POWER6 = 28, - TP_POWER7 = 29, -}; - -enum a4xx_uche_perfcounter_select { - UCHE_VBIF_READ_BEATS_TP = 0, - UCHE_VBIF_READ_BEATS_VFD = 1, - UCHE_VBIF_READ_BEATS_HLSQ = 2, - UCHE_VBIF_READ_BEATS_MARB = 3, - UCHE_VBIF_READ_BEATS_SP = 4, - UCHE_READ_REQUESTS_TP = 5, - UCHE_READ_REQUESTS_VFD = 6, - UCHE_READ_REQUESTS_HLSQ = 7, - UCHE_READ_REQUESTS_MARB = 8, - UCHE_READ_REQUESTS_SP = 9, - UCHE_WRITE_REQUESTS_MARB = 10, - UCHE_WRITE_REQUESTS_SP = 11, - UCHE_TAG_CHECK_FAILS = 12, - UCHE_EVICTS = 13, - UCHE_FLUSHES = 14, - UCHE_VBIF_LATENCY_CYCLES = 15, - UCHE_VBIF_LATENCY_SAMPLES = 16, - UCHE_BUSY_CYCLES = 17, - UCHE_VBIF_READ_BEATS_PC = 18, - UCHE_READ_REQUESTS_PC = 19, - UCHE_WRITE_REQUESTS_VPC = 20, - UCHE_STALL_BY_VBIF = 21, - UCHE_WRITE_REQUESTS_VSC = 22, - UCHE_POWER0 = 23, - UCHE_POWER1 = 24, - UCHE_POWER2 = 25, - UCHE_POWER3 = 26, - UCHE_POWER4 = 27, - UCHE_POWER5 = 28, - UCHE_POWER6 = 29, - UCHE_POWER7 = 30, -}; - -enum a4xx_vbif_perfcounter_select { - AXI_READ_REQUESTS_ID_0 = 0, - AXI_READ_REQUESTS_ID_1 = 1, - AXI_READ_REQUESTS_ID_2 = 2, - AXI_READ_REQUESTS_ID_3 = 3, - AXI_READ_REQUESTS_ID_4 = 4, - AXI_READ_REQUESTS_ID_5 = 5, - AXI_READ_REQUESTS_ID_6 = 6, - AXI_READ_REQUESTS_ID_7 = 7, - AXI_READ_REQUESTS_ID_8 = 8, - AXI_READ_REQUESTS_ID_9 = 9, - AXI_READ_REQUESTS_ID_10 = 10, - AXI_READ_REQUESTS_ID_11 = 11, - AXI_READ_REQUESTS_ID_12 = 12, - AXI_READ_REQUESTS_ID_13 = 13, - AXI_READ_REQUESTS_ID_14 = 14, - AXI_READ_REQUESTS_ID_15 = 15, - AXI0_READ_REQUESTS_TOTAL = 16, - AXI1_READ_REQUESTS_TOTAL = 17, - AXI2_READ_REQUESTS_TOTAL = 18, - AXI3_READ_REQUESTS_TOTAL = 19, - AXI_READ_REQUESTS_TOTAL = 20, - AXI_WRITE_REQUESTS_ID_0 = 21, - AXI_WRITE_REQUESTS_ID_1 = 22, - AXI_WRITE_REQUESTS_ID_2 = 23, - AXI_WRITE_REQUESTS_ID_3 = 24, - AXI_WRITE_REQUESTS_ID_4 = 25, - AXI_WRITE_REQUESTS_ID_5 = 26, - AXI_WRITE_REQUESTS_ID_6 = 27, - AXI_WRITE_REQUESTS_ID_7 = 28, - AXI_WRITE_REQUESTS_ID_8 = 29, - AXI_WRITE_REQUESTS_ID_9 = 30, - AXI_WRITE_REQUESTS_ID_10 = 31, - AXI_WRITE_REQUESTS_ID_11 = 32, - AXI_WRITE_REQUESTS_ID_12 = 33, - AXI_WRITE_REQUESTS_ID_13 = 34, - AXI_WRITE_REQUESTS_ID_14 = 35, - AXI_WRITE_REQUESTS_ID_15 = 36, - AXI0_WRITE_REQUESTS_TOTAL = 37, - AXI1_WRITE_REQUESTS_TOTAL = 38, - AXI2_WRITE_REQUESTS_TOTAL = 39, - AXI3_WRITE_REQUESTS_TOTAL = 40, - AXI_WRITE_REQUESTS_TOTAL = 41, - AXI_TOTAL_REQUESTS = 42, - AXI_READ_DATA_BEATS_ID_0 = 43, - AXI_READ_DATA_BEATS_ID_1 = 44, - AXI_READ_DATA_BEATS_ID_2 = 45, - AXI_READ_DATA_BEATS_ID_3 = 46, - AXI_READ_DATA_BEATS_ID_4 = 47, - AXI_READ_DATA_BEATS_ID_5 = 48, - AXI_READ_DATA_BEATS_ID_6 = 49, - AXI_READ_DATA_BEATS_ID_7 = 50, - AXI_READ_DATA_BEATS_ID_8 = 51, - AXI_READ_DATA_BEATS_ID_9 = 52, - AXI_READ_DATA_BEATS_ID_10 = 53, - AXI_READ_DATA_BEATS_ID_11 = 54, - AXI_READ_DATA_BEATS_ID_12 = 55, - AXI_READ_DATA_BEATS_ID_13 = 56, - AXI_READ_DATA_BEATS_ID_14 = 57, - AXI_READ_DATA_BEATS_ID_15 = 58, - AXI0_READ_DATA_BEATS_TOTAL = 59, - AXI1_READ_DATA_BEATS_TOTAL = 60, - AXI2_READ_DATA_BEATS_TOTAL = 61, - 
AXI3_READ_DATA_BEATS_TOTAL = 62, - AXI_READ_DATA_BEATS_TOTAL = 63, - AXI_WRITE_DATA_BEATS_ID_0 = 64, - AXI_WRITE_DATA_BEATS_ID_1 = 65, - AXI_WRITE_DATA_BEATS_ID_2 = 66, - AXI_WRITE_DATA_BEATS_ID_3 = 67, - AXI_WRITE_DATA_BEATS_ID_4 = 68, - AXI_WRITE_DATA_BEATS_ID_5 = 69, - AXI_WRITE_DATA_BEATS_ID_6 = 70, - AXI_WRITE_DATA_BEATS_ID_7 = 71, - AXI_WRITE_DATA_BEATS_ID_8 = 72, - AXI_WRITE_DATA_BEATS_ID_9 = 73, - AXI_WRITE_DATA_BEATS_ID_10 = 74, - AXI_WRITE_DATA_BEATS_ID_11 = 75, - AXI_WRITE_DATA_BEATS_ID_12 = 76, - AXI_WRITE_DATA_BEATS_ID_13 = 77, - AXI_WRITE_DATA_BEATS_ID_14 = 78, - AXI_WRITE_DATA_BEATS_ID_15 = 79, - AXI0_WRITE_DATA_BEATS_TOTAL = 80, - AXI1_WRITE_DATA_BEATS_TOTAL = 81, - AXI2_WRITE_DATA_BEATS_TOTAL = 82, - AXI3_WRITE_DATA_BEATS_TOTAL = 83, - AXI_WRITE_DATA_BEATS_TOTAL = 84, - AXI_DATA_BEATS_TOTAL = 85, - CYCLES_HELD_OFF_ID_0 = 86, - CYCLES_HELD_OFF_ID_1 = 87, - CYCLES_HELD_OFF_ID_2 = 88, - CYCLES_HELD_OFF_ID_3 = 89, - CYCLES_HELD_OFF_ID_4 = 90, - CYCLES_HELD_OFF_ID_5 = 91, - CYCLES_HELD_OFF_ID_6 = 92, - CYCLES_HELD_OFF_ID_7 = 93, - CYCLES_HELD_OFF_ID_8 = 94, - CYCLES_HELD_OFF_ID_9 = 95, - CYCLES_HELD_OFF_ID_10 = 96, - CYCLES_HELD_OFF_ID_11 = 97, - CYCLES_HELD_OFF_ID_12 = 98, - CYCLES_HELD_OFF_ID_13 = 99, - CYCLES_HELD_OFF_ID_14 = 100, - CYCLES_HELD_OFF_ID_15 = 101, - AXI_READ_REQUEST_HELD_OFF = 102, - AXI_WRITE_REQUEST_HELD_OFF = 103, - AXI_REQUEST_HELD_OFF = 104, - AXI_WRITE_DATA_HELD_OFF = 105, - OCMEM_AXI_READ_REQUEST_HELD_OFF = 106, - OCMEM_AXI_WRITE_REQUEST_HELD_OFF = 107, - OCMEM_AXI_REQUEST_HELD_OFF = 108, - OCMEM_AXI_WRITE_DATA_HELD_OFF = 109, - ELAPSED_CYCLES_DDR = 110, - ELAPSED_CYCLES_OCMEM = 111, -}; - -enum a4xx_vfd_perfcounter_select { - VFD_UCHE_BYTE_FETCHED = 0, - VFD_UCHE_TRANS = 1, - VFD_FETCH_INSTRUCTIONS = 3, - VFD_BUSY_CYCLES = 5, - VFD_STALL_CYCLES_UCHE = 6, - VFD_STALL_CYCLES_HLSQ = 7, - VFD_STALL_CYCLES_VPC_BYPASS = 8, - VFD_STALL_CYCLES_VPC_ALLOC = 9, - VFD_MODE_0_FIBERS = 13, - VFD_MODE_1_FIBERS = 14, - VFD_MODE_2_FIBERS = 15, - VFD_MODE_3_FIBERS = 16, - VFD_MODE_4_FIBERS = 17, - VFD_BFIFO_STALL = 18, - VFD_NUM_VERTICES_TOTAL = 19, - VFD_PACKER_FULL = 20, - VFD_UCHE_REQUEST_FIFO_FULL = 21, - VFD_STARVE_CYCLES_PC = 22, - VFD_STARVE_CYCLES_UCHE = 23, -}; - -enum a4xx_vpc_perfcounter_select { - VPC_SP_LM_COMPONENTS = 2, - VPC_SP0_LM_BYTES = 3, - VPC_SP1_LM_BYTES = 4, - VPC_SP2_LM_BYTES = 5, - VPC_SP3_LM_BYTES = 6, - VPC_WORKING_CYCLES = 7, - VPC_STALL_CYCLES_LM = 8, - VPC_STARVE_CYCLES_RAS = 9, - VPC_STREAMOUT_CYCLES = 10, - VPC_UCHE_TRANSACTIONS = 12, - VPC_STALL_CYCLES_UCHE = 13, - VPC_BUSY_CYCLES = 14, - VPC_STARVE_CYCLES_SP = 15, -}; - -enum a4xx_vsc_perfcounter_select { - VSC_BUSY_CYCLES = 0, - VSC_WORKING_CYCLES = 1, - VSC_STALL_CYCLES_UCHE = 2, - VSC_STARVE_CYCLES_RAS = 3, - VSC_EOT_NUM = 4, -}; - -enum a4xx_tex_filter { - A4XX_TEX_NEAREST = 0, - A4XX_TEX_LINEAR = 1, - A4XX_TEX_ANISO = 2, -}; - -enum a4xx_tex_clamp { - A4XX_TEX_REPEAT = 0, - A4XX_TEX_CLAMP_TO_EDGE = 1, - A4XX_TEX_MIRROR_REPEAT = 2, - A4XX_TEX_CLAMP_TO_BORDER = 3, - A4XX_TEX_MIRROR_CLAMP = 4, -}; - -enum a4xx_tex_aniso { - A4XX_TEX_ANISO_1 = 0, - A4XX_TEX_ANISO_2 = 1, - A4XX_TEX_ANISO_4 = 2, - A4XX_TEX_ANISO_8 = 3, - A4XX_TEX_ANISO_16 = 4, -}; - -enum a4xx_tex_swiz { - A4XX_TEX_X = 0, - A4XX_TEX_Y = 1, - A4XX_TEX_Z = 2, - A4XX_TEX_W = 3, - A4XX_TEX_ZERO = 4, - A4XX_TEX_ONE = 5, -}; - -enum a4xx_tex_type { - A4XX_TEX_1D = 0, - A4XX_TEX_2D = 1, - A4XX_TEX_CUBE = 2, - A4XX_TEX_3D = 3, - A4XX_TEX_BUFFER = 4, -}; - -#define A4XX_CGC_HLSQ_EARLY_CYC__MASK 0x00700000 -#define 
A4XX_CGC_HLSQ_EARLY_CYC__SHIFT 20 -static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val) -{ - return ((val) << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT) & A4XX_CGC_HLSQ_EARLY_CYC__MASK; -} - -#define A4XX_INT0_RBBM_GPU_IDLE 0x00000001 -#define A4XX_INT0_RBBM_AHB_ERROR 0x00000002 -#define A4XX_INT0_RBBM_REG_TIMEOUT 0x00000004 -#define A4XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008 -#define A4XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010 -#define A4XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020 -#define A4XX_INT0_VFD_ERROR 0x00000040 -#define A4XX_INT0_CP_SW_INT 0x00000080 -#define A4XX_INT0_CP_T0_PACKET_IN_IB 0x00000100 -#define A4XX_INT0_CP_OPCODE_ERROR 0x00000200 -#define A4XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400 -#define A4XX_INT0_CP_HW_FAULT 0x00000800 -#define A4XX_INT0_CP_DMA 0x00001000 -#define A4XX_INT0_CP_IB2_INT 0x00002000 -#define A4XX_INT0_CP_IB1_INT 0x00004000 -#define A4XX_INT0_CP_RB_INT 0x00008000 -#define A4XX_INT0_CP_REG_PROTECT_FAULT 0x00010000 -#define A4XX_INT0_CP_RB_DONE_TS 0x00020000 -#define A4XX_INT0_CP_VS_DONE_TS 0x00040000 -#define A4XX_INT0_CP_PS_DONE_TS 0x00080000 -#define A4XX_INT0_CACHE_FLUSH_TS 0x00100000 -#define A4XX_INT0_CP_AHB_ERROR_HALT 0x00200000 -#define A4XX_INT0_MISC_HANG_DETECT 0x01000000 -#define A4XX_INT0_UCHE_OOB_ACCESS 0x02000000 - -#define REG_A4XX_RB_GMEM_BASE_ADDR 0x00000cc0 - -#define REG_A4XX_RB_PERFCTR_RB_SEL_0 0x00000cc7 - -#define REG_A4XX_RB_PERFCTR_RB_SEL_1 0x00000cc8 - -#define REG_A4XX_RB_PERFCTR_RB_SEL_2 0x00000cc9 - -#define REG_A4XX_RB_PERFCTR_RB_SEL_3 0x00000cca - -#define REG_A4XX_RB_PERFCTR_RB_SEL_4 0x00000ccb - -#define REG_A4XX_RB_PERFCTR_RB_SEL_5 0x00000ccc - -#define REG_A4XX_RB_PERFCTR_RB_SEL_6 0x00000ccd - -#define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce - -#define REG_A4XX_RB_PERFCTR_CCU_SEL_0 0x00000ccf - -#define REG_A4XX_RB_PERFCTR_CCU_SEL_1 0x00000cd0 - -#define REG_A4XX_RB_PERFCTR_CCU_SEL_2 0x00000cd1 - -#define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2 - -#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0 -#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff -#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0 -static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val) -{ - return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK; -} -#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x3fff0000 -#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 16 -static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val) -{ - return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK; -} - -#define REG_A4XX_RB_CLEAR_COLOR_DW0 0x000020cc - -#define REG_A4XX_RB_CLEAR_COLOR_DW1 0x000020cd - -#define REG_A4XX_RB_CLEAR_COLOR_DW2 0x000020ce - -#define REG_A4XX_RB_CLEAR_COLOR_DW3 0x000020cf - -#define REG_A4XX_RB_MODE_CONTROL 0x000020a0 -#define A4XX_RB_MODE_CONTROL_WIDTH__MASK 0x0000003f -#define A4XX_RB_MODE_CONTROL_WIDTH__SHIFT 0 -static inline uint32_t A4XX_RB_MODE_CONTROL_WIDTH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A4XX_RB_MODE_CONTROL_WIDTH__SHIFT) & A4XX_RB_MODE_CONTROL_WIDTH__MASK; -} -#define A4XX_RB_MODE_CONTROL_HEIGHT__MASK 0x00003f00 -#define A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT 8 -static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK; -} -#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000 - -#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1 
-#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001 -#define A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00000020 - -#define REG_A4XX_RB_MSAA_CONTROL 0x000020a2 -#define A4XX_RB_MSAA_CONTROL_DISABLE 0x00001000 -#define A4XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000e000 -#define A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 13 -static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val) -{ - return ((val) << A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL_SAMPLES__MASK; -} - -#define REG_A4XX_RB_RENDER_CONTROL2 0x000020a3 -#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK 0x0000000f -#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT 0 -static inline uint32_t A4XX_RB_RENDER_CONTROL2_COORD_MASK(uint32_t val) -{ - return ((val) << A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT) & A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK; -} -#define A4XX_RB_RENDER_CONTROL2_SAMPLEMASK 0x00000010 -#define A4XX_RB_RENDER_CONTROL2_FACENESS 0x00000020 -#define A4XX_RB_RENDER_CONTROL2_SAMPLEID 0x00000040 -#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK 0x00000380 -#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT 7 -static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val) -{ - return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK; -} -#define A4XX_RB_RENDER_CONTROL2_SAMPLEID_HR 0x00000800 -#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_PIXEL 0x00001000 -#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_CENTROID 0x00002000 -#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_SAMPLE 0x00004000 -#define A4XX_RB_RENDER_CONTROL2_SIZE 0x00008000 - -#define REG_A4XX_RB_MRT(i0) (0x000020a4 + 0x5*(i0)) - -static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4 + 0x5*i0; } -#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008 -#define A4XX_RB_MRT_CONTROL_BLEND 0x00000010 -#define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020 -#define A4XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000040 -#define A4XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00 -#define A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8 -static inline uint32_t A4XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val) -{ - return ((val) << A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A4XX_RB_MRT_CONTROL_ROP_CODE__MASK; -} -#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000 -#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24 -static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val) -{ - return ((val) << A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK; -} - -static inline uint32_t REG_A4XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020a5 + 0x5*i0; } -#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f -#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a4xx_color_fmt val) -{ - return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK; -} -#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0 -#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6 -static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a4xx_tile_mode val) -{ - return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK; -} -#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00000600 -#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 9 -static inline uint32_t A4XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val) -{ - return ((val) << A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK; 
-} -#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00001800 -#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 11 -static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; -} -#define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00002000 -#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xffffc000 -#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14 -static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val) -{ - assert(!(val & 0xf)); - return (((val >> 4)) << A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK; -} - -static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; } - -static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; } -#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x03fffff8 -#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3 -static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val) -{ - return ((val) << A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT) & A4XX_RB_MRT_CONTROL3_STRIDE__MASK; -} - -static inline uint32_t REG_A4XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020a8 + 0x5*i0; } -#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f -#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0 -static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK; -} -#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0 -#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5 -static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) -{ - return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK; -} -#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00 -#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8 -static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK; -} -#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000 -#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16 -static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK; -} -#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000 -#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21 -static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) -{ - return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK; -} -#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000 -#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24 -static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK; -} - -#define REG_A4XX_RB_BLEND_RED 0x000020f0 -#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff -#define A4XX_RB_BLEND_RED_UINT__SHIFT 0 -static inline uint32_t 
A4XX_RB_BLEND_RED_UINT(uint32_t val) -{ - return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK; -} -#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00 -#define A4XX_RB_BLEND_RED_SINT__SHIFT 8 -static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val) -{ - return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK; -} -#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000 -#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16 -static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val) -{ - return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK; -} - -#define REG_A4XX_RB_BLEND_RED_F32 0x000020f1 -#define A4XX_RB_BLEND_RED_F32__MASK 0xffffffff -#define A4XX_RB_BLEND_RED_F32__SHIFT 0 -static inline uint32_t A4XX_RB_BLEND_RED_F32(float val) -{ - return ((fui(val)) << A4XX_RB_BLEND_RED_F32__SHIFT) & A4XX_RB_BLEND_RED_F32__MASK; -} - -#define REG_A4XX_RB_BLEND_GREEN 0x000020f2 -#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff -#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0 -static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val) -{ - return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK; -} -#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00 -#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8 -static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val) -{ - return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK; -} -#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000 -#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16 -static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val) -{ - return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK; -} - -#define REG_A4XX_RB_BLEND_GREEN_F32 0x000020f3 -#define A4XX_RB_BLEND_GREEN_F32__MASK 0xffffffff -#define A4XX_RB_BLEND_GREEN_F32__SHIFT 0 -static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val) -{ - return ((fui(val)) << A4XX_RB_BLEND_GREEN_F32__SHIFT) & A4XX_RB_BLEND_GREEN_F32__MASK; -} - -#define REG_A4XX_RB_BLEND_BLUE 0x000020f4 -#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff -#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0 -static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val) -{ - return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK; -} -#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00 -#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8 -static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val) -{ - return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK; -} -#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000 -#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16 -static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val) -{ - return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK; -} - -#define REG_A4XX_RB_BLEND_BLUE_F32 0x000020f5 -#define A4XX_RB_BLEND_BLUE_F32__MASK 0xffffffff -#define A4XX_RB_BLEND_BLUE_F32__SHIFT 0 -static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val) -{ - return ((fui(val)) << A4XX_RB_BLEND_BLUE_F32__SHIFT) & A4XX_RB_BLEND_BLUE_F32__MASK; -} - -#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6 -#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff -#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0 -static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val) -{ - return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK; -} -#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00 -#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8 -static inline uint32_t 
A4XX_RB_BLEND_ALPHA_SINT(uint32_t val) -{ - return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK; -} -#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000 -#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16 -static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val) -{ - return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK; -} - -#define REG_A4XX_RB_BLEND_ALPHA_F32 0x000020f7 -#define A4XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff -#define A4XX_RB_BLEND_ALPHA_F32__SHIFT 0 -static inline uint32_t A4XX_RB_BLEND_ALPHA_F32(float val) -{ - return ((fui(val)) << A4XX_RB_BLEND_ALPHA_F32__SHIFT) & A4XX_RB_BLEND_ALPHA_F32__MASK; -} - -#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8 -#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff -#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0 -static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val) -{ - return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK; -} -#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100 -#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00 -#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9 -static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val) -{ - return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK; -} - -#define REG_A4XX_RB_FS_OUTPUT 0x000020f9 -#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK 0x000000ff -#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT 0 -static inline uint32_t A4XX_RB_FS_OUTPUT_ENABLE_BLEND(uint32_t val) -{ - return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK; -} -#define A4XX_RB_FS_OUTPUT_INDEPENDENT_BLEND 0x00000100 -#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000 -#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16 -static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val) -{ - return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK; -} - -#define REG_A4XX_RB_SAMPLE_COUNT_CONTROL 0x000020fa -#define A4XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002 -#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK 0xfffffffc -#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT 2 -static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK; -} - -#define REG_A4XX_RB_RENDER_COMPONENTS 0x000020fb -#define A4XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f -#define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0 -static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT0(uint32_t val) -{ - return ((val) << A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT0__MASK; -} -#define A4XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0 -#define A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4 -static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT1(uint32_t val) -{ - return ((val) << A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT1__MASK; -} -#define A4XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00 -#define A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8 -static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT2(uint32_t val) -{ - return ((val) << A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT2__MASK; -} -#define A4XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000 -#define A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12 -static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT3(uint32_t val) -{ - 
return ((val) << A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT3__MASK; -} -#define A4XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000 -#define A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16 -static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT4(uint32_t val) -{ - return ((val) << A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT4__MASK; -} -#define A4XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000 -#define A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20 -static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT5(uint32_t val) -{ - return ((val) << A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT5__MASK; -} -#define A4XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000 -#define A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24 -static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT6(uint32_t val) -{ - return ((val) << A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT6__MASK; -} -#define A4XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000 -#define A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28 -static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT7(uint32_t val) -{ - return ((val) << A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT7__MASK; -} - -#define REG_A4XX_RB_COPY_CONTROL 0x000020fc -#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003 -#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0 -static inline uint32_t A4XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val) -{ - return ((val) << A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK; -} -#define A4XX_RB_COPY_CONTROL_MODE__MASK 0x00000070 -#define A4XX_RB_COPY_CONTROL_MODE__SHIFT 4 -static inline uint32_t A4XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val) -{ - return ((val) << A4XX_RB_COPY_CONTROL_MODE__SHIFT) & A4XX_RB_COPY_CONTROL_MODE__MASK; -} -#define A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00 -#define A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8 -static inline uint32_t A4XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val) -{ - return ((val) << A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK; -} -#define A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000 -#define A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14 -static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val) -{ - assert(!(val & 0x3fff)); - return (((val >> 14)) << A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK; -} - -#define REG_A4XX_RB_COPY_DEST_BASE 0x000020fd -#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xffffffe0 -#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 5 -static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK; -} - -#define REG_A4XX_RB_COPY_DEST_PITCH 0x000020fe -#define A4XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff -#define A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0 -static inline uint32_t A4XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A4XX_RB_COPY_DEST_PITCH_PITCH__MASK; -} - -#define REG_A4XX_RB_COPY_DEST_INFO 0x000020ff -#define A4XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc -#define A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2 -static inline uint32_t A4XX_RB_COPY_DEST_INFO_FORMAT(enum a4xx_color_fmt val) -{ - return ((val) << A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A4XX_RB_COPY_DEST_INFO_FORMAT__MASK; -} -#define A4XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300 -#define 
A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8 -static inline uint32_t A4XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A4XX_RB_COPY_DEST_INFO_SWAP__MASK; -} -#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00 -#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10 -static inline uint32_t A4XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val) -{ - return ((val) << A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK; -} -#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000 -#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14 -static inline uint32_t A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val) -{ - return ((val) << A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK; -} -#define A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000 -#define A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18 -static inline uint32_t A4XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val) -{ - return ((val) << A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK; -} -#define A4XX_RB_COPY_DEST_INFO_TILE__MASK 0x03000000 -#define A4XX_RB_COPY_DEST_INFO_TILE__SHIFT 24 -static inline uint32_t A4XX_RB_COPY_DEST_INFO_TILE(enum a4xx_tile_mode val) -{ - return ((val) << A4XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A4XX_RB_COPY_DEST_INFO_TILE__MASK; -} - -#define REG_A4XX_RB_FS_OUTPUT_REG 0x00002100 -#define A4XX_RB_FS_OUTPUT_REG_MRT__MASK 0x0000000f -#define A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT 0 -static inline uint32_t A4XX_RB_FS_OUTPUT_REG_MRT(uint32_t val) -{ - return ((val) << A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_RB_FS_OUTPUT_REG_MRT__MASK; -} -#define A4XX_RB_FS_OUTPUT_REG_FRAG_WRITES_Z 0x00000020 - -#define REG_A4XX_RB_DEPTH_CONTROL 0x00002101 -#define A4XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001 -#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x00000002 -#define A4XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004 -#define A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070 -#define A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4 -static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val) -{ - return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK; -} -#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080 -#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000 -#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000 -#define A4XX_RB_DEPTH_CONTROL_Z_READ_ENABLE 0x80000000 - -#define REG_A4XX_RB_DEPTH_CLEAR 0x00002102 - -#define REG_A4XX_RB_DEPTH_INFO 0x00002103 -#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003 -#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0 -static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum a4xx_depth_format val) -{ - return ((val) << A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK; -} -#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000 -#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12 -static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK; -} - -#define REG_A4XX_RB_DEPTH_PITCH 0x00002104 -#define A4XX_RB_DEPTH_PITCH__MASK 0xffffffff -#define A4XX_RB_DEPTH_PITCH__SHIFT 0 -static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK; -} - -#define REG_A4XX_RB_DEPTH_PITCH2 
0x00002105 -#define A4XX_RB_DEPTH_PITCH2__MASK 0xffffffff -#define A4XX_RB_DEPTH_PITCH2__SHIFT 0 -static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK; -} - -#define REG_A4XX_RB_STENCIL_CONTROL 0x00002106 -#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001 -#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002 -#define A4XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004 -#define A4XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700 -#define A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8 -static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val) -{ - return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC__MASK; -} -#define A4XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800 -#define A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11 -static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val) -{ - return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL__MASK; -} -#define A4XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000 -#define A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14 -static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val) -{ - return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS__MASK; -} -#define A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000 -#define A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17 -static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val) -{ - return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK; -} -#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000 -#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20 -static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val) -{ - return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK; -} -#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000 -#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23 -static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val) -{ - return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK; -} -#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000 -#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26 -static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val) -{ - return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK; -} -#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000 -#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29 -static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val) -{ - return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK; -} - -#define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107 -#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001 - -#define REG_A4XX_RB_STENCIL_INFO 0x00002108 -#define A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001 -#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000 -#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12 -static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK; -} - -#define REG_A4XX_RB_STENCIL_PITCH 0x00002109 -#define A4XX_RB_STENCIL_PITCH__MASK 0xffffffff -#define A4XX_RB_STENCIL_PITCH__SHIFT 0 -static inline 
uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK; -} - -#define REG_A4XX_RB_STENCILREFMASK 0x0000210b -#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff -#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0 -static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILREF(uint32_t val) -{ - return ((val) << A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILREF__MASK; -} -#define A4XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00 -#define A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8 -static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val) -{ - return ((val) << A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILMASK__MASK; -} -#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000 -#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16 -static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val) -{ - return ((val) << A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK; -} - -#define REG_A4XX_RB_STENCILREFMASK_BF 0x0000210c -#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff -#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0 -static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val) -{ - return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK; -} -#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00 -#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8 -static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val) -{ - return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK; -} -#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000 -#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16 -static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val) -{ - return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; -} - -#define REG_A4XX_RB_BIN_OFFSET 0x0000210d -#define A4XX_RB_BIN_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000 -#define A4XX_RB_BIN_OFFSET_X__MASK 0x00007fff -#define A4XX_RB_BIN_OFFSET_X__SHIFT 0 -static inline uint32_t A4XX_RB_BIN_OFFSET_X(uint32_t val) -{ - return ((val) << A4XX_RB_BIN_OFFSET_X__SHIFT) & A4XX_RB_BIN_OFFSET_X__MASK; -} -#define A4XX_RB_BIN_OFFSET_Y__MASK 0x7fff0000 -#define A4XX_RB_BIN_OFFSET_Y__SHIFT 16 -static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val) -{ - return ((val) << A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK; -} - -#define REG_A4XX_RB_VPORT_Z_CLAMP(i0) (0x00002120 + 0x2*(i0)) - -static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MIN(uint32_t i0) { return 0x00002120 + 0x2*i0; } - -static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MAX(uint32_t i0) { return 0x00002121 + 0x2*i0; } - -#define REG_A4XX_RBBM_HW_VERSION 0x00000000 - -#define REG_A4XX_RBBM_HW_CONFIGURATION 0x00000002 - -#define REG_A4XX_RBBM_CLOCK_CTL_TP(i0) (0x00000004 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP_REG(uint32_t i0) { return 0x00000004 + 0x1*i0; } - -#define REG_A4XX_RBBM_CLOCK_CTL2_TP(i0) (0x00000008 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP_REG(uint32_t i0) { return 0x00000008 + 0x1*i0; } - -#define REG_A4XX_RBBM_CLOCK_HYST_TP(i0) (0x0000000c + 0x1*(i0)) - -static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP_REG(uint32_t i0) { 
return 0x0000000c + 0x1*i0; } - -#define REG_A4XX_RBBM_CLOCK_DELAY_TP(i0) (0x00000010 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x00000010 + 0x1*i0; } - -#define REG_A4XX_RBBM_CLOCK_CTL_UCHE 0x00000014 - -#define REG_A4XX_RBBM_CLOCK_CTL2_UCHE 0x00000015 - -#define REG_A4XX_RBBM_CLOCK_CTL3_UCHE 0x00000016 - -#define REG_A4XX_RBBM_CLOCK_CTL4_UCHE 0x00000017 - -#define REG_A4XX_RBBM_CLOCK_HYST_UCHE 0x00000018 - -#define REG_A4XX_RBBM_CLOCK_DELAY_UCHE 0x00000019 - -#define REG_A4XX_RBBM_CLOCK_MODE_GPC 0x0000001a - -#define REG_A4XX_RBBM_CLOCK_DELAY_GPC 0x0000001b - -#define REG_A4XX_RBBM_CLOCK_HYST_GPC 0x0000001c - -#define REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM 0x0000001d - -#define REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000001e - -#define REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x0000001f - -#define REG_A4XX_RBBM_CLOCK_CTL 0x00000020 - -#define REG_A4XX_RBBM_SP_HYST_CNT 0x00000021 - -#define REG_A4XX_RBBM_SW_RESET_CMD 0x00000022 - -#define REG_A4XX_RBBM_AHB_CTL0 0x00000023 - -#define REG_A4XX_RBBM_AHB_CTL1 0x00000024 - -#define REG_A4XX_RBBM_AHB_CMD 0x00000025 - -#define REG_A4XX_RBBM_RB_SUB_BLOCK_SEL_CTL 0x00000026 - -#define REG_A4XX_RBBM_RAM_ACC_63_32 0x00000028 - -#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x0000002b - -#define REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL 0x0000002f - -#define REG_A4XX_RBBM_INTERFACE_HANG_MASK_CTL4 0x00000034 - -#define REG_A4XX_RBBM_INT_CLEAR_CMD 0x00000036 - -#define REG_A4XX_RBBM_INT_0_MASK 0x00000037 - -#define REG_A4XX_RBBM_RBBM_CTL 0x0000003e - -#define REG_A4XX_RBBM_AHB_DEBUG_CTL 0x0000003f - -#define REG_A4XX_RBBM_VBIF_DEBUG_CTL 0x00000041 - -#define REG_A4XX_RBBM_CLOCK_CTL2 0x00000042 - -#define REG_A4XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045 - -#define REG_A4XX_RBBM_RESET_CYCLES 0x00000047 - -#define REG_A4XX_RBBM_EXT_TRACE_BUS_CTL 0x00000049 - -#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_A 0x0000004a - -#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_B 0x0000004b - -#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_C 0x0000004c - -#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d - -#define REG_A4XX_RBBM_POWER_CNTL_IP 0x00000098 -#define A4XX_RBBM_POWER_CNTL_IP_SW_COLLAPSE 0x00000001 -#define A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON 0x00100000 - -#define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c - -#define REG_A4XX_RBBM_PERFCTR_CP_0_HI 0x0000009d - -#define REG_A4XX_RBBM_PERFCTR_CP_1_LO 0x0000009e - -#define REG_A4XX_RBBM_PERFCTR_CP_1_HI 0x0000009f - -#define REG_A4XX_RBBM_PERFCTR_CP_2_LO 0x000000a0 - -#define REG_A4XX_RBBM_PERFCTR_CP_2_HI 0x000000a1 - -#define REG_A4XX_RBBM_PERFCTR_CP_3_LO 0x000000a2 - -#define REG_A4XX_RBBM_PERFCTR_CP_3_HI 0x000000a3 - -#define REG_A4XX_RBBM_PERFCTR_CP_4_LO 0x000000a4 - -#define REG_A4XX_RBBM_PERFCTR_CP_4_HI 0x000000a5 - -#define REG_A4XX_RBBM_PERFCTR_CP_5_LO 0x000000a6 - -#define REG_A4XX_RBBM_PERFCTR_CP_5_HI 0x000000a7 - -#define REG_A4XX_RBBM_PERFCTR_CP_6_LO 0x000000a8 - -#define REG_A4XX_RBBM_PERFCTR_CP_6_HI 0x000000a9 - -#define REG_A4XX_RBBM_PERFCTR_CP_7_LO 0x000000aa - -#define REG_A4XX_RBBM_PERFCTR_CP_7_HI 0x000000ab - -#define REG_A4XX_RBBM_PERFCTR_RBBM_0_LO 0x000000ac - -#define REG_A4XX_RBBM_PERFCTR_RBBM_0_HI 0x000000ad - -#define REG_A4XX_RBBM_PERFCTR_RBBM_1_LO 0x000000ae - -#define REG_A4XX_RBBM_PERFCTR_RBBM_1_HI 0x000000af - -#define REG_A4XX_RBBM_PERFCTR_RBBM_2_LO 0x000000b0 - -#define REG_A4XX_RBBM_PERFCTR_RBBM_2_HI 0x000000b1 - -#define REG_A4XX_RBBM_PERFCTR_RBBM_3_LO 0x000000b2 - -#define REG_A4XX_RBBM_PERFCTR_RBBM_3_HI 0x000000b3 - -#define REG_A4XX_RBBM_PERFCTR_PC_0_LO 0x000000b4 - 
-#define REG_A4XX_RBBM_PERFCTR_PC_0_HI 0x000000b5 - -#define REG_A4XX_RBBM_PERFCTR_PC_1_LO 0x000000b6 - -#define REG_A4XX_RBBM_PERFCTR_PC_1_HI 0x000000b7 - -#define REG_A4XX_RBBM_PERFCTR_PC_2_LO 0x000000b8 - -#define REG_A4XX_RBBM_PERFCTR_PC_2_HI 0x000000b9 - -#define REG_A4XX_RBBM_PERFCTR_PC_3_LO 0x000000ba - -#define REG_A4XX_RBBM_PERFCTR_PC_3_HI 0x000000bb - -#define REG_A4XX_RBBM_PERFCTR_PC_4_LO 0x000000bc - -#define REG_A4XX_RBBM_PERFCTR_PC_4_HI 0x000000bd - -#define REG_A4XX_RBBM_PERFCTR_PC_5_LO 0x000000be - -#define REG_A4XX_RBBM_PERFCTR_PC_5_HI 0x000000bf - -#define REG_A4XX_RBBM_PERFCTR_PC_6_LO 0x000000c0 - -#define REG_A4XX_RBBM_PERFCTR_PC_6_HI 0x000000c1 - -#define REG_A4XX_RBBM_PERFCTR_PC_7_LO 0x000000c2 - -#define REG_A4XX_RBBM_PERFCTR_PC_7_HI 0x000000c3 - -#define REG_A4XX_RBBM_PERFCTR_VFD_0_LO 0x000000c4 - -#define REG_A4XX_RBBM_PERFCTR_VFD_0_HI 0x000000c5 - -#define REG_A4XX_RBBM_PERFCTR_VFD_1_LO 0x000000c6 - -#define REG_A4XX_RBBM_PERFCTR_VFD_1_HI 0x000000c7 - -#define REG_A4XX_RBBM_PERFCTR_VFD_2_LO 0x000000c8 - -#define REG_A4XX_RBBM_PERFCTR_VFD_2_HI 0x000000c9 - -#define REG_A4XX_RBBM_PERFCTR_VFD_3_LO 0x000000ca - -#define REG_A4XX_RBBM_PERFCTR_VFD_3_HI 0x000000cb - -#define REG_A4XX_RBBM_PERFCTR_VFD_4_LO 0x000000cc - -#define REG_A4XX_RBBM_PERFCTR_VFD_4_HI 0x000000cd - -#define REG_A4XX_RBBM_PERFCTR_VFD_5_LO 0x000000ce - -#define REG_A4XX_RBBM_PERFCTR_VFD_5_HI 0x000000cf - -#define REG_A4XX_RBBM_PERFCTR_VFD_6_LO 0x000000d0 - -#define REG_A4XX_RBBM_PERFCTR_VFD_6_HI 0x000000d1 - -#define REG_A4XX_RBBM_PERFCTR_VFD_7_LO 0x000000d2 - -#define REG_A4XX_RBBM_PERFCTR_VFD_7_HI 0x000000d3 - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000d4 - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000d5 - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000d6 - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000d7 - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000d8 - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000d9 - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000da - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000db - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000dc - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000dd - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000de - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000df - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_LO 0x000000e0 - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_HI 0x000000e1 - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_LO 0x000000e2 - -#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_HI 0x000000e3 - -#define REG_A4XX_RBBM_PERFCTR_VPC_0_LO 0x000000e4 - -#define REG_A4XX_RBBM_PERFCTR_VPC_0_HI 0x000000e5 - -#define REG_A4XX_RBBM_PERFCTR_VPC_1_LO 0x000000e6 - -#define REG_A4XX_RBBM_PERFCTR_VPC_1_HI 0x000000e7 - -#define REG_A4XX_RBBM_PERFCTR_VPC_2_LO 0x000000e8 - -#define REG_A4XX_RBBM_PERFCTR_VPC_2_HI 0x000000e9 - -#define REG_A4XX_RBBM_PERFCTR_VPC_3_LO 0x000000ea - -#define REG_A4XX_RBBM_PERFCTR_VPC_3_HI 0x000000eb - -#define REG_A4XX_RBBM_PERFCTR_CCU_0_LO 0x000000ec - -#define REG_A4XX_RBBM_PERFCTR_CCU_0_HI 0x000000ed - -#define REG_A4XX_RBBM_PERFCTR_CCU_1_LO 0x000000ee - -#define REG_A4XX_RBBM_PERFCTR_CCU_1_HI 0x000000ef - -#define REG_A4XX_RBBM_PERFCTR_CCU_2_LO 0x000000f0 - -#define REG_A4XX_RBBM_PERFCTR_CCU_2_HI 0x000000f1 - -#define REG_A4XX_RBBM_PERFCTR_CCU_3_LO 0x000000f2 - -#define REG_A4XX_RBBM_PERFCTR_CCU_3_HI 0x000000f3 - -#define REG_A4XX_RBBM_PERFCTR_TSE_0_LO 0x000000f4 - -#define REG_A4XX_RBBM_PERFCTR_TSE_0_HI 0x000000f5 - -#define REG_A4XX_RBBM_PERFCTR_TSE_1_LO 0x000000f6 - -#define REG_A4XX_RBBM_PERFCTR_TSE_1_HI 0x000000f7 - 
-#define REG_A4XX_RBBM_PERFCTR_TSE_2_LO 0x000000f8 - -#define REG_A4XX_RBBM_PERFCTR_TSE_2_HI 0x000000f9 - -#define REG_A4XX_RBBM_PERFCTR_TSE_3_LO 0x000000fa - -#define REG_A4XX_RBBM_PERFCTR_TSE_3_HI 0x000000fb - -#define REG_A4XX_RBBM_PERFCTR_RAS_0_LO 0x000000fc - -#define REG_A4XX_RBBM_PERFCTR_RAS_0_HI 0x000000fd - -#define REG_A4XX_RBBM_PERFCTR_RAS_1_LO 0x000000fe - -#define REG_A4XX_RBBM_PERFCTR_RAS_1_HI 0x000000ff - -#define REG_A4XX_RBBM_PERFCTR_RAS_2_LO 0x00000100 - -#define REG_A4XX_RBBM_PERFCTR_RAS_2_HI 0x00000101 - -#define REG_A4XX_RBBM_PERFCTR_RAS_3_LO 0x00000102 - -#define REG_A4XX_RBBM_PERFCTR_RAS_3_HI 0x00000103 - -#define REG_A4XX_RBBM_PERFCTR_UCHE_0_LO 0x00000104 - -#define REG_A4XX_RBBM_PERFCTR_UCHE_0_HI 0x00000105 - -#define REG_A4XX_RBBM_PERFCTR_UCHE_1_LO 0x00000106 - -#define REG_A4XX_RBBM_PERFCTR_UCHE_1_HI 0x00000107 - -#define REG_A4XX_RBBM_PERFCTR_UCHE_2_LO 0x00000108 - -#define REG_A4XX_RBBM_PERFCTR_UCHE_2_HI 0x00000109 - -#define REG_A4XX_RBBM_PERFCTR_UCHE_3_LO 0x0000010a - -#define REG_A4XX_RBBM_PERFCTR_UCHE_3_HI 0x0000010b - -#define REG_A4XX_RBBM_PERFCTR_UCHE_4_LO 0x0000010c - -#define REG_A4XX_RBBM_PERFCTR_UCHE_4_HI 0x0000010d - -#define REG_A4XX_RBBM_PERFCTR_UCHE_5_LO 0x0000010e - -#define REG_A4XX_RBBM_PERFCTR_UCHE_5_HI 0x0000010f - -#define REG_A4XX_RBBM_PERFCTR_UCHE_6_LO 0x00000110 - -#define REG_A4XX_RBBM_PERFCTR_UCHE_6_HI 0x00000111 - -#define REG_A4XX_RBBM_PERFCTR_UCHE_7_LO 0x00000112 - -#define REG_A4XX_RBBM_PERFCTR_UCHE_7_HI 0x00000113 - -#define REG_A4XX_RBBM_PERFCTR_TP_0_LO 0x00000114 - -#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115 - -#define REG_A4XX_RBBM_PERFCTR_TP_1_LO 0x00000116 - -#define REG_A4XX_RBBM_PERFCTR_TP_1_HI 0x00000117 - -#define REG_A4XX_RBBM_PERFCTR_TP_2_LO 0x00000118 - -#define REG_A4XX_RBBM_PERFCTR_TP_2_HI 0x00000119 - -#define REG_A4XX_RBBM_PERFCTR_TP_3_LO 0x0000011a - -#define REG_A4XX_RBBM_PERFCTR_TP_3_HI 0x0000011b - -#define REG_A4XX_RBBM_PERFCTR_TP_4_LO 0x0000011c - -#define REG_A4XX_RBBM_PERFCTR_TP_4_HI 0x0000011d - -#define REG_A4XX_RBBM_PERFCTR_TP_5_LO 0x0000011e - -#define REG_A4XX_RBBM_PERFCTR_TP_5_HI 0x0000011f - -#define REG_A4XX_RBBM_PERFCTR_TP_6_LO 0x00000120 - -#define REG_A4XX_RBBM_PERFCTR_TP_6_HI 0x00000121 - -#define REG_A4XX_RBBM_PERFCTR_TP_7_LO 0x00000122 - -#define REG_A4XX_RBBM_PERFCTR_TP_7_HI 0x00000123 - -#define REG_A4XX_RBBM_PERFCTR_SP_0_LO 0x00000124 - -#define REG_A4XX_RBBM_PERFCTR_SP_0_HI 0x00000125 - -#define REG_A4XX_RBBM_PERFCTR_SP_1_LO 0x00000126 - -#define REG_A4XX_RBBM_PERFCTR_SP_1_HI 0x00000127 - -#define REG_A4XX_RBBM_PERFCTR_SP_2_LO 0x00000128 - -#define REG_A4XX_RBBM_PERFCTR_SP_2_HI 0x00000129 - -#define REG_A4XX_RBBM_PERFCTR_SP_3_LO 0x0000012a - -#define REG_A4XX_RBBM_PERFCTR_SP_3_HI 0x0000012b - -#define REG_A4XX_RBBM_PERFCTR_SP_4_LO 0x0000012c - -#define REG_A4XX_RBBM_PERFCTR_SP_4_HI 0x0000012d - -#define REG_A4XX_RBBM_PERFCTR_SP_5_LO 0x0000012e - -#define REG_A4XX_RBBM_PERFCTR_SP_5_HI 0x0000012f - -#define REG_A4XX_RBBM_PERFCTR_SP_6_LO 0x00000130 - -#define REG_A4XX_RBBM_PERFCTR_SP_6_HI 0x00000131 - -#define REG_A4XX_RBBM_PERFCTR_SP_7_LO 0x00000132 - -#define REG_A4XX_RBBM_PERFCTR_SP_7_HI 0x00000133 - -#define REG_A4XX_RBBM_PERFCTR_SP_8_LO 0x00000134 - -#define REG_A4XX_RBBM_PERFCTR_SP_8_HI 0x00000135 - -#define REG_A4XX_RBBM_PERFCTR_SP_9_LO 0x00000136 - -#define REG_A4XX_RBBM_PERFCTR_SP_9_HI 0x00000137 - -#define REG_A4XX_RBBM_PERFCTR_SP_10_LO 0x00000138 - -#define REG_A4XX_RBBM_PERFCTR_SP_10_HI 0x00000139 - -#define REG_A4XX_RBBM_PERFCTR_SP_11_LO 0x0000013a - -#define 
REG_A4XX_RBBM_PERFCTR_SP_11_HI 0x0000013b - -#define REG_A4XX_RBBM_PERFCTR_RB_0_LO 0x0000013c - -#define REG_A4XX_RBBM_PERFCTR_RB_0_HI 0x0000013d - -#define REG_A4XX_RBBM_PERFCTR_RB_1_LO 0x0000013e - -#define REG_A4XX_RBBM_PERFCTR_RB_1_HI 0x0000013f - -#define REG_A4XX_RBBM_PERFCTR_RB_2_LO 0x00000140 - -#define REG_A4XX_RBBM_PERFCTR_RB_2_HI 0x00000141 - -#define REG_A4XX_RBBM_PERFCTR_RB_3_LO 0x00000142 - -#define REG_A4XX_RBBM_PERFCTR_RB_3_HI 0x00000143 - -#define REG_A4XX_RBBM_PERFCTR_RB_4_LO 0x00000144 - -#define REG_A4XX_RBBM_PERFCTR_RB_4_HI 0x00000145 - -#define REG_A4XX_RBBM_PERFCTR_RB_5_LO 0x00000146 - -#define REG_A4XX_RBBM_PERFCTR_RB_5_HI 0x00000147 - -#define REG_A4XX_RBBM_PERFCTR_RB_6_LO 0x00000148 - -#define REG_A4XX_RBBM_PERFCTR_RB_6_HI 0x00000149 - -#define REG_A4XX_RBBM_PERFCTR_RB_7_LO 0x0000014a - -#define REG_A4XX_RBBM_PERFCTR_RB_7_HI 0x0000014b - -#define REG_A4XX_RBBM_PERFCTR_VSC_0_LO 0x0000014c - -#define REG_A4XX_RBBM_PERFCTR_VSC_0_HI 0x0000014d - -#define REG_A4XX_RBBM_PERFCTR_VSC_1_LO 0x0000014e - -#define REG_A4XX_RBBM_PERFCTR_VSC_1_HI 0x0000014f - -#define REG_A4XX_RBBM_PERFCTR_PWR_0_LO 0x00000166 - -#define REG_A4XX_RBBM_PERFCTR_PWR_0_HI 0x00000167 - -#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168 - -#define REG_A4XX_RBBM_PERFCTR_PWR_1_HI 0x00000169 - -#define REG_A4XX_RBBM_ALWAYSON_COUNTER_LO 0x0000016e - -#define REG_A4XX_RBBM_ALWAYSON_COUNTER_HI 0x0000016f - -#define REG_A4XX_RBBM_CLOCK_CTL_SP(i0) (0x00000068 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; } - -#define REG_A4XX_RBBM_CLOCK_CTL2_SP(i0) (0x0000006c + 0x1*(i0)) - -static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP_REG(uint32_t i0) { return 0x0000006c + 0x1*i0; } - -#define REG_A4XX_RBBM_CLOCK_HYST_SP(i0) (0x00000070 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP_REG(uint32_t i0) { return 0x00000070 + 0x1*i0; } - -#define REG_A4XX_RBBM_CLOCK_DELAY_SP(i0) (0x00000074 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP_REG(uint32_t i0) { return 0x00000074 + 0x1*i0; } - -#define REG_A4XX_RBBM_CLOCK_CTL_RB(i0) (0x00000078 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB_REG(uint32_t i0) { return 0x00000078 + 0x1*i0; } - -#define REG_A4XX_RBBM_CLOCK_CTL2_RB(i0) (0x0000007c + 0x1*(i0)) - -static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB_REG(uint32_t i0) { return 0x0000007c + 0x1*i0; } - -#define REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i0) (0x00000082 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU_REG(uint32_t i0) { return 0x00000082 + 0x1*i0; } - -#define REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i0) (0x00000086 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU_REG(uint32_t i0) { return 0x00000086 + 0x1*i0; } - -#define REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM 0x00000080 - -#define REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM 0x00000081 - -#define REG_A4XX_RBBM_CLOCK_CTL_HLSQ 0x0000008a - -#define REG_A4XX_RBBM_CLOCK_HYST_HLSQ 0x0000008b - -#define REG_A4XX_RBBM_CLOCK_DELAY_HLSQ 0x0000008c - -#define REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM 0x0000008d - -#define REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i0) (0x0000008e + 0x1*(i0)) - -static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; } - -#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0 0x00000099 - -#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1 0x0000009a - -#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170 - -#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD0 0x00000171 - -#define 
REG_A4XX_RBBM_PERFCTR_LOAD_CMD1 0x00000172 - -#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD2 0x00000173 - -#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000174 - -#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175 - -#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000176 - -#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000177 - -#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000178 - -#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_3 0x00000179 - -#define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a - -#define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d - -#define REG_A4XX_RBBM_CLOCK_STATUS 0x00000182 - -#define REG_A4XX_RBBM_AHB_STATUS 0x00000189 - -#define REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS 0x0000018c - -#define REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS 0x0000018d - -#define REG_A4XX_RBBM_AHB_ERROR_STATUS 0x0000018f - -#define REG_A4XX_RBBM_STATUS 0x00000191 -#define A4XX_RBBM_STATUS_HI_BUSY 0x00000001 -#define A4XX_RBBM_STATUS_CP_ME_BUSY 0x00000002 -#define A4XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004 -#define A4XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000 -#define A4XX_RBBM_STATUS_VBIF_BUSY 0x00008000 -#define A4XX_RBBM_STATUS_TSE_BUSY 0x00010000 -#define A4XX_RBBM_STATUS_RAS_BUSY 0x00020000 -#define A4XX_RBBM_STATUS_RB_BUSY 0x00040000 -#define A4XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000 -#define A4XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000 -#define A4XX_RBBM_STATUS_VFD_BUSY 0x00200000 -#define A4XX_RBBM_STATUS_VPC_BUSY 0x00400000 -#define A4XX_RBBM_STATUS_UCHE_BUSY 0x00800000 -#define A4XX_RBBM_STATUS_SP_BUSY 0x01000000 -#define A4XX_RBBM_STATUS_TPL1_BUSY 0x02000000 -#define A4XX_RBBM_STATUS_MARB_BUSY 0x04000000 -#define A4XX_RBBM_STATUS_VSC_BUSY 0x08000000 -#define A4XX_RBBM_STATUS_ARB_BUSY 0x10000000 -#define A4XX_RBBM_STATUS_HLSQ_BUSY 0x20000000 -#define A4XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000 -#define A4XX_RBBM_STATUS_GPU_BUSY 0x80000000 - -#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f - -#define REG_A4XX_RBBM_POWER_STATUS 0x000001b0 -#define A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON 0x00100000 - -#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2 0x000001b8 - -#define REG_A4XX_CP_SCRATCH_UMASK 0x00000228 - -#define REG_A4XX_CP_SCRATCH_ADDR 0x00000229 - -#define REG_A4XX_CP_RB_BASE 0x00000200 - -#define REG_A4XX_CP_RB_CNTL 0x00000201 - -#define REG_A4XX_CP_RB_WPTR 0x00000205 - -#define REG_A4XX_CP_RB_RPTR_ADDR 0x00000203 - -#define REG_A4XX_CP_RB_RPTR 0x00000204 - -#define REG_A4XX_CP_IB1_BASE 0x00000206 - -#define REG_A4XX_CP_IB1_BUFSZ 0x00000207 - -#define REG_A4XX_CP_IB2_BASE 0x00000208 - -#define REG_A4XX_CP_IB2_BUFSZ 0x00000209 - -#define REG_A4XX_CP_ME_NRT_ADDR 0x0000020c - -#define REG_A4XX_CP_ME_NRT_DATA 0x0000020d - -#define REG_A4XX_CP_ME_RB_DONE_DATA 0x00000217 - -#define REG_A4XX_CP_QUEUE_THRESH2 0x00000219 - -#define REG_A4XX_CP_MERCIU_SIZE 0x0000021b - -#define REG_A4XX_CP_ROQ_ADDR 0x0000021c - -#define REG_A4XX_CP_ROQ_DATA 0x0000021d - -#define REG_A4XX_CP_MEQ_ADDR 0x0000021e - -#define REG_A4XX_CP_MEQ_DATA 0x0000021f - -#define REG_A4XX_CP_MERCIU_ADDR 0x00000220 - -#define REG_A4XX_CP_MERCIU_DATA 0x00000221 - -#define REG_A4XX_CP_MERCIU_DATA2 0x00000222 - -#define REG_A4XX_CP_PFP_UCODE_ADDR 0x00000223 - -#define REG_A4XX_CP_PFP_UCODE_DATA 0x00000224 - -#define REG_A4XX_CP_ME_RAM_WADDR 0x00000225 - -#define REG_A4XX_CP_ME_RAM_RADDR 0x00000226 - -#define REG_A4XX_CP_ME_RAM_DATA 0x00000227 - -#define REG_A4XX_CP_PREEMPT 0x0000022a - -#define REG_A4XX_CP_CNTL 0x0000022c - -#define REG_A4XX_CP_ME_CNTL 0x0000022d - -#define REG_A4XX_CP_DEBUG 0x0000022e - -#define REG_A4XX_CP_DEBUG_ECO_CONTROL 0x00000231 - -#define 
REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232 - -#define REG_A4XX_CP_PROTECT(i0) (0x00000240 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; } -#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff -#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0 -static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val) -{ - return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK; -} -#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000 -#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24 -static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val) -{ - return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK; -} -#define A4XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000 -#define A4XX_CP_PROTECT_REG_TRAP_READ 0x40000000 - -#define REG_A4XX_CP_PROTECT_CTRL 0x00000250 - -#define REG_A4XX_CP_ST_BASE 0x000004c0 - -#define REG_A4XX_CP_STQ_AVAIL 0x000004ce - -#define REG_A4XX_CP_MERCIU_STAT 0x000004d0 - -#define REG_A4XX_CP_WFI_PEND_CTR 0x000004d2 - -#define REG_A4XX_CP_HW_FAULT 0x000004d8 - -#define REG_A4XX_CP_PROTECT_STATUS 0x000004da - -#define REG_A4XX_CP_EVENTS_IN_FLIGHT 0x000004dd - -#define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500 - -#define REG_A4XX_CP_PERFCTR_CP_SEL_1 0x00000501 - -#define REG_A4XX_CP_PERFCTR_CP_SEL_2 0x00000502 - -#define REG_A4XX_CP_PERFCTR_CP_SEL_3 0x00000503 - -#define REG_A4XX_CP_PERFCTR_CP_SEL_4 0x00000504 - -#define REG_A4XX_CP_PERFCTR_CP_SEL_5 0x00000505 - -#define REG_A4XX_CP_PERFCTR_CP_SEL_6 0x00000506 - -#define REG_A4XX_CP_PERFCTR_CP_SEL_7 0x00000507 - -#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b - -#define REG_A4XX_CP_SCRATCH(i0) (0x00000578 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578 + 0x1*i0; } - -#define REG_A4XX_SP_VS_STATUS 0x00000ec0 - -#define REG_A4XX_SP_MODE_CONTROL 0x00000ec3 - -#define REG_A4XX_SP_PERFCTR_SP_SEL_0 0x00000ec4 - -#define REG_A4XX_SP_PERFCTR_SP_SEL_1 0x00000ec5 - -#define REG_A4XX_SP_PERFCTR_SP_SEL_2 0x00000ec6 - -#define REG_A4XX_SP_PERFCTR_SP_SEL_3 0x00000ec7 - -#define REG_A4XX_SP_PERFCTR_SP_SEL_4 0x00000ec8 - -#define REG_A4XX_SP_PERFCTR_SP_SEL_5 0x00000ec9 - -#define REG_A4XX_SP_PERFCTR_SP_SEL_6 0x00000eca - -#define REG_A4XX_SP_PERFCTR_SP_SEL_7 0x00000ecb - -#define REG_A4XX_SP_PERFCTR_SP_SEL_8 0x00000ecc - -#define REG_A4XX_SP_PERFCTR_SP_SEL_9 0x00000ecd - -#define REG_A4XX_SP_PERFCTR_SP_SEL_10 0x00000ece - -#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf - -#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0 -#define A4XX_SP_SP_CTRL_REG_BINNING_PASS 0x00080000 - -#define REG_A4XX_SP_INSTR_CACHE_CTRL 0x000022c1 -#define A4XX_SP_INSTR_CACHE_CTRL_VS_BUFFER 0x00000080 -#define A4XX_SP_INSTR_CACHE_CTRL_FS_BUFFER 0x00000100 -#define A4XX_SP_INSTR_CACHE_CTRL_INSTR_BUFFER 0x00000400 - -#define REG_A4XX_SP_VS_CTRL_REG0 0x000022c4 -#define A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001 -#define A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0 -static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) -{ - return ((val) << A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK; -} -#define A4XX_SP_VS_CTRL_REG0_VARYING 0x00000002 -#define A4XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004 -#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 -#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 -static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << 
A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 -#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 -static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000 -#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18 -static inline uint32_t A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val) -{ - return ((val) << A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK; -} -#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000 -#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20 -static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK; -} -#define A4XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000 -#define A4XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000 - -#define REG_A4XX_SP_VS_CTRL_REG1 0x000022c5 -#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff -#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0 -static inline uint32_t A4XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val) -{ - return ((val) << A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK; -} -#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000 -#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24 -static inline uint32_t A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val) -{ - return ((val) << A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK; -} - -#define REG_A4XX_SP_VS_PARAM_REG 0x000022c6 -#define A4XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff -#define A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0 -static inline uint32_t A4XX_SP_VS_PARAM_REG_POSREGID(uint32_t val) -{ - return ((val) << A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_POSREGID__MASK; -} -#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00 -#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8 -static inline uint32_t A4XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val) -{ - return ((val) << A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK; -} -#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000 -#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20 -static inline uint32_t A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val) -{ - return ((val) << A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK; -} - -#define REG_A4XX_SP_VS_OUT(i0) (0x000022c7 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; } -#define A4XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff -#define A4XX_SP_VS_OUT_REG_A_REGID__SHIFT 0 -static inline uint32_t A4XX_SP_VS_OUT_REG_A_REGID(uint32_t val) -{ - return ((val) << A4XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_A_REGID__MASK; -} -#define A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00 -#define A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9 -static inline uint32_t A4XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val) -{ - return ((val) << A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK; -} -#define A4XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000 -#define A4XX_SP_VS_OUT_REG_B_REGID__SHIFT 16 -static inline uint32_t A4XX_SP_VS_OUT_REG_B_REGID(uint32_t val) -{ - 
return ((val) << A4XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_B_REGID__MASK; -} -#define A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000 -#define A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25 -static inline uint32_t A4XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val) -{ - return ((val) << A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK; -} - -#define REG_A4XX_SP_VS_VPC_DST(i0) (0x000022d8 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d8 + 0x1*i0; } -#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff -#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0 -static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val) -{ - return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK; -} -#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 -#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8 -static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val) -{ - return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK; -} -#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 -#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16 -static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val) -{ - return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK; -} -#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 -#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24 -static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val) -{ - return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK; -} - -#define REG_A4XX_SP_VS_OBJ_OFFSET_REG 0x000022e0 -#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 -#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 -static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK; -} -#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000 -#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25 -static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; -} - -#define REG_A4XX_SP_VS_OBJ_START 0x000022e1 - -#define REG_A4XX_SP_VS_PVT_MEM_PARAM 0x000022e2 - -#define REG_A4XX_SP_VS_PVT_MEM_ADDR 0x000022e3 - -#define REG_A4XX_SP_VS_LENGTH_REG 0x000022e5 - -#define REG_A4XX_SP_FS_CTRL_REG0 0x000022e8 -#define A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001 -#define A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0 -static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) -{ - return ((val) << A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK; -} -#define A4XX_SP_FS_CTRL_REG0_VARYING 0x00000002 -#define A4XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004 -#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 -#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 -static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 -#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 -static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << 
A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000 -#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18 -static inline uint32_t A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val) -{ - return ((val) << A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK; -} -#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000 -#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20 -static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK; -} -#define A4XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000 -#define A4XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000 - -#define REG_A4XX_SP_FS_CTRL_REG1 0x000022e9 -#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff -#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0 -static inline uint32_t A4XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val) -{ - return ((val) << A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK; -} -#define A4XX_SP_FS_CTRL_REG1_FACENESS 0x00080000 -#define A4XX_SP_FS_CTRL_REG1_VARYING 0x00100000 -#define A4XX_SP_FS_CTRL_REG1_FRAGCOORD 0x00200000 - -#define REG_A4XX_SP_FS_OBJ_OFFSET_REG 0x000022ea -#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 -#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 -static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK; -} -#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000 -#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25 -static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; -} - -#define REG_A4XX_SP_FS_OBJ_START 0x000022eb - -#define REG_A4XX_SP_FS_PVT_MEM_PARAM 0x000022ec - -#define REG_A4XX_SP_FS_PVT_MEM_ADDR 0x000022ed - -#define REG_A4XX_SP_FS_LENGTH_REG 0x000022ef - -#define REG_A4XX_SP_FS_OUTPUT_REG 0x000022f0 -#define A4XX_SP_FS_OUTPUT_REG_MRT__MASK 0x0000000f -#define A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0 -static inline uint32_t A4XX_SP_FS_OUTPUT_REG_MRT(uint32_t val) -{ - return ((val) << A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_SP_FS_OUTPUT_REG_MRT__MASK; -} -#define A4XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080 -#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00 -#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8 -static inline uint32_t A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val) -{ - return ((val) << A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK; -} -#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK 0xff000000 -#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT 24 -static inline uint32_t A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID(uint32_t val) -{ - return ((val) << A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK; -} - -#define REG_A4XX_SP_FS_MRT(i0) (0x000022f1 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f1 + 0x1*i0; } -#define A4XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff -#define A4XX_SP_FS_MRT_REG_REGID__SHIFT 0 -static inline uint32_t A4XX_SP_FS_MRT_REG_REGID(uint32_t val) -{ - return ((val) << 
A4XX_SP_FS_MRT_REG_REGID__SHIFT) & A4XX_SP_FS_MRT_REG_REGID__MASK; -} -#define A4XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100 -#define A4XX_SP_FS_MRT_REG_COLOR_SINT 0x00000400 -#define A4XX_SP_FS_MRT_REG_COLOR_UINT 0x00000800 -#define A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK 0x0003f000 -#define A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT 12 -static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val) -{ - return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK; -} -#define A4XX_SP_FS_MRT_REG_COLOR_SRGB 0x00040000 - -#define REG_A4XX_SP_CS_CTRL_REG0 0x00002300 -#define A4XX_SP_CS_CTRL_REG0_THREADMODE__MASK 0x00000001 -#define A4XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT 0 -static inline uint32_t A4XX_SP_CS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) -{ - return ((val) << A4XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_CS_CTRL_REG0_THREADMODE__MASK; -} -#define A4XX_SP_CS_CTRL_REG0_VARYING 0x00000002 -#define A4XX_SP_CS_CTRL_REG0_CACHEINVALID 0x00000004 -#define A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 -#define A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 -static inline uint32_t A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 -#define A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 -static inline uint32_t A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000 -#define A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18 -static inline uint32_t A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val) -{ - return ((val) << A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__MASK; -} -#define A4XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000 -#define A4XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20 -static inline uint32_t A4XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A4XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_CS_CTRL_REG0_THREADSIZE__MASK; -} -#define A4XX_SP_CS_CTRL_REG0_SUPERTHREADMODE 0x00200000 -#define A4XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x00400000 - -#define REG_A4XX_SP_CS_OBJ_OFFSET_REG 0x00002301 - -#define REG_A4XX_SP_CS_OBJ_START 0x00002302 - -#define REG_A4XX_SP_CS_PVT_MEM_PARAM 0x00002303 - -#define REG_A4XX_SP_CS_PVT_MEM_ADDR 0x00002304 - -#define REG_A4XX_SP_CS_PVT_MEM_SIZE 0x00002305 - -#define REG_A4XX_SP_CS_LENGTH_REG 0x00002306 - -#define REG_A4XX_SP_HS_OBJ_OFFSET_REG 0x0000230d -#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 -#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 -static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK; -} -#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000 -#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25 -static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; -} - -#define REG_A4XX_SP_HS_OBJ_START 0x0000230e - -#define REG_A4XX_SP_HS_PVT_MEM_PARAM 0x0000230f - -#define REG_A4XX_SP_HS_PVT_MEM_ADDR 0x00002310 - -#define 
REG_A4XX_SP_HS_LENGTH_REG 0x00002312 - -#define REG_A4XX_SP_DS_PARAM_REG 0x0000231a -#define A4XX_SP_DS_PARAM_REG_POSREGID__MASK 0x000000ff -#define A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT 0 -static inline uint32_t A4XX_SP_DS_PARAM_REG_POSREGID(uint32_t val) -{ - return ((val) << A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_DS_PARAM_REG_POSREGID__MASK; -} -#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000 -#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20 -static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val) -{ - return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK; -} - -#define REG_A4XX_SP_DS_OUT(i0) (0x0000231b + 0x1*(i0)) - -static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; } -#define A4XX_SP_DS_OUT_REG_A_REGID__MASK 0x000001ff -#define A4XX_SP_DS_OUT_REG_A_REGID__SHIFT 0 -static inline uint32_t A4XX_SP_DS_OUT_REG_A_REGID(uint32_t val) -{ - return ((val) << A4XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_A_REGID__MASK; -} -#define A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00001e00 -#define A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 9 -static inline uint32_t A4XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val) -{ - return ((val) << A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK; -} -#define A4XX_SP_DS_OUT_REG_B_REGID__MASK 0x01ff0000 -#define A4XX_SP_DS_OUT_REG_B_REGID__SHIFT 16 -static inline uint32_t A4XX_SP_DS_OUT_REG_B_REGID(uint32_t val) -{ - return ((val) << A4XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_B_REGID__MASK; -} -#define A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x1e000000 -#define A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 25 -static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val) -{ - return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK; -} - -#define REG_A4XX_SP_DS_VPC_DST(i0) (0x0000232c + 0x1*(i0)) - -static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; } -#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff -#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0 -static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val) -{ - return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK; -} -#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 -#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8 -static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val) -{ - return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK; -} -#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 -#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16 -static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val) -{ - return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK; -} -#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 -#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24 -static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val) -{ - return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK; -} - -#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334 -#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 -#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 -static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK; -} 
-#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000 -#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25 -static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; -} - -#define REG_A4XX_SP_DS_OBJ_START 0x00002335 - -#define REG_A4XX_SP_DS_PVT_MEM_PARAM 0x00002336 - -#define REG_A4XX_SP_DS_PVT_MEM_ADDR 0x00002337 - -#define REG_A4XX_SP_DS_LENGTH_REG 0x00002339 - -#define REG_A4XX_SP_GS_PARAM_REG 0x00002341 -#define A4XX_SP_GS_PARAM_REG_POSREGID__MASK 0x000000ff -#define A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT 0 -static inline uint32_t A4XX_SP_GS_PARAM_REG_POSREGID(uint32_t val) -{ - return ((val) << A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_POSREGID__MASK; -} -#define A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK 0x0000ff00 -#define A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT 8 -static inline uint32_t A4XX_SP_GS_PARAM_REG_PRIMREGID(uint32_t val) -{ - return ((val) << A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK; -} -#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000 -#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20 -static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val) -{ - return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK; -} - -#define REG_A4XX_SP_GS_OUT(i0) (0x00002342 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; } -#define A4XX_SP_GS_OUT_REG_A_REGID__MASK 0x000001ff -#define A4XX_SP_GS_OUT_REG_A_REGID__SHIFT 0 -static inline uint32_t A4XX_SP_GS_OUT_REG_A_REGID(uint32_t val) -{ - return ((val) << A4XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_A_REGID__MASK; -} -#define A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00001e00 -#define A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 9 -static inline uint32_t A4XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val) -{ - return ((val) << A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK; -} -#define A4XX_SP_GS_OUT_REG_B_REGID__MASK 0x01ff0000 -#define A4XX_SP_GS_OUT_REG_B_REGID__SHIFT 16 -static inline uint32_t A4XX_SP_GS_OUT_REG_B_REGID(uint32_t val) -{ - return ((val) << A4XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_B_REGID__MASK; -} -#define A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x1e000000 -#define A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 25 -static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val) -{ - return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK; -} - -#define REG_A4XX_SP_GS_VPC_DST(i0) (0x00002353 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; } -#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff -#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0 -static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val) -{ - return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK; -} -#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 -#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8 -static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val) -{ - return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK; -} -#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 -#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16 -static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val) -{ - 
return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK; -} -#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 -#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24 -static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val) -{ - return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK; -} - -#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b -#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 -#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 -static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK; -} -#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000 -#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25 -static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; -} - -#define REG_A4XX_SP_GS_OBJ_START 0x0000235c - -#define REG_A4XX_SP_GS_PVT_MEM_PARAM 0x0000235d - -#define REG_A4XX_SP_GS_PVT_MEM_ADDR 0x0000235e - -#define REG_A4XX_SP_GS_LENGTH_REG 0x00002360 - -#define REG_A4XX_VPC_DEBUG_RAM_SEL 0x00000e60 - -#define REG_A4XX_VPC_DEBUG_RAM_READ 0x00000e61 - -#define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64 - -#define REG_A4XX_VPC_PERFCTR_VPC_SEL_0 0x00000e65 - -#define REG_A4XX_VPC_PERFCTR_VPC_SEL_1 0x00000e66 - -#define REG_A4XX_VPC_PERFCTR_VPC_SEL_2 0x00000e67 - -#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68 - -#define REG_A4XX_VPC_ATTR 0x00002140 -#define A4XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff -#define A4XX_VPC_ATTR_TOTALATTR__SHIFT 0 -static inline uint32_t A4XX_VPC_ATTR_TOTALATTR(uint32_t val) -{ - return ((val) << A4XX_VPC_ATTR_TOTALATTR__SHIFT) & A4XX_VPC_ATTR_TOTALATTR__MASK; -} -#define A4XX_VPC_ATTR_PSIZE 0x00000200 -#define A4XX_VPC_ATTR_THRDASSIGN__MASK 0x00003000 -#define A4XX_VPC_ATTR_THRDASSIGN__SHIFT 12 -static inline uint32_t A4XX_VPC_ATTR_THRDASSIGN(uint32_t val) -{ - return ((val) << A4XX_VPC_ATTR_THRDASSIGN__SHIFT) & A4XX_VPC_ATTR_THRDASSIGN__MASK; -} -#define A4XX_VPC_ATTR_ENABLE 0x02000000 - -#define REG_A4XX_VPC_PACK 0x00002141 -#define A4XX_VPC_PACK_NUMBYPASSVAR__MASK 0x000000ff -#define A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT 0 -static inline uint32_t A4XX_VPC_PACK_NUMBYPASSVAR(uint32_t val) -{ - return ((val) << A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT) & A4XX_VPC_PACK_NUMBYPASSVAR__MASK; -} -#define A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00 -#define A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8 -static inline uint32_t A4XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val) -{ - return ((val) << A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK; -} -#define A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000 -#define A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16 -static inline uint32_t A4XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val) -{ - return ((val) << A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK; -} - -#define REG_A4XX_VPC_VARYING_INTERP(i0) (0x00002142 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002142 + 0x1*i0; } - -#define REG_A4XX_VPC_VARYING_PS_REPL(i0) (0x0000214a + 0x1*(i0)) - -static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000214a + 0x1*i0; } - -#define REG_A4XX_VPC_SO_FLUSH_WADDR_3 0x0000216e - -#define REG_A4XX_VSC_BIN_SIZE 
0x00000c00 -#define A4XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f -#define A4XX_VSC_BIN_SIZE_WIDTH__SHIFT 0 -static inline uint32_t A4XX_VSC_BIN_SIZE_WIDTH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A4XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A4XX_VSC_BIN_SIZE_WIDTH__MASK; -} -#define A4XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0 -#define A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5 -static inline uint32_t A4XX_VSC_BIN_SIZE_HEIGHT(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A4XX_VSC_BIN_SIZE_HEIGHT__MASK; -} - -#define REG_A4XX_VSC_SIZE_ADDRESS 0x00000c01 - -#define REG_A4XX_VSC_SIZE_ADDRESS2 0x00000c02 - -#define REG_A4XX_VSC_DEBUG_ECO_CONTROL 0x00000c03 - -#define REG_A4XX_VSC_PIPE_CONFIG(i0) (0x00000c08 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c08 + 0x1*i0; } -#define A4XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff -#define A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0 -static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_X(uint32_t val) -{ - return ((val) << A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_X__MASK; -} -#define A4XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00 -#define A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10 -static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val) -{ - return ((val) << A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_Y__MASK; -} -#define A4XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000 -#define A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20 -static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_W(uint32_t val) -{ - return ((val) << A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_W__MASK; -} -#define A4XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000 -#define A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24 -static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_H(uint32_t val) -{ - return ((val) << A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_H__MASK; -} - -#define REG_A4XX_VSC_PIPE_DATA_ADDRESS(i0) (0x00000c10 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; } - -#define REG_A4XX_VSC_PIPE_DATA_LENGTH(i0) (0x00000c18 + 0x1*(i0)) - -static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c18 + 0x1*i0; } - -#define REG_A4XX_VSC_PIPE_PARTIAL_POSN_1 0x00000c41 - -#define REG_A4XX_VSC_PERFCTR_VSC_SEL_0 0x00000c50 - -#define REG_A4XX_VSC_PERFCTR_VSC_SEL_1 0x00000c51 - -#define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40 - -#define REG_A4XX_VFD_PERFCTR_VFD_SEL_0 0x00000e43 - -#define REG_A4XX_VFD_PERFCTR_VFD_SEL_1 0x00000e44 - -#define REG_A4XX_VFD_PERFCTR_VFD_SEL_2 0x00000e45 - -#define REG_A4XX_VFD_PERFCTR_VFD_SEL_3 0x00000e46 - -#define REG_A4XX_VFD_PERFCTR_VFD_SEL_4 0x00000e47 - -#define REG_A4XX_VFD_PERFCTR_VFD_SEL_5 0x00000e48 - -#define REG_A4XX_VFD_PERFCTR_VFD_SEL_6 0x00000e49 - -#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a - -#define REG_A4XX_VGT_CL_INITIATOR 0x000021d0 - -#define REG_A4XX_VGT_EVENT_INITIATOR 0x000021d9 - -#define REG_A4XX_VFD_CONTROL_0 0x00002200 -#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x000000ff -#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0 -static inline uint32_t A4XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val) -{ - return ((val) << A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK; -} -#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK 0x0001fe00 -#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT 9 -static inline uint32_t A4XX_VFD_CONTROL_0_BYPASSATTROVS(uint32_t val) -{ - return ((val) << 
A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT) & A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK; -} -#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x03f00000 -#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 20 -static inline uint32_t A4XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val) -{ - return ((val) << A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK; -} -#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xfc000000 -#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 26 -static inline uint32_t A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val) -{ - return ((val) << A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK; -} - -#define REG_A4XX_VFD_CONTROL_1 0x00002201 -#define A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff -#define A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0 -static inline uint32_t A4XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val) -{ - return ((val) << A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK; -} -#define A4XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000 -#define A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16 -static inline uint32_t A4XX_VFD_CONTROL_1_REGID4VTX(uint32_t val) -{ - return ((val) << A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A4XX_VFD_CONTROL_1_REGID4VTX__MASK; -} -#define A4XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000 -#define A4XX_VFD_CONTROL_1_REGID4INST__SHIFT 24 -static inline uint32_t A4XX_VFD_CONTROL_1_REGID4INST(uint32_t val) -{ - return ((val) << A4XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A4XX_VFD_CONTROL_1_REGID4INST__MASK; -} - -#define REG_A4XX_VFD_CONTROL_2 0x00002202 - -#define REG_A4XX_VFD_CONTROL_3 0x00002203 -#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK 0x0000ff00 -#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT 8 -static inline uint32_t A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val) -{ - return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK; -} -#define A4XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000 -#define A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16 -static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val) -{ - return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSX__MASK; -} -#define A4XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000 -#define A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24 -static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val) -{ - return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSY__MASK; -} - -#define REG_A4XX_VFD_CONTROL_4 0x00002204 - -#define REG_A4XX_VFD_INDEX_OFFSET 0x00002208 - -#define REG_A4XX_VFD_FETCH(i0) (0x0000220a + 0x4*(i0)) - -static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x0000220a + 0x4*i0; } -#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f -#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0 -static inline uint32_t A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val) -{ - return ((val) << A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK; -} -#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80 -#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7 -static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val) -{ - return ((val) << A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK; -} -#define A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00080000 -#define A4XX_VFD_FETCH_INSTR_0_INSTANCED 0x00100000 - -static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; } - -static inline uint32_t 
REG_A4XX_VFD_FETCH_INSTR_2(uint32_t i0) { return 0x0000220c + 0x4*i0; } -#define A4XX_VFD_FETCH_INSTR_2_SIZE__MASK 0xffffffff -#define A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT 0 -static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val) -{ - return ((val) << A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_2_SIZE__MASK; -} - -static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; } -#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK 0x000001ff -#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT 0 -static inline uint32_t A4XX_VFD_FETCH_INSTR_3_STEPRATE(uint32_t val) -{ - return ((val) << A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK; -} - -#define REG_A4XX_VFD_DECODE(i0) (0x0000228a + 0x1*(i0)) - -static inline uint32_t REG_A4XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000228a + 0x1*i0; } -#define A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f -#define A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0 -static inline uint32_t A4XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val) -{ - return ((val) << A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK; -} -#define A4XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010 -#define A4XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0 -#define A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6 -static inline uint32_t A4XX_VFD_DECODE_INSTR_FORMAT(enum a4xx_vtx_fmt val) -{ - return ((val) << A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A4XX_VFD_DECODE_INSTR_FORMAT__MASK; -} -#define A4XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000 -#define A4XX_VFD_DECODE_INSTR_REGID__SHIFT 12 -static inline uint32_t A4XX_VFD_DECODE_INSTR_REGID(uint32_t val) -{ - return ((val) << A4XX_VFD_DECODE_INSTR_REGID__SHIFT) & A4XX_VFD_DECODE_INSTR_REGID__MASK; -} -#define A4XX_VFD_DECODE_INSTR_INT 0x00100000 -#define A4XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000 -#define A4XX_VFD_DECODE_INSTR_SWAP__SHIFT 22 -static inline uint32_t A4XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A4XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A4XX_VFD_DECODE_INSTR_SWAP__MASK; -} -#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000 -#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24 -static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val) -{ - return ((val) << A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK; -} -#define A4XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000 -#define A4XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000 - -#define REG_A4XX_TPL1_DEBUG_ECO_CONTROL 0x00000f00 - -#define REG_A4XX_TPL1_TP_MODE_CONTROL 0x00000f03 - -#define REG_A4XX_TPL1_PERFCTR_TP_SEL_0 0x00000f04 - -#define REG_A4XX_TPL1_PERFCTR_TP_SEL_1 0x00000f05 - -#define REG_A4XX_TPL1_PERFCTR_TP_SEL_2 0x00000f06 - -#define REG_A4XX_TPL1_PERFCTR_TP_SEL_3 0x00000f07 - -#define REG_A4XX_TPL1_PERFCTR_TP_SEL_4 0x00000f08 - -#define REG_A4XX_TPL1_PERFCTR_TP_SEL_5 0x00000f09 - -#define REG_A4XX_TPL1_PERFCTR_TP_SEL_6 0x00000f0a - -#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b - -#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380 - -#define REG_A4XX_TPL1_TP_TEX_COUNT 0x00002381 -#define A4XX_TPL1_TP_TEX_COUNT_VS__MASK 0x000000ff -#define A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT 0 -static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_VS(uint32_t val) -{ - return ((val) << A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_VS__MASK; -} -#define A4XX_TPL1_TP_TEX_COUNT_HS__MASK 0x0000ff00 -#define A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT 8 -static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_HS(uint32_t val) -{ - return ((val) << 
A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_HS__MASK; -} -#define A4XX_TPL1_TP_TEX_COUNT_DS__MASK 0x00ff0000 -#define A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT 16 -static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_DS(uint32_t val) -{ - return ((val) << A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_DS__MASK; -} -#define A4XX_TPL1_TP_TEX_COUNT_GS__MASK 0xff000000 -#define A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT 24 -static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val) -{ - return ((val) << A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_GS__MASK; -} - -#define REG_A4XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002384 - -#define REG_A4XX_TPL1_TP_HS_BORDER_COLOR_BASE_ADDR 0x00002387 - -#define REG_A4XX_TPL1_TP_DS_BORDER_COLOR_BASE_ADDR 0x0000238a - -#define REG_A4XX_TPL1_TP_GS_BORDER_COLOR_BASE_ADDR 0x0000238d - -#define REG_A4XX_TPL1_TP_FS_TEX_COUNT 0x000023a0 -#define A4XX_TPL1_TP_FS_TEX_COUNT_FS__MASK 0x000000ff -#define A4XX_TPL1_TP_FS_TEX_COUNT_FS__SHIFT 0 -static inline uint32_t A4XX_TPL1_TP_FS_TEX_COUNT_FS(uint32_t val) -{ - return ((val) << A4XX_TPL1_TP_FS_TEX_COUNT_FS__SHIFT) & A4XX_TPL1_TP_FS_TEX_COUNT_FS__MASK; -} -#define A4XX_TPL1_TP_FS_TEX_COUNT_CS__MASK 0x0000ff00 -#define A4XX_TPL1_TP_FS_TEX_COUNT_CS__SHIFT 8 -static inline uint32_t A4XX_TPL1_TP_FS_TEX_COUNT_CS(uint32_t val) -{ - return ((val) << A4XX_TPL1_TP_FS_TEX_COUNT_CS__SHIFT) & A4XX_TPL1_TP_FS_TEX_COUNT_CS__MASK; -} - -#define REG_A4XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x000023a1 - -#define REG_A4XX_TPL1_TP_CS_BORDER_COLOR_BASE_ADDR 0x000023a4 - -#define REG_A4XX_TPL1_TP_CS_SAMPLER_BASE_ADDR 0x000023a5 - -#define REG_A4XX_TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR 0x000023a6 - -#define REG_A4XX_GRAS_TSE_STATUS 0x00000c80 - -#define REG_A4XX_GRAS_DEBUG_ECO_CONTROL 0x00000c81 - -#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88 - -#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c89 - -#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c8a - -#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b - -#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c8c - -#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c8d - -#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c8e - -#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c8f - -#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000 -#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000 -#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000 -#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000 -#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000 - -#define REG_A4XX_GRAS_CNTL 0x00002003 -#define A4XX_GRAS_CNTL_IJ_PERSP 0x00000001 -#define A4XX_GRAS_CNTL_IJ_LINEAR 0x00000002 - -#define REG_A4XX_GRAS_CL_GB_CLIP_ADJ 0x00002004 -#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff -#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0 -static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val) -{ - return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK; -} -#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00 -#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10 -static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val) -{ - return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK; -} - -#define REG_A4XX_GRAS_CL_VPORT_XOFFSET_0 0x00002008 -#define A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff -#define A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0 -static inline uint32_t A4XX_GRAS_CL_VPORT_XOFFSET_0(float val) -{ - return ((fui(val)) << A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & 
A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK; -} - -#define REG_A4XX_GRAS_CL_VPORT_XSCALE_0 0x00002009 -#define A4XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff -#define A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0 -static inline uint32_t A4XX_GRAS_CL_VPORT_XSCALE_0(float val) -{ - return ((fui(val)) << A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_XSCALE_0__MASK; -} - -#define REG_A4XX_GRAS_CL_VPORT_YOFFSET_0 0x0000200a -#define A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff -#define A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0 -static inline uint32_t A4XX_GRAS_CL_VPORT_YOFFSET_0(float val) -{ - return ((fui(val)) << A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK; -} - -#define REG_A4XX_GRAS_CL_VPORT_YSCALE_0 0x0000200b -#define A4XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff -#define A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0 -static inline uint32_t A4XX_GRAS_CL_VPORT_YSCALE_0(float val) -{ - return ((fui(val)) << A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_YSCALE_0__MASK; -} - -#define REG_A4XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000200c -#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff -#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0 -static inline uint32_t A4XX_GRAS_CL_VPORT_ZOFFSET_0(float val) -{ - return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK; -} - -#define REG_A4XX_GRAS_CL_VPORT_ZSCALE_0 0x0000200d -#define A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff -#define A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0 -static inline uint32_t A4XX_GRAS_CL_VPORT_ZSCALE_0(float val) -{ - return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK; -} - -#define REG_A4XX_GRAS_SU_POINT_MINMAX 0x00002070 -#define A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff -#define A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0 -static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MIN(float val) -{ - return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK; -} -#define A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000 -#define A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16 -static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MAX(float val) -{ - return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK; -} - -#define REG_A4XX_GRAS_SU_POINT_SIZE 0x00002071 -#define A4XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff -#define A4XX_GRAS_SU_POINT_SIZE__SHIFT 0 -static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val) -{ - return ((((int32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_SIZE__SHIFT) & A4XX_GRAS_SU_POINT_SIZE__MASK; -} - -#define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073 -#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004 -#define A4XX_GRAS_ALPHA_CONTROL_FORCE_FRAGZ_TO_FS 0x00000008 - -#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074 -#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff -#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0 -static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_SCALE(float val) -{ - return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK; -} - -#define REG_A4XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00002075 -#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff -#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0 -static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val) -{ - return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK; -} - -#define REG_A4XX_GRAS_SU_POLY_OFFSET_CLAMP 0x00002076 -#define 
A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK 0xffffffff -#define A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT 0 -static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_CLAMP(float val) -{ - return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK; -} - -#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077 -#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003 -#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0 -static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val) -{ - return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK; -} - -#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078 -#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001 -#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002 -#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004 -#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8 -#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3 -static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val) -{ - return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK; -} -#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800 -#define A4XX_GRAS_SU_MODE_CONTROL_MSAA_ENABLE 0x00002000 -#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000 - -#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b -#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c -#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2 -static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val) -{ - return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK; -} -#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380 -#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7 -static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK; -} -#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800 -#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000 -#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12 -static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK; -} - -#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL 0x0000207c -#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 -#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff -#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0 -static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK; -} -#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000 -#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16 -static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK; -} - -#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_BR 0x0000207d -#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 -#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff -#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0 -static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK; -} -#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000 -#define 
A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16 -static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK; -} - -#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000209c -#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 -#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff -#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0 -static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK; -} -#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000 -#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16 -static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK; -} - -#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000209d -#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 -#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff -#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0 -static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK; -} -#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000 -#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16 -static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK; -} - -#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_BR 0x0000209e -#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_WINDOW_OFFSET_DISABLE 0x80000000 -#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK 0x00007fff -#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT 0 -static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_X(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK; -} -#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK 0x7fff0000 -#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT 16 -static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK; -} - -#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f -#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_WINDOW_OFFSET_DISABLE 0x80000000 -#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK 0x00007fff -#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT 0 -static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_X(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK; -} -#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK 0x7fff0000 -#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT 16 -static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val) -{ - return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK; -} - -#define REG_A4XX_UCHE_CACHE_MODE_CONTROL 0x00000e80 - -#define REG_A4XX_UCHE_TRAP_BASE_LO 0x00000e83 - -#define REG_A4XX_UCHE_TRAP_BASE_HI 0x00000e84 - -#define REG_A4XX_UCHE_CACHE_STATUS 0x00000e88 - -#define REG_A4XX_UCHE_INVALIDATE0 0x00000e8a - -#define REG_A4XX_UCHE_INVALIDATE1 0x00000e8b - -#define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c - -#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e8e - -#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e8f - -#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e90 - 
-#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e91 - -#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e92 - -#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e93 - -#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e94 - -#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95 - -#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00 - -#define REG_A4XX_HLSQ_DEBUG_ECO_CONTROL 0x00000e04 - -#define REG_A4XX_HLSQ_MODE_CONTROL 0x00000e05 - -#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e - -#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e06 - -#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e07 - -#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e08 - -#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e09 - -#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e0a - -#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e0b - -#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e0c - -#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e0d - -#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0 -#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010 -#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4 -static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK; -} -#define A4XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040 -#define A4XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200 -#define A4XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400 -#define A4XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000 -#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000 -#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27 -static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK; -} -#define A4XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000 -#define A4XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000 -#define A4XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000 -#define A4XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000 - -#define REG_A4XX_HLSQ_CONTROL_1_REG 0x000023c1 -#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040 -#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6 -static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK; -} -#define A4XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100 -#define A4XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200 -#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK 0x00ff0000 -#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT 16 -static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_COORDREGID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK; -} -#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK 0xff000000 -#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT 24 -static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK; -} - -#define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2 -#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000 -#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26 -static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK; -} 
-#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000003fc -#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 2 -static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK; -} -#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK 0x0003fc00 -#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT 10 -static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK; -} -#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK 0x03fc0000 -#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT 18 -static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK; -} - -#define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3 -#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff -#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0 -static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK; -} -#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00 -#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8 -static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK; -} -#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000 -#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16 -static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK; -} -#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000 -#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24 -static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK; -} - -#define REG_A4XX_HLSQ_CONTROL_4_REG 0x000023c4 -#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff -#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0 -static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK; -} -#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00 -#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8 -static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK; -} - -#define REG_A4XX_HLSQ_VS_CONTROL_REG 0x000023c5 -#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff -#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0 -static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val) -{ - return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK; -} -#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00 -#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8 -static inline uint32_t 
A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; -} -#define A4XX_HLSQ_VS_CONTROL_REG_SSBO_ENABLE 0x00008000 -#define A4XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00010000 -#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000 -#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17 -static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK; -} -#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000 -#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24 -static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val) -{ - return ((val) << A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK; -} - -#define REG_A4XX_HLSQ_FS_CONTROL_REG 0x000023c6 -#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff -#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0 -static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val) -{ - return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK; -} -#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00 -#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8 -static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; -} -#define A4XX_HLSQ_FS_CONTROL_REG_SSBO_ENABLE 0x00008000 -#define A4XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00010000 -#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000 -#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17 -static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK; -} -#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000 -#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24 -static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val) -{ - return ((val) << A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK; -} - -#define REG_A4XX_HLSQ_HS_CONTROL_REG 0x000023c7 -#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff -#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT 0 -static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val) -{ - return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK; -} -#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00 -#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8 -static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; -} -#define A4XX_HLSQ_HS_CONTROL_REG_SSBO_ENABLE 0x00008000 -#define A4XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00010000 -#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000 -#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17 -static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK; -} -#define 
A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000 -#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT 24 -static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH(uint32_t val) -{ - return ((val) << A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK; -} - -#define REG_A4XX_HLSQ_DS_CONTROL_REG 0x000023c8 -#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff -#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT 0 -static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val) -{ - return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK; -} -#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00 -#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8 -static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; -} -#define A4XX_HLSQ_DS_CONTROL_REG_SSBO_ENABLE 0x00008000 -#define A4XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00010000 -#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000 -#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17 -static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK; -} -#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000 -#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT 24 -static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH(uint32_t val) -{ - return ((val) << A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK; -} - -#define REG_A4XX_HLSQ_GS_CONTROL_REG 0x000023c9 -#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff -#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT 0 -static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val) -{ - return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK; -} -#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00 -#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8 -static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; -} -#define A4XX_HLSQ_GS_CONTROL_REG_SSBO_ENABLE 0x00008000 -#define A4XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00010000 -#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000 -#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17 -static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK; -} -#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000 -#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT 24 -static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val) -{ - return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK; -} - -#define REG_A4XX_HLSQ_CS_CONTROL_REG 0x000023ca -#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff -#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT 0 -static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT) & 
A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK; -} -#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00 -#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8 -static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK; -} -#define A4XX_HLSQ_CS_CONTROL_REG_SSBO_ENABLE 0x00008000 -#define A4XX_HLSQ_CS_CONTROL_REG_ENABLED 0x00010000 -#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000 -#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17 -static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK; -} -#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000 -#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT 24 -static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK; -} - -#define REG_A4XX_HLSQ_CL_NDRANGE_0 0x000023cd -#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK 0x00000003 -#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT 0 -static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK; -} -#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc -#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT 2 -static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK; -} -#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000 -#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT 12 -static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK; -} -#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000 -#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT 22 -static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK; -} - -#define REG_A4XX_HLSQ_CL_NDRANGE_1 0x000023ce -#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK 0xffffffff -#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT 0 -static inline uint32_t A4XX_HLSQ_CL_NDRANGE_1_SIZE_X(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT) & A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK; -} - -#define REG_A4XX_HLSQ_CL_NDRANGE_2 0x000023cf - -#define REG_A4XX_HLSQ_CL_NDRANGE_3 0x000023d0 -#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK 0xffffffff -#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT 0 -static inline uint32_t A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT) & A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK; -} - -#define REG_A4XX_HLSQ_CL_NDRANGE_4 0x000023d1 - -#define REG_A4XX_HLSQ_CL_NDRANGE_5 0x000023d2 -#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK 0xffffffff -#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT 0 -static inline uint32_t A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT) & A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK; -} - -#define REG_A4XX_HLSQ_CL_NDRANGE_6 0x000023d3 - -#define REG_A4XX_HLSQ_CL_CONTROL_0 0x000023d4 -#define 
A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK 0x00000fff -#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT 0 -static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK; -} -#define A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__MASK 0x00fff000 -#define A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__SHIFT 12 -static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__MASK; -} -#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK 0xff000000 -#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT 24 -static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK; -} - -#define REG_A4XX_HLSQ_CL_CONTROL_1 0x000023d5 -#define A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__MASK 0x00000fff -#define A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__SHIFT 0 -static inline uint32_t A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__MASK; -} -#define A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__MASK 0x00fff000 -#define A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__SHIFT 12 -static inline uint32_t A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__MASK; -} - -#define REG_A4XX_HLSQ_CL_KERNEL_CONST 0x000023d6 -#define A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__MASK 0x00000fff -#define A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__SHIFT 0 -static inline uint32_t A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__MASK; -} -#define A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__MASK 0x00fff000 -#define A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__SHIFT 12 -static inline uint32_t A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__SHIFT) & A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__MASK; -} - -#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_X 0x000023d7 - -#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Y 0x000023d8 - -#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Z 0x000023d9 - -#define REG_A4XX_HLSQ_CL_WG_OFFSET 0x000023da -#define A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__MASK 0x00000fff -#define A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__SHIFT 0 -static inline uint32_t A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID(uint32_t val) -{ - return ((val) << A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__MASK; -} - -#define REG_A4XX_HLSQ_UPDATE_CONTROL 0x000023db - -#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00 -#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001 - -#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08 - -#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c - -#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10 - -#define REG_A4XX_PC_PERFCTR_PC_SEL_1 0x00000d11 - -#define REG_A4XX_PC_PERFCTR_PC_SEL_2 0x00000d12 - -#define REG_A4XX_PC_PERFCTR_PC_SEL_3 0x00000d13 - -#define REG_A4XX_PC_PERFCTR_PC_SEL_4 0x00000d14 - -#define REG_A4XX_PC_PERFCTR_PC_SEL_5 0x00000d15 - -#define REG_A4XX_PC_PERFCTR_PC_SEL_6 0x00000d16 - -#define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17 - -#define REG_A4XX_PC_BIN_BASE 0x000021c0 - -#define 
REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2 -#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000 -#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16 -static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val) -{ - return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK; -} -#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000 -#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22 -static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val) -{ - return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK; -} - -#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4 -#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f -#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0 -static inline uint32_t A4XX_PC_PRIM_VTX_CNTL_VAROUT(uint32_t val) -{ - return ((val) << A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT) & A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK; -} -#define A4XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000 -#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000 -#define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000 - -#define REG_A4XX_PC_PRIM_VTX_CNTL2 0x000021c5 -#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK 0x00000007 -#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT 0 -static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val) -{ - return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK; -} -#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK 0x00000038 -#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT 3 -static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val) -{ - return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK; -} -#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_ENABLE 0x00000040 - -#define REG_A4XX_PC_RESTART_INDEX 0x000021c6 - -#define REG_A4XX_PC_GS_PARAM 0x000021e5 -#define A4XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff -#define A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0 -static inline uint32_t A4XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val) -{ - return ((val) << A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A4XX_PC_GS_PARAM_MAX_VERTICES__MASK; -} -#define A4XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800 -#define A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11 -static inline uint32_t A4XX_PC_GS_PARAM_INVOCATIONS(uint32_t val) -{ - return ((val) << A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A4XX_PC_GS_PARAM_INVOCATIONS__MASK; -} -#define A4XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000 -#define A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23 -static inline uint32_t A4XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val) -{ - return ((val) << A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_GS_PARAM_PRIMTYPE__MASK; -} -#define A4XX_PC_GS_PARAM_LAYER 0x80000000 - -#define REG_A4XX_PC_HS_PARAM 0x000021e7 -#define A4XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f -#define A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0 -static inline uint32_t A4XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val) -{ - return ((val) << A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A4XX_PC_HS_PARAM_VERTICES_OUT__MASK; -} -#define A4XX_PC_HS_PARAM_SPACING__MASK 0x00600000 -#define A4XX_PC_HS_PARAM_SPACING__SHIFT 21 -static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val) -{ - return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK; -} -#define A4XX_PC_HS_PARAM_CW 0x00800000 -#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000 - -#define REG_A4XX_VBIF_VERSION 0x00003000 - -#define 
REG_A4XX_VBIF_CLKON 0x00003001 -#define A4XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000001 - -#define REG_A4XX_VBIF_ABIT_SORT 0x0000301c - -#define REG_A4XX_VBIF_ABIT_SORT_CONF 0x0000301d - -#define REG_A4XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a - -#define REG_A4XX_VBIF_IN_RD_LIM_CONF0 0x0000302c - -#define REG_A4XX_VBIF_IN_RD_LIM_CONF1 0x0000302d - -#define REG_A4XX_VBIF_IN_WR_LIM_CONF0 0x00003030 - -#define REG_A4XX_VBIF_IN_WR_LIM_CONF1 0x00003031 - -#define REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049 - -#define REG_A4XX_VBIF_PERF_CNT_EN0 0x000030c0 - -#define REG_A4XX_VBIF_PERF_CNT_EN1 0x000030c1 - -#define REG_A4XX_VBIF_PERF_CNT_EN2 0x000030c2 - -#define REG_A4XX_VBIF_PERF_CNT_EN3 0x000030c3 - -#define REG_A4XX_VBIF_PERF_CNT_SEL0 0x000030d0 - -#define REG_A4XX_VBIF_PERF_CNT_SEL1 0x000030d1 - -#define REG_A4XX_VBIF_PERF_CNT_SEL2 0x000030d2 - -#define REG_A4XX_VBIF_PERF_CNT_SEL3 0x000030d3 - -#define REG_A4XX_VBIF_PERF_CNT_LOW0 0x000030d8 - -#define REG_A4XX_VBIF_PERF_CNT_LOW1 0x000030d9 - -#define REG_A4XX_VBIF_PERF_CNT_LOW2 0x000030da - -#define REG_A4XX_VBIF_PERF_CNT_LOW3 0x000030db - -#define REG_A4XX_VBIF_PERF_CNT_HIGH0 0x000030e0 - -#define REG_A4XX_VBIF_PERF_CNT_HIGH1 0x000030e1 - -#define REG_A4XX_VBIF_PERF_CNT_HIGH2 0x000030e2 - -#define REG_A4XX_VBIF_PERF_CNT_HIGH3 0x000030e3 - -#define REG_A4XX_VBIF_PERF_PWR_CNT_EN0 0x00003100 - -#define REG_A4XX_VBIF_PERF_PWR_CNT_EN1 0x00003101 - -#define REG_A4XX_VBIF_PERF_PWR_CNT_EN2 0x00003102 - -#define REG_A4XX_UNKNOWN_0CC5 0x00000cc5 - -#define REG_A4XX_UNKNOWN_0CC6 0x00000cc6 - -#define REG_A4XX_UNKNOWN_0D01 0x00000d01 - -#define REG_A4XX_UNKNOWN_0E42 0x00000e42 - -#define REG_A4XX_UNKNOWN_0EC2 0x00000ec2 - -#define REG_A4XX_UNKNOWN_2001 0x00002001 - -#define REG_A4XX_UNKNOWN_209B 0x0000209b - -#define REG_A4XX_UNKNOWN_20EF 0x000020ef - -#define REG_A4XX_UNKNOWN_2152 0x00002152 - -#define REG_A4XX_UNKNOWN_2153 0x00002153 - -#define REG_A4XX_UNKNOWN_2154 0x00002154 - -#define REG_A4XX_UNKNOWN_2155 0x00002155 - -#define REG_A4XX_UNKNOWN_2156 0x00002156 - -#define REG_A4XX_UNKNOWN_2157 0x00002157 - -#define REG_A4XX_UNKNOWN_21C3 0x000021c3 - -#define REG_A4XX_UNKNOWN_21E6 0x000021e6 - -#define REG_A4XX_UNKNOWN_2209 0x00002209 - -#define REG_A4XX_UNKNOWN_22D7 0x000022d7 - -#define REG_A4XX_UNKNOWN_2352 0x00002352 - -#define REG_A4XX_TEX_SAMP_0 0x00000000 -#define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001 -#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006 -#define A4XX_TEX_SAMP_0_XY_MAG__SHIFT 1 -static inline uint32_t A4XX_TEX_SAMP_0_XY_MAG(enum a4xx_tex_filter val) -{ - return ((val) << A4XX_TEX_SAMP_0_XY_MAG__SHIFT) & A4XX_TEX_SAMP_0_XY_MAG__MASK; -} -#define A4XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018 -#define A4XX_TEX_SAMP_0_XY_MIN__SHIFT 3 -static inline uint32_t A4XX_TEX_SAMP_0_XY_MIN(enum a4xx_tex_filter val) -{ - return ((val) << A4XX_TEX_SAMP_0_XY_MIN__SHIFT) & A4XX_TEX_SAMP_0_XY_MIN__MASK; -} -#define A4XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0 -#define A4XX_TEX_SAMP_0_WRAP_S__SHIFT 5 -static inline uint32_t A4XX_TEX_SAMP_0_WRAP_S(enum a4xx_tex_clamp val) -{ - return ((val) << A4XX_TEX_SAMP_0_WRAP_S__SHIFT) & A4XX_TEX_SAMP_0_WRAP_S__MASK; -} -#define A4XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700 -#define A4XX_TEX_SAMP_0_WRAP_T__SHIFT 8 -static inline uint32_t A4XX_TEX_SAMP_0_WRAP_T(enum a4xx_tex_clamp val) -{ - return ((val) << A4XX_TEX_SAMP_0_WRAP_T__SHIFT) & A4XX_TEX_SAMP_0_WRAP_T__MASK; -} -#define A4XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800 -#define A4XX_TEX_SAMP_0_WRAP_R__SHIFT 11 -static inline uint32_t A4XX_TEX_SAMP_0_WRAP_R(enum a4xx_tex_clamp 
val) -{ - return ((val) << A4XX_TEX_SAMP_0_WRAP_R__SHIFT) & A4XX_TEX_SAMP_0_WRAP_R__MASK; -} -#define A4XX_TEX_SAMP_0_ANISO__MASK 0x0001c000 -#define A4XX_TEX_SAMP_0_ANISO__SHIFT 14 -static inline uint32_t A4XX_TEX_SAMP_0_ANISO(enum a4xx_tex_aniso val) -{ - return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK; -} -#define A4XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000 -#define A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19 -static inline uint32_t A4XX_TEX_SAMP_0_LOD_BIAS(float val) -{ - return ((((int32_t)(val * 256.0))) << A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A4XX_TEX_SAMP_0_LOD_BIAS__MASK; -} - -#define REG_A4XX_TEX_SAMP_1 0x00000001 -#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e -#define A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1 -static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val) -{ - return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK; -} -#define A4XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010 -#define A4XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020 -#define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040 -#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00 -#define A4XX_TEX_SAMP_1_MAX_LOD__SHIFT 8 -static inline uint32_t A4XX_TEX_SAMP_1_MAX_LOD(float val) -{ - return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK; -} -#define A4XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000 -#define A4XX_TEX_SAMP_1_MIN_LOD__SHIFT 20 -static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val) -{ - return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK; -} - -#define REG_A4XX_TEX_CONST_0 0x00000000 -#define A4XX_TEX_CONST_0_TILED 0x00000001 -#define A4XX_TEX_CONST_0_SRGB 0x00000004 -#define A4XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070 -#define A4XX_TEX_CONST_0_SWIZ_X__SHIFT 4 -static inline uint32_t A4XX_TEX_CONST_0_SWIZ_X(enum a4xx_tex_swiz val) -{ - return ((val) << A4XX_TEX_CONST_0_SWIZ_X__SHIFT) & A4XX_TEX_CONST_0_SWIZ_X__MASK; -} -#define A4XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380 -#define A4XX_TEX_CONST_0_SWIZ_Y__SHIFT 7 -static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Y(enum a4xx_tex_swiz val) -{ - return ((val) << A4XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Y__MASK; -} -#define A4XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00 -#define A4XX_TEX_CONST_0_SWIZ_Z__SHIFT 10 -static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Z(enum a4xx_tex_swiz val) -{ - return ((val) << A4XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Z__MASK; -} -#define A4XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000 -#define A4XX_TEX_CONST_0_SWIZ_W__SHIFT 13 -static inline uint32_t A4XX_TEX_CONST_0_SWIZ_W(enum a4xx_tex_swiz val) -{ - return ((val) << A4XX_TEX_CONST_0_SWIZ_W__SHIFT) & A4XX_TEX_CONST_0_SWIZ_W__MASK; -} -#define A4XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000 -#define A4XX_TEX_CONST_0_MIPLVLS__SHIFT 16 -static inline uint32_t A4XX_TEX_CONST_0_MIPLVLS(uint32_t val) -{ - return ((val) << A4XX_TEX_CONST_0_MIPLVLS__SHIFT) & A4XX_TEX_CONST_0_MIPLVLS__MASK; -} -#define A4XX_TEX_CONST_0_FMT__MASK 0x1fc00000 -#define A4XX_TEX_CONST_0_FMT__SHIFT 22 -static inline uint32_t A4XX_TEX_CONST_0_FMT(enum a4xx_tex_fmt val) -{ - return ((val) << A4XX_TEX_CONST_0_FMT__SHIFT) & A4XX_TEX_CONST_0_FMT__MASK; -} -#define A4XX_TEX_CONST_0_TYPE__MASK 0xe0000000 -#define A4XX_TEX_CONST_0_TYPE__SHIFT 29 -static inline uint32_t A4XX_TEX_CONST_0_TYPE(enum a4xx_tex_type val) -{ - return ((val) << A4XX_TEX_CONST_0_TYPE__SHIFT) & A4XX_TEX_CONST_0_TYPE__MASK; -} - -#define REG_A4XX_TEX_CONST_1 0x00000001 
-#define A4XX_TEX_CONST_1_HEIGHT__MASK 0x00007fff -#define A4XX_TEX_CONST_1_HEIGHT__SHIFT 0 -static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val) -{ - return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK; -} -#define A4XX_TEX_CONST_1_WIDTH__MASK 0x3fff8000 -#define A4XX_TEX_CONST_1_WIDTH__SHIFT 15 -static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val) -{ - return ((val) << A4XX_TEX_CONST_1_WIDTH__SHIFT) & A4XX_TEX_CONST_1_WIDTH__MASK; -} - -#define REG_A4XX_TEX_CONST_2 0x00000002 -#define A4XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f -#define A4XX_TEX_CONST_2_PITCHALIGN__SHIFT 0 -static inline uint32_t A4XX_TEX_CONST_2_PITCHALIGN(uint32_t val) -{ - return ((val) << A4XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A4XX_TEX_CONST_2_PITCHALIGN__MASK; -} -#define A4XX_TEX_CONST_2_BUFFER 0x00000040 -#define A4XX_TEX_CONST_2_PITCH__MASK 0x3ffffe00 -#define A4XX_TEX_CONST_2_PITCH__SHIFT 9 -static inline uint32_t A4XX_TEX_CONST_2_PITCH(uint32_t val) -{ - return ((val) << A4XX_TEX_CONST_2_PITCH__SHIFT) & A4XX_TEX_CONST_2_PITCH__MASK; -} -#define A4XX_TEX_CONST_2_SWAP__MASK 0xc0000000 -#define A4XX_TEX_CONST_2_SWAP__SHIFT 30 -static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A4XX_TEX_CONST_2_SWAP__SHIFT) & A4XX_TEX_CONST_2_SWAP__MASK; -} - -#define REG_A4XX_TEX_CONST_3 0x00000003 -#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x00003fff -#define A4XX_TEX_CONST_3_LAYERSZ__SHIFT 0 -static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK; -} -#define A4XX_TEX_CONST_3_DEPTH__MASK 0x7ffc0000 -#define A4XX_TEX_CONST_3_DEPTH__SHIFT 18 -static inline uint32_t A4XX_TEX_CONST_3_DEPTH(uint32_t val) -{ - return ((val) << A4XX_TEX_CONST_3_DEPTH__SHIFT) & A4XX_TEX_CONST_3_DEPTH__MASK; -} - -#define REG_A4XX_TEX_CONST_4 0x00000004 -#define A4XX_TEX_CONST_4_LAYERSZ__MASK 0x0000000f -#define A4XX_TEX_CONST_4_LAYERSZ__SHIFT 0 -static inline uint32_t A4XX_TEX_CONST_4_LAYERSZ(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A4XX_TEX_CONST_4_LAYERSZ__SHIFT) & A4XX_TEX_CONST_4_LAYERSZ__MASK; -} -#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffe0 -#define A4XX_TEX_CONST_4_BASE__SHIFT 5 -static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK; -} - -#define REG_A4XX_TEX_CONST_5 0x00000005 - -#define REG_A4XX_TEX_CONST_6 0x00000006 - -#define REG_A4XX_TEX_CONST_7 0x00000007 - -#define REG_A4XX_SSBO_0_0 0x00000000 -#define A4XX_SSBO_0_0_BASE__MASK 0xffffffe0 -#define A4XX_SSBO_0_0_BASE__SHIFT 5 -static inline uint32_t A4XX_SSBO_0_0_BASE(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A4XX_SSBO_0_0_BASE__SHIFT) & A4XX_SSBO_0_0_BASE__MASK; -} - -#define REG_A4XX_SSBO_0_1 0x00000001 -#define A4XX_SSBO_0_1_PITCH__MASK 0x003fffff -#define A4XX_SSBO_0_1_PITCH__SHIFT 0 -static inline uint32_t A4XX_SSBO_0_1_PITCH(uint32_t val) -{ - return ((val) << A4XX_SSBO_0_1_PITCH__SHIFT) & A4XX_SSBO_0_1_PITCH__MASK; -} - -#define REG_A4XX_SSBO_0_2 0x00000002 -#define A4XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000 -#define A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12 -static inline uint32_t A4XX_SSBO_0_2_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A4XX_SSBO_0_2_ARRAY_PITCH__MASK; -} - -#define REG_A4XX_SSBO_0_3 0x00000003 -#define 
A4XX_SSBO_0_3_CPP__MASK 0x0000003f -#define A4XX_SSBO_0_3_CPP__SHIFT 0 -static inline uint32_t A4XX_SSBO_0_3_CPP(uint32_t val) -{ - return ((val) << A4XX_SSBO_0_3_CPP__SHIFT) & A4XX_SSBO_0_3_CPP__MASK; -} - -#define REG_A4XX_SSBO_1_0 0x00000000 -#define A4XX_SSBO_1_0_CPP__MASK 0x0000001f -#define A4XX_SSBO_1_0_CPP__SHIFT 0 -static inline uint32_t A4XX_SSBO_1_0_CPP(uint32_t val) -{ - return ((val) << A4XX_SSBO_1_0_CPP__SHIFT) & A4XX_SSBO_1_0_CPP__MASK; -} -#define A4XX_SSBO_1_0_FMT__MASK 0x0000ff00 -#define A4XX_SSBO_1_0_FMT__SHIFT 8 -static inline uint32_t A4XX_SSBO_1_0_FMT(enum a4xx_color_fmt val) -{ - return ((val) << A4XX_SSBO_1_0_FMT__SHIFT) & A4XX_SSBO_1_0_FMT__MASK; -} -#define A4XX_SSBO_1_0_WIDTH__MASK 0xffff0000 -#define A4XX_SSBO_1_0_WIDTH__SHIFT 16 -static inline uint32_t A4XX_SSBO_1_0_WIDTH(uint32_t val) -{ - return ((val) << A4XX_SSBO_1_0_WIDTH__SHIFT) & A4XX_SSBO_1_0_WIDTH__MASK; -} - -#define REG_A4XX_SSBO_1_1 0x00000001 -#define A4XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff -#define A4XX_SSBO_1_1_HEIGHT__SHIFT 0 -static inline uint32_t A4XX_SSBO_1_1_HEIGHT(uint32_t val) -{ - return ((val) << A4XX_SSBO_1_1_HEIGHT__SHIFT) & A4XX_SSBO_1_1_HEIGHT__MASK; -} -#define A4XX_SSBO_1_1_DEPTH__MASK 0xffff0000 -#define A4XX_SSBO_1_1_DEPTH__SHIFT 16 -static inline uint32_t A4XX_SSBO_1_1_DEPTH(uint32_t val) -{ - return ((val) << A4XX_SSBO_1_1_DEPTH__SHIFT) & A4XX_SSBO_1_1_DEPTH__MASK; -} - -#ifdef __cplusplus -#endif - -#endif /* A4XX_XML */ diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h deleted file mode 100644 index d66306c149..0000000000 --- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h +++ /dev/null @@ -1,5572 +0,0 @@ -#ifndef A5XX_XML -#define A5XX_XML - -/* Autogenerated file, DO NOT EDIT manually! - -This file was generated by the rules-ng-ng gen_header.py tool in this git repository: -http://gitlab.freedesktop.org/mesa/mesa/ -git clone https://gitlab.freedesktop.org/mesa/mesa.git - -The rules-ng-ng source files this header was generated from are: - -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 151693 bytes, from Wed Aug 23 10:39:39 2023) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024) - -Copyright (C) 2013-2024 by the following authors: -- Rob Clark Rob Clark -- Ilia Mirkin Ilia Mirkin - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -*/ - -#ifdef __KERNEL__ -#include -#define assert(x) BUG_ON(!(x)) -#else -#include -#endif - -#ifdef __cplusplus -#define __struct_cast(X) -#else -#define __struct_cast(X) (struct X) -#endif - -enum a5xx_color_fmt { - RB5_A8_UNORM = 2, - RB5_R8_UNORM = 3, - RB5_R8_SNORM = 4, - RB5_R8_UINT = 5, - RB5_R8_SINT = 6, - RB5_R4G4B4A4_UNORM = 8, - RB5_R5G5B5A1_UNORM = 10, - RB5_R5G6B5_UNORM = 14, - RB5_R8G8_UNORM = 15, - RB5_R8G8_SNORM = 16, - RB5_R8G8_UINT = 17, - RB5_R8G8_SINT = 18, - RB5_R16_UNORM = 21, - RB5_R16_SNORM = 22, - RB5_R16_FLOAT = 23, - RB5_R16_UINT = 24, - RB5_R16_SINT = 25, - RB5_R8G8B8A8_UNORM = 48, - RB5_R8G8B8_UNORM = 49, - RB5_R8G8B8A8_SNORM = 50, - RB5_R8G8B8A8_UINT = 51, - RB5_R8G8B8A8_SINT = 52, - RB5_R10G10B10A2_UNORM = 55, - RB5_R10G10B10A2_UINT = 58, - RB5_R11G11B10_FLOAT = 66, - RB5_R16G16_UNORM = 67, - RB5_R16G16_SNORM = 68, - RB5_R16G16_FLOAT = 69, - RB5_R16G16_UINT = 70, - RB5_R16G16_SINT = 71, - RB5_R32_FLOAT = 74, - RB5_R32_UINT = 75, - RB5_R32_SINT = 76, - RB5_R16G16B16A16_UNORM = 96, - RB5_R16G16B16A16_SNORM = 97, - RB5_R16G16B16A16_FLOAT = 98, - RB5_R16G16B16A16_UINT = 99, - RB5_R16G16B16A16_SINT = 100, - RB5_R32G32_FLOAT = 103, - RB5_R32G32_UINT = 104, - RB5_R32G32_SINT = 105, - RB5_R32G32B32A32_FLOAT = 130, - RB5_R32G32B32A32_UINT = 131, - RB5_R32G32B32A32_SINT = 132, - RB5_NONE = 255, -}; - -enum a5xx_tile_mode { - TILE5_LINEAR = 0, - TILE5_2 = 2, - TILE5_3 = 3, -}; - -enum a5xx_vtx_fmt { - VFMT5_8_UNORM = 3, - VFMT5_8_SNORM = 4, - VFMT5_8_UINT = 5, - VFMT5_8_SINT = 6, - VFMT5_8_8_UNORM = 15, - VFMT5_8_8_SNORM = 16, - VFMT5_8_8_UINT = 17, - VFMT5_8_8_SINT = 18, - VFMT5_16_UNORM = 21, - VFMT5_16_SNORM = 22, - VFMT5_16_FLOAT = 23, - VFMT5_16_UINT = 24, - VFMT5_16_SINT = 25, - VFMT5_8_8_8_UNORM = 33, - VFMT5_8_8_8_SNORM = 34, - VFMT5_8_8_8_UINT = 35, - VFMT5_8_8_8_SINT = 36, - VFMT5_8_8_8_8_UNORM = 48, - VFMT5_8_8_8_8_SNORM = 50, - VFMT5_8_8_8_8_UINT = 51, - VFMT5_8_8_8_8_SINT = 52, - VFMT5_10_10_10_2_UNORM = 54, - VFMT5_10_10_10_2_SNORM = 57, - VFMT5_10_10_10_2_UINT = 58, - VFMT5_10_10_10_2_SINT = 59, - VFMT5_11_11_10_FLOAT = 66, - VFMT5_16_16_UNORM = 67, - VFMT5_16_16_SNORM = 68, - VFMT5_16_16_FLOAT = 69, - VFMT5_16_16_UINT = 70, - VFMT5_16_16_SINT = 71, - VFMT5_32_UNORM = 72, - VFMT5_32_SNORM = 73, - VFMT5_32_FLOAT = 74, - VFMT5_32_UINT = 75, - VFMT5_32_SINT = 76, - VFMT5_32_FIXED = 77, - VFMT5_16_16_16_UNORM = 88, - VFMT5_16_16_16_SNORM = 89, - VFMT5_16_16_16_FLOAT = 90, - VFMT5_16_16_16_UINT = 91, - VFMT5_16_16_16_SINT = 92, - VFMT5_16_16_16_16_UNORM = 96, - VFMT5_16_16_16_16_SNORM = 97, - VFMT5_16_16_16_16_FLOAT = 98, - VFMT5_16_16_16_16_UINT = 99, - VFMT5_16_16_16_16_SINT = 100, - VFMT5_32_32_UNORM = 101, - VFMT5_32_32_SNORM = 102, - VFMT5_32_32_FLOAT = 103, - VFMT5_32_32_UINT = 104, - VFMT5_32_32_SINT = 105, - VFMT5_32_32_FIXED = 106, - VFMT5_32_32_32_UNORM = 112, - VFMT5_32_32_32_SNORM = 113, - VFMT5_32_32_32_UINT = 114, - VFMT5_32_32_32_SINT = 115, - VFMT5_32_32_32_FLOAT = 116, - VFMT5_32_32_32_FIXED = 117, - VFMT5_32_32_32_32_UNORM = 128, - VFMT5_32_32_32_32_SNORM = 129, - VFMT5_32_32_32_32_FLOAT = 130, - VFMT5_32_32_32_32_UINT = 131, - VFMT5_32_32_32_32_SINT = 132, - VFMT5_32_32_32_32_FIXED = 133, - VFMT5_NONE = 255, -}; - -enum a5xx_tex_fmt { - TFMT5_A8_UNORM = 2, - 
TFMT5_8_UNORM = 3, - TFMT5_8_SNORM = 4, - TFMT5_8_UINT = 5, - TFMT5_8_SINT = 6, - TFMT5_4_4_4_4_UNORM = 8, - TFMT5_5_5_5_1_UNORM = 10, - TFMT5_5_6_5_UNORM = 14, - TFMT5_8_8_UNORM = 15, - TFMT5_8_8_SNORM = 16, - TFMT5_8_8_UINT = 17, - TFMT5_8_8_SINT = 18, - TFMT5_L8_A8_UNORM = 19, - TFMT5_16_UNORM = 21, - TFMT5_16_SNORM = 22, - TFMT5_16_FLOAT = 23, - TFMT5_16_UINT = 24, - TFMT5_16_SINT = 25, - TFMT5_8_8_8_8_UNORM = 48, - TFMT5_8_8_8_UNORM = 49, - TFMT5_8_8_8_8_SNORM = 50, - TFMT5_8_8_8_8_UINT = 51, - TFMT5_8_8_8_8_SINT = 52, - TFMT5_9_9_9_E5_FLOAT = 53, - TFMT5_10_10_10_2_UNORM = 54, - TFMT5_10_10_10_2_UINT = 58, - TFMT5_11_11_10_FLOAT = 66, - TFMT5_16_16_UNORM = 67, - TFMT5_16_16_SNORM = 68, - TFMT5_16_16_FLOAT = 69, - TFMT5_16_16_UINT = 70, - TFMT5_16_16_SINT = 71, - TFMT5_32_FLOAT = 74, - TFMT5_32_UINT = 75, - TFMT5_32_SINT = 76, - TFMT5_16_16_16_16_UNORM = 96, - TFMT5_16_16_16_16_SNORM = 97, - TFMT5_16_16_16_16_FLOAT = 98, - TFMT5_16_16_16_16_UINT = 99, - TFMT5_16_16_16_16_SINT = 100, - TFMT5_32_32_FLOAT = 103, - TFMT5_32_32_UINT = 104, - TFMT5_32_32_SINT = 105, - TFMT5_32_32_32_UINT = 114, - TFMT5_32_32_32_SINT = 115, - TFMT5_32_32_32_FLOAT = 116, - TFMT5_32_32_32_32_FLOAT = 130, - TFMT5_32_32_32_32_UINT = 131, - TFMT5_32_32_32_32_SINT = 132, - TFMT5_X8Z24_UNORM = 160, - TFMT5_ETC2_RG11_UNORM = 171, - TFMT5_ETC2_RG11_SNORM = 172, - TFMT5_ETC2_R11_UNORM = 173, - TFMT5_ETC2_R11_SNORM = 174, - TFMT5_ETC1 = 175, - TFMT5_ETC2_RGB8 = 176, - TFMT5_ETC2_RGBA8 = 177, - TFMT5_ETC2_RGB8A1 = 178, - TFMT5_DXT1 = 179, - TFMT5_DXT3 = 180, - TFMT5_DXT5 = 181, - TFMT5_RGTC1_UNORM = 183, - TFMT5_RGTC1_SNORM = 184, - TFMT5_RGTC2_UNORM = 187, - TFMT5_RGTC2_SNORM = 188, - TFMT5_BPTC_UFLOAT = 190, - TFMT5_BPTC_FLOAT = 191, - TFMT5_BPTC = 192, - TFMT5_ASTC_4x4 = 193, - TFMT5_ASTC_5x4 = 194, - TFMT5_ASTC_5x5 = 195, - TFMT5_ASTC_6x5 = 196, - TFMT5_ASTC_6x6 = 197, - TFMT5_ASTC_8x5 = 198, - TFMT5_ASTC_8x6 = 199, - TFMT5_ASTC_8x8 = 200, - TFMT5_ASTC_10x5 = 201, - TFMT5_ASTC_10x6 = 202, - TFMT5_ASTC_10x8 = 203, - TFMT5_ASTC_10x10 = 204, - TFMT5_ASTC_12x10 = 205, - TFMT5_ASTC_12x12 = 206, - TFMT5_NONE = 255, -}; - -enum a5xx_depth_format { - DEPTH5_NONE = 0, - DEPTH5_16 = 1, - DEPTH5_24_8 = 2, - DEPTH5_32 = 4, -}; - -enum a5xx_blit_buf { - BLIT_MRT0 = 0, - BLIT_MRT1 = 1, - BLIT_MRT2 = 2, - BLIT_MRT3 = 3, - BLIT_MRT4 = 4, - BLIT_MRT5 = 5, - BLIT_MRT6 = 6, - BLIT_MRT7 = 7, - BLIT_ZS = 8, - BLIT_S = 9, -}; - -enum a5xx_cp_perfcounter_select { - PERF_CP_ALWAYS_COUNT = 0, - PERF_CP_BUSY_GFX_CORE_IDLE = 1, - PERF_CP_BUSY_CYCLES = 2, - PERF_CP_PFP_IDLE = 3, - PERF_CP_PFP_BUSY_WORKING = 4, - PERF_CP_PFP_STALL_CYCLES_ANY = 5, - PERF_CP_PFP_STARVE_CYCLES_ANY = 6, - PERF_CP_PFP_ICACHE_MISS = 7, - PERF_CP_PFP_ICACHE_HIT = 8, - PERF_CP_PFP_MATCH_PM4_PKT_PROFILE = 9, - PERF_CP_ME_BUSY_WORKING = 10, - PERF_CP_ME_IDLE = 11, - PERF_CP_ME_STARVE_CYCLES_ANY = 12, - PERF_CP_ME_FIFO_EMPTY_PFP_IDLE = 13, - PERF_CP_ME_FIFO_EMPTY_PFP_BUSY = 14, - PERF_CP_ME_FIFO_FULL_ME_BUSY = 15, - PERF_CP_ME_FIFO_FULL_ME_NON_WORKING = 16, - PERF_CP_ME_STALL_CYCLES_ANY = 17, - PERF_CP_ME_ICACHE_MISS = 18, - PERF_CP_ME_ICACHE_HIT = 19, - PERF_CP_NUM_PREEMPTIONS = 20, - PERF_CP_PREEMPTION_REACTION_DELAY = 21, - PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 22, - PERF_CP_PREEMPTION_SWITCH_IN_TIME = 23, - PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 24, - PERF_CP_PREDICATED_DRAWS_KILLED = 25, - PERF_CP_MODE_SWITCH = 26, - PERF_CP_ZPASS_DONE = 27, - PERF_CP_CONTEXT_DONE = 28, - PERF_CP_CACHE_FLUSH = 29, - PERF_CP_LONG_PREEMPTIONS = 30, -}; - -enum a5xx_rbbm_perfcounter_select { 
- PERF_RBBM_ALWAYS_COUNT = 0, - PERF_RBBM_ALWAYS_ON = 1, - PERF_RBBM_TSE_BUSY = 2, - PERF_RBBM_RAS_BUSY = 3, - PERF_RBBM_PC_DCALL_BUSY = 4, - PERF_RBBM_PC_VSD_BUSY = 5, - PERF_RBBM_STATUS_MASKED = 6, - PERF_RBBM_COM_BUSY = 7, - PERF_RBBM_DCOM_BUSY = 8, - PERF_RBBM_VBIF_BUSY = 9, - PERF_RBBM_VSC_BUSY = 10, - PERF_RBBM_TESS_BUSY = 11, - PERF_RBBM_UCHE_BUSY = 12, - PERF_RBBM_HLSQ_BUSY = 13, -}; - -enum a5xx_pc_perfcounter_select { - PERF_PC_BUSY_CYCLES = 0, - PERF_PC_WORKING_CYCLES = 1, - PERF_PC_STALL_CYCLES_VFD = 2, - PERF_PC_STALL_CYCLES_TSE = 3, - PERF_PC_STALL_CYCLES_VPC = 4, - PERF_PC_STALL_CYCLES_UCHE = 5, - PERF_PC_STALL_CYCLES_TESS = 6, - PERF_PC_STALL_CYCLES_TSE_ONLY = 7, - PERF_PC_STALL_CYCLES_VPC_ONLY = 8, - PERF_PC_PASS1_TF_STALL_CYCLES = 9, - PERF_PC_STARVE_CYCLES_FOR_INDEX = 10, - PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11, - PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12, - PERF_PC_STARVE_CYCLES_FOR_POSITION = 13, - PERF_PC_STARVE_CYCLES_DI = 14, - PERF_PC_VIS_STREAMS_LOADED = 15, - PERF_PC_INSTANCES = 16, - PERF_PC_VPC_PRIMITIVES = 17, - PERF_PC_DEAD_PRIM = 18, - PERF_PC_LIVE_PRIM = 19, - PERF_PC_VERTEX_HITS = 20, - PERF_PC_IA_VERTICES = 21, - PERF_PC_IA_PRIMITIVES = 22, - PERF_PC_GS_PRIMITIVES = 23, - PERF_PC_HS_INVOCATIONS = 24, - PERF_PC_DS_INVOCATIONS = 25, - PERF_PC_VS_INVOCATIONS = 26, - PERF_PC_GS_INVOCATIONS = 27, - PERF_PC_DS_PRIMITIVES = 28, - PERF_PC_VPC_POS_DATA_TRANSACTION = 29, - PERF_PC_3D_DRAWCALLS = 30, - PERF_PC_2D_DRAWCALLS = 31, - PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32, - PERF_TESS_BUSY_CYCLES = 33, - PERF_TESS_WORKING_CYCLES = 34, - PERF_TESS_STALL_CYCLES_PC = 35, - PERF_TESS_STARVE_CYCLES_PC = 36, -}; - -enum a5xx_vfd_perfcounter_select { - PERF_VFD_BUSY_CYCLES = 0, - PERF_VFD_STALL_CYCLES_UCHE = 1, - PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2, - PERF_VFD_STALL_CYCLES_MISS_VB = 3, - PERF_VFD_STALL_CYCLES_MISS_Q = 4, - PERF_VFD_STALL_CYCLES_SP_INFO = 5, - PERF_VFD_STALL_CYCLES_SP_ATTR = 6, - PERF_VFD_STALL_CYCLES_VFDP_VB = 7, - PERF_VFD_STALL_CYCLES_VFDP_Q = 8, - PERF_VFD_DECODER_PACKER_STALL = 9, - PERF_VFD_STARVE_CYCLES_UCHE = 10, - PERF_VFD_RBUFFER_FULL = 11, - PERF_VFD_ATTR_INFO_FIFO_FULL = 12, - PERF_VFD_DECODED_ATTRIBUTE_BYTES = 13, - PERF_VFD_NUM_ATTRIBUTES = 14, - PERF_VFD_INSTRUCTIONS = 15, - PERF_VFD_UPPER_SHADER_FIBERS = 16, - PERF_VFD_LOWER_SHADER_FIBERS = 17, - PERF_VFD_MODE_0_FIBERS = 18, - PERF_VFD_MODE_1_FIBERS = 19, - PERF_VFD_MODE_2_FIBERS = 20, - PERF_VFD_MODE_3_FIBERS = 21, - PERF_VFD_MODE_4_FIBERS = 22, - PERF_VFD_TOTAL_VERTICES = 23, - PERF_VFD_NUM_ATTR_MISS = 24, - PERF_VFD_1_BURST_REQ = 25, - PERF_VFDP_STALL_CYCLES_VFD = 26, - PERF_VFDP_STALL_CYCLES_VFD_INDEX = 27, - PERF_VFDP_STALL_CYCLES_VFD_PROG = 28, - PERF_VFDP_STARVE_CYCLES_PC = 29, - PERF_VFDP_VS_STAGE_32_WAVES = 30, -}; - -enum a5xx_hlsq_perfcounter_select { - PERF_HLSQ_BUSY_CYCLES = 0, - PERF_HLSQ_STALL_CYCLES_UCHE = 1, - PERF_HLSQ_STALL_CYCLES_SP_STATE = 2, - PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3, - PERF_HLSQ_UCHE_LATENCY_CYCLES = 4, - PERF_HLSQ_UCHE_LATENCY_COUNT = 5, - PERF_HLSQ_FS_STAGE_32_WAVES = 6, - PERF_HLSQ_FS_STAGE_64_WAVES = 7, - PERF_HLSQ_QUADS = 8, - PERF_HLSQ_SP_STATE_COPY_TRANS_FS_STAGE = 9, - PERF_HLSQ_SP_STATE_COPY_TRANS_VS_STAGE = 10, - PERF_HLSQ_TP_STATE_COPY_TRANS_FS_STAGE = 11, - PERF_HLSQ_TP_STATE_COPY_TRANS_VS_STAGE = 12, - PERF_HLSQ_CS_INVOCATIONS = 13, - PERF_HLSQ_COMPUTE_DRAWCALLS = 14, -}; - -enum a5xx_vpc_perfcounter_select { - PERF_VPC_BUSY_CYCLES = 0, - PERF_VPC_WORKING_CYCLES = 1, - PERF_VPC_STALL_CYCLES_UCHE = 2, - PERF_VPC_STALL_CYCLES_VFD_WACK = 3, 
- PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4, - PERF_VPC_STALL_CYCLES_PC = 5, - PERF_VPC_STALL_CYCLES_SP_LM = 6, - PERF_VPC_POS_EXPORT_STALL_CYCLES = 7, - PERF_VPC_STARVE_CYCLES_SP = 8, - PERF_VPC_STARVE_CYCLES_LRZ = 9, - PERF_VPC_PC_PRIMITIVES = 10, - PERF_VPC_SP_COMPONENTS = 11, - PERF_VPC_SP_LM_PRIMITIVES = 12, - PERF_VPC_SP_LM_COMPONENTS = 13, - PERF_VPC_SP_LM_DWORDS = 14, - PERF_VPC_STREAMOUT_COMPONENTS = 15, - PERF_VPC_GRANT_PHASES = 16, -}; - -enum a5xx_tse_perfcounter_select { - PERF_TSE_BUSY_CYCLES = 0, - PERF_TSE_CLIPPING_CYCLES = 1, - PERF_TSE_STALL_CYCLES_RAS = 2, - PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3, - PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4, - PERF_TSE_STARVE_CYCLES_PC = 5, - PERF_TSE_INPUT_PRIM = 6, - PERF_TSE_INPUT_NULL_PRIM = 7, - PERF_TSE_TRIVAL_REJ_PRIM = 8, - PERF_TSE_CLIPPED_PRIM = 9, - PERF_TSE_ZERO_AREA_PRIM = 10, - PERF_TSE_FACENESS_CULLED_PRIM = 11, - PERF_TSE_ZERO_PIXEL_PRIM = 12, - PERF_TSE_OUTPUT_NULL_PRIM = 13, - PERF_TSE_OUTPUT_VISIBLE_PRIM = 14, - PERF_TSE_CINVOCATION = 15, - PERF_TSE_CPRIMITIVES = 16, - PERF_TSE_2D_INPUT_PRIM = 17, - PERF_TSE_2D_ALIVE_CLCLES = 18, -}; - -enum a5xx_ras_perfcounter_select { - PERF_RAS_BUSY_CYCLES = 0, - PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1, - PERF_RAS_STALL_CYCLES_LRZ = 2, - PERF_RAS_STARVE_CYCLES_TSE = 3, - PERF_RAS_SUPER_TILES = 4, - PERF_RAS_8X4_TILES = 5, - PERF_RAS_MASKGEN_ACTIVE = 6, - PERF_RAS_FULLY_COVERED_SUPER_TILES = 7, - PERF_RAS_FULLY_COVERED_8X4_TILES = 8, - PERF_RAS_PRIM_KILLED_INVISILBE = 9, -}; - -enum a5xx_lrz_perfcounter_select { - PERF_LRZ_BUSY_CYCLES = 0, - PERF_LRZ_STARVE_CYCLES_RAS = 1, - PERF_LRZ_STALL_CYCLES_RB = 2, - PERF_LRZ_STALL_CYCLES_VSC = 3, - PERF_LRZ_STALL_CYCLES_VPC = 4, - PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5, - PERF_LRZ_STALL_CYCLES_UCHE = 6, - PERF_LRZ_LRZ_READ = 7, - PERF_LRZ_LRZ_WRITE = 8, - PERF_LRZ_READ_LATENCY = 9, - PERF_LRZ_MERGE_CACHE_UPDATING = 10, - PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11, - PERF_LRZ_PRIM_KILLED_BY_LRZ = 12, - PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13, - PERF_LRZ_FULL_8X8_TILES = 14, - PERF_LRZ_PARTIAL_8X8_TILES = 15, - PERF_LRZ_TILE_KILLED = 16, - PERF_LRZ_TOTAL_PIXEL = 17, - PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18, -}; - -enum a5xx_uche_perfcounter_select { - PERF_UCHE_BUSY_CYCLES = 0, - PERF_UCHE_STALL_CYCLES_VBIF = 1, - PERF_UCHE_VBIF_LATENCY_CYCLES = 2, - PERF_UCHE_VBIF_LATENCY_SAMPLES = 3, - PERF_UCHE_VBIF_READ_BEATS_TP = 4, - PERF_UCHE_VBIF_READ_BEATS_VFD = 5, - PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6, - PERF_UCHE_VBIF_READ_BEATS_LRZ = 7, - PERF_UCHE_VBIF_READ_BEATS_SP = 8, - PERF_UCHE_READ_REQUESTS_TP = 9, - PERF_UCHE_READ_REQUESTS_VFD = 10, - PERF_UCHE_READ_REQUESTS_HLSQ = 11, - PERF_UCHE_READ_REQUESTS_LRZ = 12, - PERF_UCHE_READ_REQUESTS_SP = 13, - PERF_UCHE_WRITE_REQUESTS_LRZ = 14, - PERF_UCHE_WRITE_REQUESTS_SP = 15, - PERF_UCHE_WRITE_REQUESTS_VPC = 16, - PERF_UCHE_WRITE_REQUESTS_VSC = 17, - PERF_UCHE_EVICTS = 18, - PERF_UCHE_BANK_REQ0 = 19, - PERF_UCHE_BANK_REQ1 = 20, - PERF_UCHE_BANK_REQ2 = 21, - PERF_UCHE_BANK_REQ3 = 22, - PERF_UCHE_BANK_REQ4 = 23, - PERF_UCHE_BANK_REQ5 = 24, - PERF_UCHE_BANK_REQ6 = 25, - PERF_UCHE_BANK_REQ7 = 26, - PERF_UCHE_VBIF_READ_BEATS_CH0 = 27, - PERF_UCHE_VBIF_READ_BEATS_CH1 = 28, - PERF_UCHE_GMEM_READ_BEATS = 29, - PERF_UCHE_FLAG_COUNT = 30, -}; - -enum a5xx_tp_perfcounter_select { - PERF_TP_BUSY_CYCLES = 0, - PERF_TP_STALL_CYCLES_UCHE = 1, - PERF_TP_LATENCY_CYCLES = 2, - PERF_TP_LATENCY_TRANS = 3, - PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4, - PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5, - PERF_TP_L1_CACHELINE_REQUESTS = 6, - 
PERF_TP_L1_CACHELINE_MISSES = 7, - PERF_TP_SP_TP_TRANS = 8, - PERF_TP_TP_SP_TRANS = 9, - PERF_TP_OUTPUT_PIXELS = 10, - PERF_TP_FILTER_WORKLOAD_16BIT = 11, - PERF_TP_FILTER_WORKLOAD_32BIT = 12, - PERF_TP_QUADS_RECEIVED = 13, - PERF_TP_QUADS_OFFSET = 14, - PERF_TP_QUADS_SHADOW = 15, - PERF_TP_QUADS_ARRAY = 16, - PERF_TP_QUADS_GRADIENT = 17, - PERF_TP_QUADS_1D = 18, - PERF_TP_QUADS_2D = 19, - PERF_TP_QUADS_BUFFER = 20, - PERF_TP_QUADS_3D = 21, - PERF_TP_QUADS_CUBE = 22, - PERF_TP_STATE_CACHE_REQUESTS = 23, - PERF_TP_STATE_CACHE_MISSES = 24, - PERF_TP_DIVERGENT_QUADS_RECEIVED = 25, - PERF_TP_BINDLESS_STATE_CACHE_REQUESTS = 26, - PERF_TP_BINDLESS_STATE_CACHE_MISSES = 27, - PERF_TP_PRT_NON_RESIDENT_EVENTS = 28, - PERF_TP_OUTPUT_PIXELS_POINT = 29, - PERF_TP_OUTPUT_PIXELS_BILINEAR = 30, - PERF_TP_OUTPUT_PIXELS_MIP = 31, - PERF_TP_OUTPUT_PIXELS_ANISO = 32, - PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 33, - PERF_TP_FLAG_CACHE_REQUESTS = 34, - PERF_TP_FLAG_CACHE_MISSES = 35, - PERF_TP_L1_5_L2_REQUESTS = 36, - PERF_TP_2D_OUTPUT_PIXELS = 37, - PERF_TP_2D_OUTPUT_PIXELS_POINT = 38, - PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 39, - PERF_TP_2D_FILTER_WORKLOAD_16BIT = 40, - PERF_TP_2D_FILTER_WORKLOAD_32BIT = 41, -}; - -enum a5xx_sp_perfcounter_select { - PERF_SP_BUSY_CYCLES = 0, - PERF_SP_ALU_WORKING_CYCLES = 1, - PERF_SP_EFU_WORKING_CYCLES = 2, - PERF_SP_STALL_CYCLES_VPC = 3, - PERF_SP_STALL_CYCLES_TP = 4, - PERF_SP_STALL_CYCLES_UCHE = 5, - PERF_SP_STALL_CYCLES_RB = 6, - PERF_SP_SCHEDULER_NON_WORKING = 7, - PERF_SP_WAVE_CONTEXTS = 8, - PERF_SP_WAVE_CONTEXT_CYCLES = 9, - PERF_SP_FS_STAGE_WAVE_CYCLES = 10, - PERF_SP_FS_STAGE_WAVE_SAMPLES = 11, - PERF_SP_VS_STAGE_WAVE_CYCLES = 12, - PERF_SP_VS_STAGE_WAVE_SAMPLES = 13, - PERF_SP_FS_STAGE_DURATION_CYCLES = 14, - PERF_SP_VS_STAGE_DURATION_CYCLES = 15, - PERF_SP_WAVE_CTRL_CYCLES = 16, - PERF_SP_WAVE_LOAD_CYCLES = 17, - PERF_SP_WAVE_EMIT_CYCLES = 18, - PERF_SP_WAVE_NOP_CYCLES = 19, - PERF_SP_WAVE_WAIT_CYCLES = 20, - PERF_SP_WAVE_FETCH_CYCLES = 21, - PERF_SP_WAVE_IDLE_CYCLES = 22, - PERF_SP_WAVE_END_CYCLES = 23, - PERF_SP_WAVE_LONG_SYNC_CYCLES = 24, - PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25, - PERF_SP_WAVE_JOIN_CYCLES = 26, - PERF_SP_LM_LOAD_INSTRUCTIONS = 27, - PERF_SP_LM_STORE_INSTRUCTIONS = 28, - PERF_SP_LM_ATOMICS = 29, - PERF_SP_GM_LOAD_INSTRUCTIONS = 30, - PERF_SP_GM_STORE_INSTRUCTIONS = 31, - PERF_SP_GM_ATOMICS = 32, - PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33, - PERF_SP_VS_STAGE_CFLOW_INSTRUCTIONS = 34, - PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 35, - PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 36, - PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 37, - PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 38, - PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 39, - PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 40, - PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 41, - PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 42, - PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 43, - PERF_SP_VS_INSTRUCTIONS = 44, - PERF_SP_FS_INSTRUCTIONS = 45, - PERF_SP_ADDR_LOCK_COUNT = 46, - PERF_SP_UCHE_READ_TRANS = 47, - PERF_SP_UCHE_WRITE_TRANS = 48, - PERF_SP_EXPORT_VPC_TRANS = 49, - PERF_SP_EXPORT_RB_TRANS = 50, - PERF_SP_PIXELS_KILLED = 51, - PERF_SP_ICL1_REQUESTS = 52, - PERF_SP_ICL1_MISSES = 53, - PERF_SP_ICL0_REQUESTS = 54, - PERF_SP_ICL0_MISSES = 55, - PERF_SP_HS_INSTRUCTIONS = 56, - PERF_SP_DS_INSTRUCTIONS = 57, - PERF_SP_GS_INSTRUCTIONS = 58, - PERF_SP_CS_INSTRUCTIONS = 59, - PERF_SP_GPR_READ = 60, - PERF_SP_GPR_WRITE = 61, - PERF_SP_LM_CH0_REQUESTS = 62, - PERF_SP_LM_CH1_REQUESTS = 63, - PERF_SP_LM_BANK_CONFLICTS = 64, -}; - -enum a5xx_rb_perfcounter_select { - 
PERF_RB_BUSY_CYCLES = 0, - PERF_RB_STALL_CYCLES_CCU = 1, - PERF_RB_STALL_CYCLES_HLSQ = 2, - PERF_RB_STALL_CYCLES_FIFO0_FULL = 3, - PERF_RB_STALL_CYCLES_FIFO1_FULL = 4, - PERF_RB_STALL_CYCLES_FIFO2_FULL = 5, - PERF_RB_STARVE_CYCLES_SP = 6, - PERF_RB_STARVE_CYCLES_LRZ_TILE = 7, - PERF_RB_STARVE_CYCLES_CCU = 8, - PERF_RB_STARVE_CYCLES_Z_PLANE = 9, - PERF_RB_STARVE_CYCLES_BARY_PLANE = 10, - PERF_RB_Z_WORKLOAD = 11, - PERF_RB_HLSQ_ACTIVE = 12, - PERF_RB_Z_READ = 13, - PERF_RB_Z_WRITE = 14, - PERF_RB_C_READ = 15, - PERF_RB_C_WRITE = 16, - PERF_RB_TOTAL_PASS = 17, - PERF_RB_Z_PASS = 18, - PERF_RB_Z_FAIL = 19, - PERF_RB_S_FAIL = 20, - PERF_RB_BLENDED_FXP_COMPONENTS = 21, - PERF_RB_BLENDED_FP16_COMPONENTS = 22, - RB_RESERVED = 23, - PERF_RB_2D_ALIVE_CYCLES = 24, - PERF_RB_2D_STALL_CYCLES_A2D = 25, - PERF_RB_2D_STARVE_CYCLES_SRC = 26, - PERF_RB_2D_STARVE_CYCLES_SP = 27, - PERF_RB_2D_STARVE_CYCLES_DST = 28, - PERF_RB_2D_VALID_PIXELS = 29, -}; - -enum a5xx_rb_samples_perfcounter_select { - TOTAL_SAMPLES = 0, - ZPASS_SAMPLES = 1, - ZFAIL_SAMPLES = 2, - SFAIL_SAMPLES = 3, -}; - -enum a5xx_vsc_perfcounter_select { - PERF_VSC_BUSY_CYCLES = 0, - PERF_VSC_WORKING_CYCLES = 1, - PERF_VSC_STALL_CYCLES_UCHE = 2, - PERF_VSC_EOT_NUM = 3, -}; - -enum a5xx_ccu_perfcounter_select { - PERF_CCU_BUSY_CYCLES = 0, - PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1, - PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2, - PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3, - PERF_CCU_DEPTH_BLOCKS = 4, - PERF_CCU_COLOR_BLOCKS = 5, - PERF_CCU_DEPTH_BLOCK_HIT = 6, - PERF_CCU_COLOR_BLOCK_HIT = 7, - PERF_CCU_PARTIAL_BLOCK_READ = 8, - PERF_CCU_GMEM_READ = 9, - PERF_CCU_GMEM_WRITE = 10, - PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11, - PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12, - PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13, - PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14, - PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15, - PERF_CCU_COLOR_READ_FLAG0_COUNT = 16, - PERF_CCU_COLOR_READ_FLAG1_COUNT = 17, - PERF_CCU_COLOR_READ_FLAG2_COUNT = 18, - PERF_CCU_COLOR_READ_FLAG3_COUNT = 19, - PERF_CCU_COLOR_READ_FLAG4_COUNT = 20, - PERF_CCU_2D_BUSY_CYCLES = 21, - PERF_CCU_2D_RD_REQ = 22, - PERF_CCU_2D_WR_REQ = 23, - PERF_CCU_2D_REORDER_STARVE_CYCLES = 24, - PERF_CCU_2D_PIXELS = 25, -}; - -enum a5xx_cmp_perfcounter_select { - PERF_CMPDECMP_STALL_CYCLES_VBIF = 0, - PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1, - PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2, - PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3, - PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4, - PERF_CMPDECMP_VBIF_READ_REQUEST = 5, - PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6, - PERF_CMPDECMP_VBIF_READ_DATA = 7, - PERF_CMPDECMP_VBIF_WRITE_DATA = 8, - PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9, - PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10, - PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11, - PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12, - PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13, - PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14, - PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 15, - PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 16, - PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 17, - PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 18, - PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 19, - PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 20, - PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 21, - PERF_CMPDECMP_2D_RD_DATA = 22, - PERF_CMPDECMP_2D_WR_DATA = 23, -}; - -enum a5xx_vbif_perfcounter_select { - AXI_READ_REQUESTS_ID_0 = 0, - AXI_READ_REQUESTS_ID_1 = 1, - AXI_READ_REQUESTS_ID_2 = 2, - AXI_READ_REQUESTS_ID_3 = 3, - AXI_READ_REQUESTS_ID_4 = 4, - AXI_READ_REQUESTS_ID_5 = 5, - AXI_READ_REQUESTS_ID_6 = 6, - AXI_READ_REQUESTS_ID_7 = 7, - 
AXI_READ_REQUESTS_ID_8 = 8, - AXI_READ_REQUESTS_ID_9 = 9, - AXI_READ_REQUESTS_ID_10 = 10, - AXI_READ_REQUESTS_ID_11 = 11, - AXI_READ_REQUESTS_ID_12 = 12, - AXI_READ_REQUESTS_ID_13 = 13, - AXI_READ_REQUESTS_ID_14 = 14, - AXI_READ_REQUESTS_ID_15 = 15, - AXI0_READ_REQUESTS_TOTAL = 16, - AXI1_READ_REQUESTS_TOTAL = 17, - AXI2_READ_REQUESTS_TOTAL = 18, - AXI3_READ_REQUESTS_TOTAL = 19, - AXI_READ_REQUESTS_TOTAL = 20, - AXI_WRITE_REQUESTS_ID_0 = 21, - AXI_WRITE_REQUESTS_ID_1 = 22, - AXI_WRITE_REQUESTS_ID_2 = 23, - AXI_WRITE_REQUESTS_ID_3 = 24, - AXI_WRITE_REQUESTS_ID_4 = 25, - AXI_WRITE_REQUESTS_ID_5 = 26, - AXI_WRITE_REQUESTS_ID_6 = 27, - AXI_WRITE_REQUESTS_ID_7 = 28, - AXI_WRITE_REQUESTS_ID_8 = 29, - AXI_WRITE_REQUESTS_ID_9 = 30, - AXI_WRITE_REQUESTS_ID_10 = 31, - AXI_WRITE_REQUESTS_ID_11 = 32, - AXI_WRITE_REQUESTS_ID_12 = 33, - AXI_WRITE_REQUESTS_ID_13 = 34, - AXI_WRITE_REQUESTS_ID_14 = 35, - AXI_WRITE_REQUESTS_ID_15 = 36, - AXI0_WRITE_REQUESTS_TOTAL = 37, - AXI1_WRITE_REQUESTS_TOTAL = 38, - AXI2_WRITE_REQUESTS_TOTAL = 39, - AXI3_WRITE_REQUESTS_TOTAL = 40, - AXI_WRITE_REQUESTS_TOTAL = 41, - AXI_TOTAL_REQUESTS = 42, - AXI_READ_DATA_BEATS_ID_0 = 43, - AXI_READ_DATA_BEATS_ID_1 = 44, - AXI_READ_DATA_BEATS_ID_2 = 45, - AXI_READ_DATA_BEATS_ID_3 = 46, - AXI_READ_DATA_BEATS_ID_4 = 47, - AXI_READ_DATA_BEATS_ID_5 = 48, - AXI_READ_DATA_BEATS_ID_6 = 49, - AXI_READ_DATA_BEATS_ID_7 = 50, - AXI_READ_DATA_BEATS_ID_8 = 51, - AXI_READ_DATA_BEATS_ID_9 = 52, - AXI_READ_DATA_BEATS_ID_10 = 53, - AXI_READ_DATA_BEATS_ID_11 = 54, - AXI_READ_DATA_BEATS_ID_12 = 55, - AXI_READ_DATA_BEATS_ID_13 = 56, - AXI_READ_DATA_BEATS_ID_14 = 57, - AXI_READ_DATA_BEATS_ID_15 = 58, - AXI0_READ_DATA_BEATS_TOTAL = 59, - AXI1_READ_DATA_BEATS_TOTAL = 60, - AXI2_READ_DATA_BEATS_TOTAL = 61, - AXI3_READ_DATA_BEATS_TOTAL = 62, - AXI_READ_DATA_BEATS_TOTAL = 63, - AXI_WRITE_DATA_BEATS_ID_0 = 64, - AXI_WRITE_DATA_BEATS_ID_1 = 65, - AXI_WRITE_DATA_BEATS_ID_2 = 66, - AXI_WRITE_DATA_BEATS_ID_3 = 67, - AXI_WRITE_DATA_BEATS_ID_4 = 68, - AXI_WRITE_DATA_BEATS_ID_5 = 69, - AXI_WRITE_DATA_BEATS_ID_6 = 70, - AXI_WRITE_DATA_BEATS_ID_7 = 71, - AXI_WRITE_DATA_BEATS_ID_8 = 72, - AXI_WRITE_DATA_BEATS_ID_9 = 73, - AXI_WRITE_DATA_BEATS_ID_10 = 74, - AXI_WRITE_DATA_BEATS_ID_11 = 75, - AXI_WRITE_DATA_BEATS_ID_12 = 76, - AXI_WRITE_DATA_BEATS_ID_13 = 77, - AXI_WRITE_DATA_BEATS_ID_14 = 78, - AXI_WRITE_DATA_BEATS_ID_15 = 79, - AXI0_WRITE_DATA_BEATS_TOTAL = 80, - AXI1_WRITE_DATA_BEATS_TOTAL = 81, - AXI2_WRITE_DATA_BEATS_TOTAL = 82, - AXI3_WRITE_DATA_BEATS_TOTAL = 83, - AXI_WRITE_DATA_BEATS_TOTAL = 84, - AXI_DATA_BEATS_TOTAL = 85, -}; - -enum a5xx_tex_filter { - A5XX_TEX_NEAREST = 0, - A5XX_TEX_LINEAR = 1, - A5XX_TEX_ANISO = 2, -}; - -enum a5xx_tex_clamp { - A5XX_TEX_REPEAT = 0, - A5XX_TEX_CLAMP_TO_EDGE = 1, - A5XX_TEX_MIRROR_REPEAT = 2, - A5XX_TEX_CLAMP_TO_BORDER = 3, - A5XX_TEX_MIRROR_CLAMP = 4, -}; - -enum a5xx_tex_aniso { - A5XX_TEX_ANISO_1 = 0, - A5XX_TEX_ANISO_2 = 1, - A5XX_TEX_ANISO_4 = 2, - A5XX_TEX_ANISO_8 = 3, - A5XX_TEX_ANISO_16 = 4, -}; - -enum a5xx_tex_swiz { - A5XX_TEX_X = 0, - A5XX_TEX_Y = 1, - A5XX_TEX_Z = 2, - A5XX_TEX_W = 3, - A5XX_TEX_ZERO = 4, - A5XX_TEX_ONE = 5, -}; - -enum a5xx_tex_type { - A5XX_TEX_1D = 0, - A5XX_TEX_2D = 1, - A5XX_TEX_CUBE = 2, - A5XX_TEX_3D = 3, - A5XX_TEX_BUFFER = 4, -}; - -#define A5XX_INT0_RBBM_GPU_IDLE 0x00000001 -#define A5XX_INT0_RBBM_AHB_ERROR 0x00000002 -#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT 0x00000004 -#define A5XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008 -#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010 -#define 
A5XX_INT0_RBBM_ETS_MS_TIMEOUT 0x00000020 -#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW 0x00000040 -#define A5XX_INT0_RBBM_GPC_ERROR 0x00000080 -#define A5XX_INT0_CP_SW 0x00000100 -#define A5XX_INT0_CP_HW_ERROR 0x00000200 -#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS 0x00000400 -#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS 0x00000800 -#define A5XX_INT0_CP_CCU_RESOLVE_TS 0x00001000 -#define A5XX_INT0_CP_IB2 0x00002000 -#define A5XX_INT0_CP_IB1 0x00004000 -#define A5XX_INT0_CP_RB 0x00008000 -#define A5XX_INT0_CP_UNUSED_1 0x00010000 -#define A5XX_INT0_CP_RB_DONE_TS 0x00020000 -#define A5XX_INT0_CP_WT_DONE_TS 0x00040000 -#define A5XX_INT0_UNKNOWN_1 0x00080000 -#define A5XX_INT0_CP_CACHE_FLUSH_TS 0x00100000 -#define A5XX_INT0_UNUSED_2 0x00200000 -#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00400000 -#define A5XX_INT0_MISC_HANG_DETECT 0x00800000 -#define A5XX_INT0_UCHE_OOB_ACCESS 0x01000000 -#define A5XX_INT0_UCHE_TRAP_INTR 0x02000000 -#define A5XX_INT0_DEBBUS_INTR_0 0x04000000 -#define A5XX_INT0_DEBBUS_INTR_1 0x08000000 -#define A5XX_INT0_GPMU_VOLTAGE_DROOP 0x10000000 -#define A5XX_INT0_GPMU_FIRMWARE 0x20000000 -#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000 -#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000 - -#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001 -#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002 -#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004 -#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008 -#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010 -#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020 - -#define REG_A5XX_CP_RB_BASE 0x00000800 - -#define REG_A5XX_CP_RB_BASE_HI 0x00000801 - -#define REG_A5XX_CP_RB_CNTL 0x00000802 - -#define REG_A5XX_CP_RB_RPTR_ADDR 0x00000804 - -#define REG_A5XX_CP_RB_RPTR_ADDR_HI 0x00000805 - -#define REG_A5XX_CP_RB_RPTR 0x00000806 - -#define REG_A5XX_CP_RB_WPTR 0x00000807 - -#define REG_A5XX_CP_PFP_STAT_ADDR 0x00000808 - -#define REG_A5XX_CP_PFP_STAT_DATA 0x00000809 - -#define REG_A5XX_CP_DRAW_STATE_ADDR 0x0000080b - -#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c - -#define REG_A5XX_CP_ME_NRT_ADDR_LO 0x0000080d - -#define REG_A5XX_CP_ME_NRT_ADDR_HI 0x0000080e - -#define REG_A5XX_CP_ME_NRT_DATA 0x00000810 - -#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817 - -#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818 - -#define REG_A5XX_CP_CRASH_DUMP_CNTL 0x00000819 - -#define REG_A5XX_CP_ME_STAT_ADDR 0x0000081a - -#define REG_A5XX_CP_ROQ_THRESHOLDS_1 0x0000081f - -#define REG_A5XX_CP_ROQ_THRESHOLDS_2 0x00000820 - -#define REG_A5XX_CP_ROQ_DBG_ADDR 0x00000821 - -#define REG_A5XX_CP_ROQ_DBG_DATA 0x00000822 - -#define REG_A5XX_CP_MEQ_DBG_ADDR 0x00000823 - -#define REG_A5XX_CP_MEQ_DBG_DATA 0x00000824 - -#define REG_A5XX_CP_MEQ_THRESHOLDS 0x00000825 - -#define REG_A5XX_CP_MERCIU_SIZE 0x00000826 - -#define REG_A5XX_CP_MERCIU_DBG_ADDR 0x00000827 - -#define REG_A5XX_CP_MERCIU_DBG_DATA_1 0x00000828 - -#define REG_A5XX_CP_MERCIU_DBG_DATA_2 0x00000829 - -#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR 0x0000082a - -#define REG_A5XX_CP_PFP_UCODE_DBG_DATA 0x0000082b - -#define REG_A5XX_CP_ME_UCODE_DBG_ADDR 0x0000082f - -#define REG_A5XX_CP_ME_UCODE_DBG_DATA 0x00000830 - -#define REG_A5XX_CP_CNTL 0x00000831 - -#define REG_A5XX_CP_PFP_ME_CNTL 0x00000832 - -#define REG_A5XX_CP_CHICKEN_DBG 0x00000833 - -#define REG_A5XX_CP_PFP_INSTR_BASE_LO 0x00000835 - -#define REG_A5XX_CP_PFP_INSTR_BASE_HI 0x00000836 - -#define REG_A5XX_CP_ME_INSTR_BASE_LO 0x00000838 - -#define REG_A5XX_CP_ME_INSTR_BASE_HI 0x00000839 - -#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL 0x0000083b - -#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO 
0x0000083c - -#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI 0x0000083d - -#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO 0x0000083e - -#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI 0x0000083f - -#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x00000840 - -#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x00000841 - -#define REG_A5XX_CP_ADDR_MODE_CNTL 0x00000860 - -#define REG_A5XX_CP_ME_STAT_DATA 0x00000b14 - -#define REG_A5XX_CP_WFI_PEND_CTR 0x00000b15 - -#define REG_A5XX_CP_INTERRUPT_STATUS 0x00000b18 - -#define REG_A5XX_CP_HW_FAULT 0x00000b1a - -#define REG_A5XX_CP_PROTECT_STATUS 0x00000b1c - -#define REG_A5XX_CP_IB1_BASE 0x00000b1f - -#define REG_A5XX_CP_IB1_BASE_HI 0x00000b20 - -#define REG_A5XX_CP_IB1_BUFSZ 0x00000b21 - -#define REG_A5XX_CP_IB2_BASE 0x00000b22 - -#define REG_A5XX_CP_IB2_BASE_HI 0x00000b23 - -#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24 - -#define REG_A5XX_CP_SCRATCH(i0) (0x00000b78 + 0x1*(i0)) - -static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; } - -#define REG_A5XX_CP_PROTECT(i0) (0x00000880 + 0x1*(i0)) - -static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; } -#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff -#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0 -static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val) -{ - return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK; -} -#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000 -#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24 -static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val) -{ - return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK; -} -#define A5XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000 -#define A5XX_CP_PROTECT_REG_TRAP_READ 0x40000000 - -#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0 - -#define REG_A5XX_CP_AHB_FAULT 0x00000b1b - -#define REG_A5XX_CP_PERFCTR_CP_SEL_0 0x00000bb0 - -#define REG_A5XX_CP_PERFCTR_CP_SEL_1 0x00000bb1 - -#define REG_A5XX_CP_PERFCTR_CP_SEL_2 0x00000bb2 - -#define REG_A5XX_CP_PERFCTR_CP_SEL_3 0x00000bb3 - -#define REG_A5XX_CP_PERFCTR_CP_SEL_4 0x00000bb4 - -#define REG_A5XX_CP_PERFCTR_CP_SEL_5 0x00000bb5 - -#define REG_A5XX_CP_PERFCTR_CP_SEL_6 0x00000bb6 - -#define REG_A5XX_CP_PERFCTR_CP_SEL_7 0x00000bb7 - -#define REG_A5XX_VSC_ADDR_MODE_CNTL 0x00000bc1 - -#define REG_A5XX_CP_POWERCTR_CP_SEL_0 0x00000bba - -#define REG_A5XX_CP_POWERCTR_CP_SEL_1 0x00000bbb - -#define REG_A5XX_CP_POWERCTR_CP_SEL_2 0x00000bbc - -#define REG_A5XX_CP_POWERCTR_CP_SEL_3 0x00000bbd - -#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A 0x00000004 - -#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B 0x00000005 - -#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C 0x00000006 - -#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D 0x00000007 - -#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT 0x00000008 - -#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM 0x00000009 - -#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT 0x00000018 - -#define REG_A5XX_RBBM_CFG_DBGBUS_OPL 0x0000000a - -#define REG_A5XX_RBBM_CFG_DBGBUS_OPE 0x0000000b - -#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0 0x0000000c - -#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1 0x0000000d - -#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2 0x0000000e - -#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3 0x0000000f - -#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0 0x00000010 - -#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1 0x00000011 - -#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2 0x00000012 - -#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3 0x00000013 - -#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0 
0x00000014 - -#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1 0x00000015 - -#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0 0x00000016 - -#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1 0x00000017 - -#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2 0x00000018 - -#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3 0x00000019 - -#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0 0x0000001a - -#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1 0x0000001b - -#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2 0x0000001c - -#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3 0x0000001d - -#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE 0x0000001e - -#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0 0x0000001f - -#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1 0x00000020 - -#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG 0x00000021 - -#define REG_A5XX_RBBM_CFG_DBGBUS_IDX 0x00000022 - -#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC 0x00000023 - -#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT 0x00000024 - -#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000002f - -#define REG_A5XX_RBBM_INT_CLEAR_CMD 0x00000037 - -#define REG_A5XX_RBBM_INT_0_MASK 0x00000038 -#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001 -#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR 0x00000002 -#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT 0x00000004 -#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT 0x00000008 -#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT 0x00000010 -#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT 0x00000020 -#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW 0x00000040 -#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080 -#define A5XX_RBBM_INT_0_MASK_CP_SW 0x00000100 -#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200 -#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400 -#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800 -#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000 -#define A5XX_RBBM_INT_0_MASK_CP_IB2 0x00002000 -#define A5XX_RBBM_INT_0_MASK_CP_IB1 0x00004000 -#define A5XX_RBBM_INT_0_MASK_CP_RB 0x00008000 -#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000 -#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000 -#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000 -#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000 -#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT 0x00800000 -#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000 -#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000 -#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000 -#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000 -#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP 0x10000000 -#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE 0x20000000 -#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000 -#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000 - -#define REG_A5XX_RBBM_AHB_DBG_CNTL 0x0000003f - -#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL 0x00000041 - -#define REG_A5XX_RBBM_SW_RESET_CMD 0x00000043 - -#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045 - -#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046 - -#define REG_A5XX_RBBM_DBG_LO_HI_GPIO 0x00000048 - -#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL 0x00000049 - -#define REG_A5XX_RBBM_CLOCK_CNTL_TP0 0x0000004a - -#define REG_A5XX_RBBM_CLOCK_CNTL_TP1 0x0000004b - -#define REG_A5XX_RBBM_CLOCK_CNTL_TP2 0x0000004c - -#define REG_A5XX_RBBM_CLOCK_CNTL_TP3 0x0000004d - -#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0 0x0000004e - -#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1 0x0000004f - -#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2 0x00000050 - -#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3 0x00000051 - -#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0 0x00000052 - -#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1 
0x00000053 - -#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2 0x00000054 - -#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3 0x00000055 - -#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG 0x00000059 - -#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE 0x0000005a - -#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE 0x0000005b - -#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE 0x0000005c - -#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE 0x0000005d - -#define REG_A5XX_RBBM_CLOCK_HYST_UCHE 0x0000005e - -#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE 0x0000005f - -#define REG_A5XX_RBBM_CLOCK_MODE_GPC 0x00000060 - -#define REG_A5XX_RBBM_CLOCK_DELAY_GPC 0x00000061 - -#define REG_A5XX_RBBM_CLOCK_HYST_GPC 0x00000062 - -#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000063 - -#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x00000064 - -#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000065 - -#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ 0x00000066 - -#define REG_A5XX_RBBM_CLOCK_CNTL 0x00000067 - -#define REG_A5XX_RBBM_CLOCK_CNTL_SP0 0x00000068 - -#define REG_A5XX_RBBM_CLOCK_CNTL_SP1 0x00000069 - -#define REG_A5XX_RBBM_CLOCK_CNTL_SP2 0x0000006a - -#define REG_A5XX_RBBM_CLOCK_CNTL_SP3 0x0000006b - -#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0 0x0000006c - -#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1 0x0000006d - -#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2 0x0000006e - -#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3 0x0000006f - -#define REG_A5XX_RBBM_CLOCK_HYST_SP0 0x00000070 - -#define REG_A5XX_RBBM_CLOCK_HYST_SP1 0x00000071 - -#define REG_A5XX_RBBM_CLOCK_HYST_SP2 0x00000072 - -#define REG_A5XX_RBBM_CLOCK_HYST_SP3 0x00000073 - -#define REG_A5XX_RBBM_CLOCK_DELAY_SP0 0x00000074 - -#define REG_A5XX_RBBM_CLOCK_DELAY_SP1 0x00000075 - -#define REG_A5XX_RBBM_CLOCK_DELAY_SP2 0x00000076 - -#define REG_A5XX_RBBM_CLOCK_DELAY_SP3 0x00000077 - -#define REG_A5XX_RBBM_CLOCK_CNTL_RB0 0x00000078 - -#define REG_A5XX_RBBM_CLOCK_CNTL_RB1 0x00000079 - -#define REG_A5XX_RBBM_CLOCK_CNTL_RB2 0x0000007a - -#define REG_A5XX_RBBM_CLOCK_CNTL_RB3 0x0000007b - -#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0 0x0000007c - -#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1 0x0000007d - -#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2 0x0000007e - -#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3 0x0000007f - -#define REG_A5XX_RBBM_CLOCK_HYST_RAC 0x00000080 - -#define REG_A5XX_RBBM_CLOCK_DELAY_RAC 0x00000081 - -#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0 0x00000082 - -#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1 0x00000083 - -#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2 0x00000084 - -#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3 0x00000085 - -#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000086 - -#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000087 - -#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000088 - -#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000089 - -#define REG_A5XX_RBBM_CLOCK_CNTL_RAC 0x0000008a - -#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC 0x0000008b - -#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0 0x0000008c - -#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1 0x0000008d - -#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2 0x0000008e - -#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3 0x0000008f - -#define REG_A5XX_RBBM_CLOCK_HYST_VFD 0x00000090 - -#define REG_A5XX_RBBM_CLOCK_MODE_VFD 0x00000091 - -#define REG_A5XX_RBBM_CLOCK_DELAY_VFD 0x00000092 - -#define REG_A5XX_RBBM_AHB_CNTL0 0x00000093 - -#define REG_A5XX_RBBM_AHB_CNTL1 0x00000094 - -#define REG_A5XX_RBBM_AHB_CNTL2 0x00000095 - -#define REG_A5XX_RBBM_AHB_CMD 0x00000096 - -#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11 0x0000009c - -#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12 0x0000009d - -#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13 
0x0000009e - -#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14 0x0000009f - -#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15 0x000000a0 - -#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16 0x000000a1 - -#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17 0x000000a2 - -#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18 0x000000a3 - -#define REG_A5XX_RBBM_CLOCK_DELAY_TP0 0x000000a4 - -#define REG_A5XX_RBBM_CLOCK_DELAY_TP1 0x000000a5 - -#define REG_A5XX_RBBM_CLOCK_DELAY_TP2 0x000000a6 - -#define REG_A5XX_RBBM_CLOCK_DELAY_TP3 0x000000a7 - -#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0 0x000000a8 - -#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1 0x000000a9 - -#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2 0x000000aa - -#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3 0x000000ab - -#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0 0x000000ac - -#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1 0x000000ad - -#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2 0x000000ae - -#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3 0x000000af - -#define REG_A5XX_RBBM_CLOCK_HYST_TP0 0x000000b0 - -#define REG_A5XX_RBBM_CLOCK_HYST_TP1 0x000000b1 - -#define REG_A5XX_RBBM_CLOCK_HYST_TP2 0x000000b2 - -#define REG_A5XX_RBBM_CLOCK_HYST_TP3 0x000000b3 - -#define REG_A5XX_RBBM_CLOCK_HYST2_TP0 0x000000b4 - -#define REG_A5XX_RBBM_CLOCK_HYST2_TP1 0x000000b5 - -#define REG_A5XX_RBBM_CLOCK_HYST2_TP2 0x000000b6 - -#define REG_A5XX_RBBM_CLOCK_HYST2_TP3 0x000000b7 - -#define REG_A5XX_RBBM_CLOCK_HYST3_TP0 0x000000b8 - -#define REG_A5XX_RBBM_CLOCK_HYST3_TP1 0x000000b9 - -#define REG_A5XX_RBBM_CLOCK_HYST3_TP2 0x000000ba - -#define REG_A5XX_RBBM_CLOCK_HYST3_TP3 0x000000bb - -#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU 0x000000c8 - -#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU 0x000000c9 - -#define REG_A5XX_RBBM_CLOCK_HYST_GPMU 0x000000ca - -#define REG_A5XX_RBBM_PERFCTR_CP_0_LO 0x000003a0 - -#define REG_A5XX_RBBM_PERFCTR_CP_0_HI 0x000003a1 - -#define REG_A5XX_RBBM_PERFCTR_CP_1_LO 0x000003a2 - -#define REG_A5XX_RBBM_PERFCTR_CP_1_HI 0x000003a3 - -#define REG_A5XX_RBBM_PERFCTR_CP_2_LO 0x000003a4 - -#define REG_A5XX_RBBM_PERFCTR_CP_2_HI 0x000003a5 - -#define REG_A5XX_RBBM_PERFCTR_CP_3_LO 0x000003a6 - -#define REG_A5XX_RBBM_PERFCTR_CP_3_HI 0x000003a7 - -#define REG_A5XX_RBBM_PERFCTR_CP_4_LO 0x000003a8 - -#define REG_A5XX_RBBM_PERFCTR_CP_4_HI 0x000003a9 - -#define REG_A5XX_RBBM_PERFCTR_CP_5_LO 0x000003aa - -#define REG_A5XX_RBBM_PERFCTR_CP_5_HI 0x000003ab - -#define REG_A5XX_RBBM_PERFCTR_CP_6_LO 0x000003ac - -#define REG_A5XX_RBBM_PERFCTR_CP_6_HI 0x000003ad - -#define REG_A5XX_RBBM_PERFCTR_CP_7_LO 0x000003ae - -#define REG_A5XX_RBBM_PERFCTR_CP_7_HI 0x000003af - -#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO 0x000003b0 - -#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI 0x000003b1 - -#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO 0x000003b2 - -#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI 0x000003b3 - -#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO 0x000003b4 - -#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI 0x000003b5 - -#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO 0x000003b6 - -#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI 0x000003b7 - -#define REG_A5XX_RBBM_PERFCTR_PC_0_LO 0x000003b8 - -#define REG_A5XX_RBBM_PERFCTR_PC_0_HI 0x000003b9 - -#define REG_A5XX_RBBM_PERFCTR_PC_1_LO 0x000003ba - -#define REG_A5XX_RBBM_PERFCTR_PC_1_HI 0x000003bb - -#define REG_A5XX_RBBM_PERFCTR_PC_2_LO 0x000003bc - -#define REG_A5XX_RBBM_PERFCTR_PC_2_HI 0x000003bd - -#define REG_A5XX_RBBM_PERFCTR_PC_3_LO 0x000003be - -#define REG_A5XX_RBBM_PERFCTR_PC_3_HI 0x000003bf - -#define REG_A5XX_RBBM_PERFCTR_PC_4_LO 0x000003c0 - -#define REG_A5XX_RBBM_PERFCTR_PC_4_HI 0x000003c1 - -#define 
REG_A5XX_RBBM_PERFCTR_PC_5_LO 0x000003c2 - -#define REG_A5XX_RBBM_PERFCTR_PC_5_HI 0x000003c3 - -#define REG_A5XX_RBBM_PERFCTR_PC_6_LO 0x000003c4 - -#define REG_A5XX_RBBM_PERFCTR_PC_6_HI 0x000003c5 - -#define REG_A5XX_RBBM_PERFCTR_PC_7_LO 0x000003c6 - -#define REG_A5XX_RBBM_PERFCTR_PC_7_HI 0x000003c7 - -#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO 0x000003c8 - -#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI 0x000003c9 - -#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO 0x000003ca - -#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI 0x000003cb - -#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO 0x000003cc - -#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI 0x000003cd - -#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO 0x000003ce - -#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI 0x000003cf - -#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO 0x000003d0 - -#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI 0x000003d1 - -#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO 0x000003d2 - -#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI 0x000003d3 - -#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO 0x000003d4 - -#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI 0x000003d5 - -#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO 0x000003d6 - -#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI 0x000003d7 - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO 0x000003d8 - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI 0x000003d9 - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO 0x000003da - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI 0x000003db - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO 0x000003dc - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI 0x000003dd - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO 0x000003de - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI 0x000003df - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO 0x000003e0 - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI 0x000003e1 - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO 0x000003e2 - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI 0x000003e3 - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO 0x000003e4 - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI 0x000003e5 - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO 0x000003e6 - -#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI 0x000003e7 - -#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO 0x000003e8 - -#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI 0x000003e9 - -#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO 0x000003ea - -#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI 0x000003eb - -#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO 0x000003ec - -#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI 0x000003ed - -#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO 0x000003ee - -#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI 0x000003ef - -#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO 0x000003f0 - -#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI 0x000003f1 - -#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO 0x000003f2 - -#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI 0x000003f3 - -#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO 0x000003f4 - -#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI 0x000003f5 - -#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO 0x000003f6 - -#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI 0x000003f7 - -#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO 0x000003f8 - -#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI 0x000003f9 - -#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO 0x000003fa - -#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI 0x000003fb - -#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO 0x000003fc - -#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI 0x000003fd - -#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO 0x000003fe - -#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI 0x000003ff - -#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO 0x00000400 - -#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI 0x00000401 - -#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO 0x00000402 - -#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI 0x00000403 - -#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO 0x00000404 - 
-#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI 0x00000405 - -#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO 0x00000406 - -#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI 0x00000407 - -#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO 0x00000408 - -#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI 0x00000409 - -#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO 0x0000040a - -#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI 0x0000040b - -#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO 0x0000040c - -#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI 0x0000040d - -#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO 0x0000040e - -#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI 0x0000040f - -#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO 0x00000410 - -#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI 0x00000411 - -#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO 0x00000412 - -#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI 0x00000413 - -#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO 0x00000414 - -#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI 0x00000415 - -#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO 0x00000416 - -#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI 0x00000417 - -#define REG_A5XX_RBBM_PERFCTR_TP_0_LO 0x00000418 - -#define REG_A5XX_RBBM_PERFCTR_TP_0_HI 0x00000419 - -#define REG_A5XX_RBBM_PERFCTR_TP_1_LO 0x0000041a - -#define REG_A5XX_RBBM_PERFCTR_TP_1_HI 0x0000041b - -#define REG_A5XX_RBBM_PERFCTR_TP_2_LO 0x0000041c - -#define REG_A5XX_RBBM_PERFCTR_TP_2_HI 0x0000041d - -#define REG_A5XX_RBBM_PERFCTR_TP_3_LO 0x0000041e - -#define REG_A5XX_RBBM_PERFCTR_TP_3_HI 0x0000041f - -#define REG_A5XX_RBBM_PERFCTR_TP_4_LO 0x00000420 - -#define REG_A5XX_RBBM_PERFCTR_TP_4_HI 0x00000421 - -#define REG_A5XX_RBBM_PERFCTR_TP_5_LO 0x00000422 - -#define REG_A5XX_RBBM_PERFCTR_TP_5_HI 0x00000423 - -#define REG_A5XX_RBBM_PERFCTR_TP_6_LO 0x00000424 - -#define REG_A5XX_RBBM_PERFCTR_TP_6_HI 0x00000425 - -#define REG_A5XX_RBBM_PERFCTR_TP_7_LO 0x00000426 - -#define REG_A5XX_RBBM_PERFCTR_TP_7_HI 0x00000427 - -#define REG_A5XX_RBBM_PERFCTR_SP_0_LO 0x00000428 - -#define REG_A5XX_RBBM_PERFCTR_SP_0_HI 0x00000429 - -#define REG_A5XX_RBBM_PERFCTR_SP_1_LO 0x0000042a - -#define REG_A5XX_RBBM_PERFCTR_SP_1_HI 0x0000042b - -#define REG_A5XX_RBBM_PERFCTR_SP_2_LO 0x0000042c - -#define REG_A5XX_RBBM_PERFCTR_SP_2_HI 0x0000042d - -#define REG_A5XX_RBBM_PERFCTR_SP_3_LO 0x0000042e - -#define REG_A5XX_RBBM_PERFCTR_SP_3_HI 0x0000042f - -#define REG_A5XX_RBBM_PERFCTR_SP_4_LO 0x00000430 - -#define REG_A5XX_RBBM_PERFCTR_SP_4_HI 0x00000431 - -#define REG_A5XX_RBBM_PERFCTR_SP_5_LO 0x00000432 - -#define REG_A5XX_RBBM_PERFCTR_SP_5_HI 0x00000433 - -#define REG_A5XX_RBBM_PERFCTR_SP_6_LO 0x00000434 - -#define REG_A5XX_RBBM_PERFCTR_SP_6_HI 0x00000435 - -#define REG_A5XX_RBBM_PERFCTR_SP_7_LO 0x00000436 - -#define REG_A5XX_RBBM_PERFCTR_SP_7_HI 0x00000437 - -#define REG_A5XX_RBBM_PERFCTR_SP_8_LO 0x00000438 - -#define REG_A5XX_RBBM_PERFCTR_SP_8_HI 0x00000439 - -#define REG_A5XX_RBBM_PERFCTR_SP_9_LO 0x0000043a - -#define REG_A5XX_RBBM_PERFCTR_SP_9_HI 0x0000043b - -#define REG_A5XX_RBBM_PERFCTR_SP_10_LO 0x0000043c - -#define REG_A5XX_RBBM_PERFCTR_SP_10_HI 0x0000043d - -#define REG_A5XX_RBBM_PERFCTR_SP_11_LO 0x0000043e - -#define REG_A5XX_RBBM_PERFCTR_SP_11_HI 0x0000043f - -#define REG_A5XX_RBBM_PERFCTR_RB_0_LO 0x00000440 - -#define REG_A5XX_RBBM_PERFCTR_RB_0_HI 0x00000441 - -#define REG_A5XX_RBBM_PERFCTR_RB_1_LO 0x00000442 - -#define REG_A5XX_RBBM_PERFCTR_RB_1_HI 0x00000443 - -#define REG_A5XX_RBBM_PERFCTR_RB_2_LO 0x00000444 - -#define REG_A5XX_RBBM_PERFCTR_RB_2_HI 0x00000445 - -#define REG_A5XX_RBBM_PERFCTR_RB_3_LO 0x00000446 - -#define REG_A5XX_RBBM_PERFCTR_RB_3_HI 0x00000447 - -#define 
REG_A5XX_RBBM_PERFCTR_RB_4_LO 0x00000448 - -#define REG_A5XX_RBBM_PERFCTR_RB_4_HI 0x00000449 - -#define REG_A5XX_RBBM_PERFCTR_RB_5_LO 0x0000044a - -#define REG_A5XX_RBBM_PERFCTR_RB_5_HI 0x0000044b - -#define REG_A5XX_RBBM_PERFCTR_RB_6_LO 0x0000044c - -#define REG_A5XX_RBBM_PERFCTR_RB_6_HI 0x0000044d - -#define REG_A5XX_RBBM_PERFCTR_RB_7_LO 0x0000044e - -#define REG_A5XX_RBBM_PERFCTR_RB_7_HI 0x0000044f - -#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO 0x00000450 - -#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI 0x00000451 - -#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO 0x00000452 - -#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI 0x00000453 - -#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO 0x00000454 - -#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI 0x00000455 - -#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO 0x00000456 - -#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI 0x00000457 - -#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO 0x00000458 - -#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI 0x00000459 - -#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO 0x0000045a - -#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI 0x0000045b - -#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO 0x0000045c - -#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI 0x0000045d - -#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO 0x0000045e - -#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI 0x0000045f - -#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO 0x00000460 - -#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI 0x00000461 - -#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO 0x00000462 - -#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI 0x00000463 - -#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b - -#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c - -#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d - -#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e - -#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO 0x000004d2 - -#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3 - -#define REG_A5XX_RBBM_STATUS 0x000004f5 -#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x80000000 -#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x40000000 -#define A5XX_RBBM_STATUS_HLSQ_BUSY 0x20000000 -#define A5XX_RBBM_STATUS_VSC_BUSY 0x10000000 -#define A5XX_RBBM_STATUS_TPL1_BUSY 0x08000000 -#define A5XX_RBBM_STATUS_SP_BUSY 0x04000000 -#define A5XX_RBBM_STATUS_UCHE_BUSY 0x02000000 -#define A5XX_RBBM_STATUS_VPC_BUSY 0x01000000 -#define A5XX_RBBM_STATUS_VFDP_BUSY 0x00800000 -#define A5XX_RBBM_STATUS_VFD_BUSY 0x00400000 -#define A5XX_RBBM_STATUS_TESS_BUSY 0x00200000 -#define A5XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000 -#define A5XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000 -#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY 0x00040000 -#define A5XX_RBBM_STATUS_DCOM_BUSY 0x00020000 -#define A5XX_RBBM_STATUS_COM_BUSY 0x00010000 -#define A5XX_RBBM_STATUS_LRZ_BUZY 0x00008000 -#define A5XX_RBBM_STATUS_A2D_DSP_BUSY 0x00004000 -#define A5XX_RBBM_STATUS_CCUFCHE_BUSY 0x00002000 -#define A5XX_RBBM_STATUS_RB_BUSY 0x00001000 -#define A5XX_RBBM_STATUS_RAS_BUSY 0x00000800 -#define A5XX_RBBM_STATUS_TSE_BUSY 0x00000400 -#define A5XX_RBBM_STATUS_VBIF_BUSY 0x00000200 -#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST 0x00000100 -#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST 0x00000080 -#define A5XX_RBBM_STATUS_CP_BUSY 0x00000040 -#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY 0x00000020 -#define A5XX_RBBM_STATUS_CP_CRASH_BUSY 0x00000010 -#define A5XX_RBBM_STATUS_CP_ETS_BUSY 0x00000008 -#define A5XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004 -#define A5XX_RBBM_STATUS_CP_ME_BUSY 0x00000002 -#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001 - -#define REG_A5XX_RBBM_STATUS3 0x00000530 -#define A5XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT 0x01000000 - -#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1 - 
-#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS	0x000004f0
-
-#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS	0x000004f1
-
-#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS	0x000004f3
-
-#define REG_A5XX_RBBM_AHB_ERROR_STATUS	0x000004f4
-
-#define REG_A5XX_RBBM_PERFCTR_CNTL	0x00000464
-
-#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0	0x00000465
-
-#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1	0x00000466
-
-#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2	0x00000467
-
-#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3	0x00000468
-
-#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO	0x00000469
-
-#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI	0x0000046a
-
-#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED	0x0000046f
-
-#define REG_A5XX_RBBM_AHB_ERROR	0x000004ed
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC	0x00000504
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_OVER	0x00000505
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0	0x00000506
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1	0x00000507
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2	0x00000508
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3	0x00000509
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4	0x0000050a
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5	0x0000050b
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR	0x0000050c
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0	0x0000050d
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1	0x0000050e
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2	0x0000050f
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3	0x00000510
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4	0x00000511
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0	0x00000512
-
-#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1	0x00000513
-
-#define REG_A5XX_RBBM_ISDB_CNT	0x00000533
-
-#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG	0x0000f000
-
-#define REG_A5XX_RBBM_SECVID_TRUST_CNTL	0x0000f400
-
-#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO	0x0000f800
-
-#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI	0x0000f801
-
-#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE	0x0000f802
-
-#define REG_A5XX_RBBM_SECVID_TSB_CNTL	0x0000f803
-
-#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO	0x0000f804
-
-#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI	0x0000f805
-
-#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO	0x0000f806
-
-#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI	0x0000f807
-
-#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL	0x0000f810
-
-#define REG_A5XX_VSC_BIN_SIZE	0x00000bc2
-#define A5XX_VSC_BIN_SIZE_WIDTH__MASK	0x000000ff
-#define A5XX_VSC_BIN_SIZE_WIDTH__SHIFT	0
-static inline uint32_t A5XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
-{
-	assert(!(val & 0x1f));
-	return (((val >> 5)) << A5XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A5XX_VSC_BIN_SIZE_WIDTH__MASK;
-}
-#define A5XX_VSC_BIN_SIZE_HEIGHT__MASK	0x0001fe00
-#define A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT	9
-static inline uint32_t A5XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
-{
-	assert(!(val & 0x1f));
-	return (((val >> 5)) << A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A5XX_VSC_BIN_SIZE_HEIGHT__MASK;
-}
-
-#define REG_A5XX_VSC_SIZE_ADDRESS_LO	0x00000bc3
-
-#define REG_A5XX_VSC_SIZE_ADDRESS_HI	0x00000bc4
-
-#define REG_A5XX_UNKNOWN_0BC5	0x00000bc5
-
-#define REG_A5XX_UNKNOWN_0BC6	0x00000bc6
-
-#define REG_A5XX_VSC_PIPE_CONFIG(i0) (0x00000bd0 + 0x1*(i0))
-
-static inline uint32_t REG_A5XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000bd0 + 0x1*i0; }
-#define A5XX_VSC_PIPE_CONFIG_REG_X__MASK	0x000003ff
-#define A5XX_VSC_PIPE_CONFIG_REG_X__SHIFT	0
-static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
-{
-	return ((val) << A5XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_X__MASK;
-}
-#define
A5XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00 -#define A5XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10 -static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val) -{ - return ((val) << A5XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_Y__MASK; -} -#define A5XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000 -#define A5XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20 -static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_W(uint32_t val) -{ - return ((val) << A5XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_W__MASK; -} -#define A5XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000 -#define A5XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24 -static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_H(uint32_t val) -{ - return ((val) << A5XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_H__MASK; -} - -#define REG_A5XX_VSC_PIPE_DATA_ADDRESS(i0) (0x00000be0 + 0x2*(i0)) - -static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_LO(uint32_t i0) { return 0x00000be0 + 0x2*i0; } - -static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_HI(uint32_t i0) { return 0x00000be1 + 0x2*i0; } - -#define REG_A5XX_VSC_PIPE_DATA_LENGTH(i0) (0x00000c00 + 0x1*(i0)) - -static inline uint32_t REG_A5XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c00 + 0x1*i0; } - -#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60 - -#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61 - -#define REG_A5XX_VSC_RESOLVE_CNTL 0x00000cdd -#define A5XX_VSC_RESOLVE_CNTL_WINDOW_OFFSET_DISABLE 0x80000000 -#define A5XX_VSC_RESOLVE_CNTL_X__MASK 0x00007fff -#define A5XX_VSC_RESOLVE_CNTL_X__SHIFT 0 -static inline uint32_t A5XX_VSC_RESOLVE_CNTL_X(uint32_t val) -{ - return ((val) << A5XX_VSC_RESOLVE_CNTL_X__SHIFT) & A5XX_VSC_RESOLVE_CNTL_X__MASK; -} -#define A5XX_VSC_RESOLVE_CNTL_Y__MASK 0x7fff0000 -#define A5XX_VSC_RESOLVE_CNTL_Y__SHIFT 16 -static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val) -{ - return ((val) << A5XX_VSC_RESOLVE_CNTL_Y__SHIFT) & A5XX_VSC_RESOLVE_CNTL_Y__MASK; -} - -#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81 - -#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c90 - -#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c91 - -#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c92 - -#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c93 - -#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c94 - -#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c95 - -#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c96 - -#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c97 - -#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 0x00000c98 - -#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 0x00000c99 - -#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 0x00000c9a - -#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 0x00000c9b - -#define REG_A5XX_RB_DBG_ECO_CNTL 0x00000cc4 - -#define REG_A5XX_RB_ADDR_MODE_CNTL 0x00000cc5 - -#define REG_A5XX_RB_MODE_CNTL 0x00000cc6 - -#define REG_A5XX_RB_CCU_CNTL 0x00000cc7 - -#define REG_A5XX_RB_PERFCTR_RB_SEL_0 0x00000cd0 - -#define REG_A5XX_RB_PERFCTR_RB_SEL_1 0x00000cd1 - -#define REG_A5XX_RB_PERFCTR_RB_SEL_2 0x00000cd2 - -#define REG_A5XX_RB_PERFCTR_RB_SEL_3 0x00000cd3 - -#define REG_A5XX_RB_PERFCTR_RB_SEL_4 0x00000cd4 - -#define REG_A5XX_RB_PERFCTR_RB_SEL_5 0x00000cd5 - -#define REG_A5XX_RB_PERFCTR_RB_SEL_6 0x00000cd6 - -#define REG_A5XX_RB_PERFCTR_RB_SEL_7 0x00000cd7 - -#define REG_A5XX_RB_PERFCTR_CCU_SEL_0 0x00000cd8 - -#define REG_A5XX_RB_PERFCTR_CCU_SEL_1 0x00000cd9 - -#define REG_A5XX_RB_PERFCTR_CCU_SEL_2 0x00000cda - -#define REG_A5XX_RB_PERFCTR_CCU_SEL_3 0x00000cdb - -#define REG_A5XX_RB_POWERCTR_RB_SEL_0 0x00000ce0 - -#define REG_A5XX_RB_POWERCTR_RB_SEL_1 0x00000ce1 - -#define 
REG_A5XX_RB_POWERCTR_RB_SEL_2 0x00000ce2 - -#define REG_A5XX_RB_POWERCTR_RB_SEL_3 0x00000ce3 - -#define REG_A5XX_RB_POWERCTR_CCU_SEL_0 0x00000ce4 - -#define REG_A5XX_RB_POWERCTR_CCU_SEL_1 0x00000ce5 - -#define REG_A5XX_RB_PERFCTR_CMP_SEL_0 0x00000cec - -#define REG_A5XX_RB_PERFCTR_CMP_SEL_1 0x00000ced - -#define REG_A5XX_RB_PERFCTR_CMP_SEL_2 0x00000cee - -#define REG_A5XX_RB_PERFCTR_CMP_SEL_3 0x00000cef - -#define REG_A5XX_PC_DBG_ECO_CNTL 0x00000d00 -#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI 0x00000100 - -#define REG_A5XX_PC_ADDR_MODE_CNTL 0x00000d01 - -#define REG_A5XX_PC_MODE_CNTL 0x00000d02 - -#define REG_A5XX_PC_INDEX_BUF_LO 0x00000d04 - -#define REG_A5XX_PC_INDEX_BUF_HI 0x00000d05 - -#define REG_A5XX_PC_START_INDEX 0x00000d06 - -#define REG_A5XX_PC_MAX_INDEX 0x00000d07 - -#define REG_A5XX_PC_TESSFACTOR_ADDR_LO 0x00000d08 - -#define REG_A5XX_PC_TESSFACTOR_ADDR_HI 0x00000d09 - -#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10 - -#define REG_A5XX_PC_PERFCTR_PC_SEL_1 0x00000d11 - -#define REG_A5XX_PC_PERFCTR_PC_SEL_2 0x00000d12 - -#define REG_A5XX_PC_PERFCTR_PC_SEL_3 0x00000d13 - -#define REG_A5XX_PC_PERFCTR_PC_SEL_4 0x00000d14 - -#define REG_A5XX_PC_PERFCTR_PC_SEL_5 0x00000d15 - -#define REG_A5XX_PC_PERFCTR_PC_SEL_6 0x00000d16 - -#define REG_A5XX_PC_PERFCTR_PC_SEL_7 0x00000d17 - -#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00 - -#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01 - -#define REG_A5XX_HLSQ_DBG_ECO_CNTL 0x00000e04 - -#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05 - -#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06 - -#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e10 - -#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e11 - -#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e12 - -#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e13 - -#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e14 - -#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e15 - -#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e16 - -#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e17 - -#define REG_A5XX_HLSQ_SPTP_RDSEL 0x00000f08 - -#define REG_A5XX_HLSQ_DBG_READ_SEL 0x0000bc00 - -#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000a000 - -#define REG_A5XX_VFD_ADDR_MODE_CNTL 0x00000e41 - -#define REG_A5XX_VFD_MODE_CNTL 0x00000e42 - -#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0 0x00000e50 - -#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1 0x00000e51 - -#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2 0x00000e52 - -#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3 0x00000e53 - -#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4 0x00000e54 - -#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5 0x00000e55 - -#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6 0x00000e56 - -#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57 - -#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60 -#define A5XX_VPC_DBG_ECO_CNTL_ALLFLATOPTDIS 0x00000400 - -#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61 - -#define REG_A5XX_VPC_MODE_CNTL 0x00000e62 -#define A5XX_VPC_MODE_CNTL_BINNING_PASS 0x00000001 - -#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64 - -#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1 0x00000e65 - -#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2 0x00000e66 - -#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3 0x00000e67 - -#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80 - -#define REG_A5XX_UCHE_MODE_CNTL 0x00000e81 - -#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82 - -#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87 - -#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI 0x00000e88 - -#define REG_A5XX_UCHE_TRAP_BASE_LO 0x00000e89 - -#define REG_A5XX_UCHE_TRAP_BASE_HI 0x00000e8a - -#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e8b - -#define 
REG_A5XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e8c - -#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e8d - -#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e8e - -#define REG_A5XX_UCHE_DBG_ECO_CNTL_2 0x00000e8f - -#define REG_A5XX_UCHE_DBG_ECO_CNTL 0x00000e90 - -#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO 0x00000e91 - -#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI 0x00000e92 - -#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO 0x00000e93 - -#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI 0x00000e94 - -#define REG_A5XX_UCHE_CACHE_INVALIDATE 0x00000e95 - -#define REG_A5XX_UCHE_CACHE_WAYS 0x00000e96 - -#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000ea0 - -#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000ea1 - -#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000ea2 - -#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000ea3 - -#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000ea4 - -#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000ea5 - -#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000ea6 - -#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000ea7 - -#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 0x00000ea8 - -#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 0x00000ea9 - -#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 0x00000eaa - -#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 0x00000eab - -#define REG_A5XX_UCHE_TRAP_LOG_LO 0x00000eb1 - -#define REG_A5XX_UCHE_TRAP_LOG_HI 0x00000eb2 - -#define REG_A5XX_SP_DBG_ECO_CNTL 0x00000ec0 - -#define REG_A5XX_SP_ADDR_MODE_CNTL 0x00000ec1 - -#define REG_A5XX_SP_MODE_CNTL 0x00000ec2 - -#define REG_A5XX_SP_PERFCTR_SP_SEL_0 0x00000ed0 - -#define REG_A5XX_SP_PERFCTR_SP_SEL_1 0x00000ed1 - -#define REG_A5XX_SP_PERFCTR_SP_SEL_2 0x00000ed2 - -#define REG_A5XX_SP_PERFCTR_SP_SEL_3 0x00000ed3 - -#define REG_A5XX_SP_PERFCTR_SP_SEL_4 0x00000ed4 - -#define REG_A5XX_SP_PERFCTR_SP_SEL_5 0x00000ed5 - -#define REG_A5XX_SP_PERFCTR_SP_SEL_6 0x00000ed6 - -#define REG_A5XX_SP_PERFCTR_SP_SEL_7 0x00000ed7 - -#define REG_A5XX_SP_PERFCTR_SP_SEL_8 0x00000ed8 - -#define REG_A5XX_SP_PERFCTR_SP_SEL_9 0x00000ed9 - -#define REG_A5XX_SP_PERFCTR_SP_SEL_10 0x00000eda - -#define REG_A5XX_SP_PERFCTR_SP_SEL_11 0x00000edb - -#define REG_A5XX_SP_POWERCTR_SP_SEL_0 0x00000edc - -#define REG_A5XX_SP_POWERCTR_SP_SEL_1 0x00000edd - -#define REG_A5XX_SP_POWERCTR_SP_SEL_2 0x00000ede - -#define REG_A5XX_SP_POWERCTR_SP_SEL_3 0x00000edf - -#define REG_A5XX_TPL1_ADDR_MODE_CNTL 0x00000f01 - -#define REG_A5XX_TPL1_MODE_CNTL 0x00000f02 - -#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0 0x00000f10 - -#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1 0x00000f11 - -#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2 0x00000f12 - -#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3 0x00000f13 - -#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4 0x00000f14 - -#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5 0x00000f15 - -#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6 0x00000f16 - -#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7 0x00000f17 - -#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0 0x00000f18 - -#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1 0x00000f19 - -#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2 0x00000f1a - -#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3 0x00000f1b - -#define REG_A5XX_VBIF_VERSION 0x00003000 - -#define REG_A5XX_VBIF_CLKON 0x00003001 - -#define REG_A5XX_VBIF_ABIT_SORT 0x00003028 - -#define REG_A5XX_VBIF_ABIT_SORT_CONF 0x00003029 - -#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049 - -#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a - -#define REG_A5XX_VBIF_IN_RD_LIM_CONF0 0x0000302c - -#define REG_A5XX_VBIF_IN_RD_LIM_CONF1 0x0000302d - -#define REG_A5XX_VBIF_XIN_HALT_CTRL0 0x00003080 - -#define REG_A5XX_VBIF_XIN_HALT_CTRL1 0x00003081 - 
-#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084 - -#define REG_A5XX_VBIF_TEST_BUS1_CTRL0 0x00003085 - -#define REG_A5XX_VBIF_TEST_BUS1_CTRL1 0x00003086 - -#define REG_A5XX_VBIF_TEST_BUS2_CTRL0 0x00003087 - -#define REG_A5XX_VBIF_TEST_BUS2_CTRL1 0x00003088 - -#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c - -#define REG_A5XX_VBIF_PERF_CNT_EN0 0x000030c0 - -#define REG_A5XX_VBIF_PERF_CNT_EN1 0x000030c1 - -#define REG_A5XX_VBIF_PERF_CNT_EN2 0x000030c2 - -#define REG_A5XX_VBIF_PERF_CNT_EN3 0x000030c3 - -#define REG_A5XX_VBIF_PERF_CNT_CLR0 0x000030c8 - -#define REG_A5XX_VBIF_PERF_CNT_CLR1 0x000030c9 - -#define REG_A5XX_VBIF_PERF_CNT_CLR2 0x000030ca - -#define REG_A5XX_VBIF_PERF_CNT_CLR3 0x000030cb - -#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0 - -#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1 - -#define REG_A5XX_VBIF_PERF_CNT_SEL2 0x000030d2 - -#define REG_A5XX_VBIF_PERF_CNT_SEL3 0x000030d3 - -#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8 - -#define REG_A5XX_VBIF_PERF_CNT_LOW1 0x000030d9 - -#define REG_A5XX_VBIF_PERF_CNT_LOW2 0x000030da - -#define REG_A5XX_VBIF_PERF_CNT_LOW3 0x000030db - -#define REG_A5XX_VBIF_PERF_CNT_HIGH0 0x000030e0 - -#define REG_A5XX_VBIF_PERF_CNT_HIGH1 0x000030e1 - -#define REG_A5XX_VBIF_PERF_CNT_HIGH2 0x000030e2 - -#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3 - -#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0 0x00003100 - -#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1 0x00003101 - -#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2 0x00003102 - -#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110 - -#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111 - -#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112 - -#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118 - -#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119 - -#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a - -#define REG_A5XX_GPMU_INST_RAM_BASE 0x00008800 - -#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800 - -#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840 - -#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841 - -#define REG_A5XX_SP_POWER_COUNTER_1_LO 0x0000a842 - -#define REG_A5XX_SP_POWER_COUNTER_1_HI 0x0000a843 - -#define REG_A5XX_SP_POWER_COUNTER_2_LO 0x0000a844 - -#define REG_A5XX_SP_POWER_COUNTER_2_HI 0x0000a845 - -#define REG_A5XX_SP_POWER_COUNTER_3_LO 0x0000a846 - -#define REG_A5XX_SP_POWER_COUNTER_3_HI 0x0000a847 - -#define REG_A5XX_TP_POWER_COUNTER_0_LO 0x0000a848 - -#define REG_A5XX_TP_POWER_COUNTER_0_HI 0x0000a849 - -#define REG_A5XX_TP_POWER_COUNTER_1_LO 0x0000a84a - -#define REG_A5XX_TP_POWER_COUNTER_1_HI 0x0000a84b - -#define REG_A5XX_TP_POWER_COUNTER_2_LO 0x0000a84c - -#define REG_A5XX_TP_POWER_COUNTER_2_HI 0x0000a84d - -#define REG_A5XX_TP_POWER_COUNTER_3_LO 0x0000a84e - -#define REG_A5XX_TP_POWER_COUNTER_3_HI 0x0000a84f - -#define REG_A5XX_RB_POWER_COUNTER_0_LO 0x0000a850 - -#define REG_A5XX_RB_POWER_COUNTER_0_HI 0x0000a851 - -#define REG_A5XX_RB_POWER_COUNTER_1_LO 0x0000a852 - -#define REG_A5XX_RB_POWER_COUNTER_1_HI 0x0000a853 - -#define REG_A5XX_RB_POWER_COUNTER_2_LO 0x0000a854 - -#define REG_A5XX_RB_POWER_COUNTER_2_HI 0x0000a855 - -#define REG_A5XX_RB_POWER_COUNTER_3_LO 0x0000a856 - -#define REG_A5XX_RB_POWER_COUNTER_3_HI 0x0000a857 - -#define REG_A5XX_CCU_POWER_COUNTER_0_LO 0x0000a858 - -#define REG_A5XX_CCU_POWER_COUNTER_0_HI 0x0000a859 - -#define REG_A5XX_CCU_POWER_COUNTER_1_LO 0x0000a85a - -#define REG_A5XX_CCU_POWER_COUNTER_1_HI 0x0000a85b - -#define REG_A5XX_UCHE_POWER_COUNTER_0_LO 0x0000a85c - -#define REG_A5XX_UCHE_POWER_COUNTER_0_HI 0x0000a85d - -#define REG_A5XX_UCHE_POWER_COUNTER_1_LO 0x0000a85e - 
-#define REG_A5XX_UCHE_POWER_COUNTER_1_HI 0x0000a85f - -#define REG_A5XX_UCHE_POWER_COUNTER_2_LO 0x0000a860 - -#define REG_A5XX_UCHE_POWER_COUNTER_2_HI 0x0000a861 - -#define REG_A5XX_UCHE_POWER_COUNTER_3_LO 0x0000a862 - -#define REG_A5XX_UCHE_POWER_COUNTER_3_HI 0x0000a863 - -#define REG_A5XX_CP_POWER_COUNTER_0_LO 0x0000a864 - -#define REG_A5XX_CP_POWER_COUNTER_0_HI 0x0000a865 - -#define REG_A5XX_CP_POWER_COUNTER_1_LO 0x0000a866 - -#define REG_A5XX_CP_POWER_COUNTER_1_HI 0x0000a867 - -#define REG_A5XX_CP_POWER_COUNTER_2_LO 0x0000a868 - -#define REG_A5XX_CP_POWER_COUNTER_2_HI 0x0000a869 - -#define REG_A5XX_CP_POWER_COUNTER_3_LO 0x0000a86a - -#define REG_A5XX_CP_POWER_COUNTER_3_HI 0x0000a86b - -#define REG_A5XX_GPMU_POWER_COUNTER_0_LO 0x0000a86c - -#define REG_A5XX_GPMU_POWER_COUNTER_0_HI 0x0000a86d - -#define REG_A5XX_GPMU_POWER_COUNTER_1_LO 0x0000a86e - -#define REG_A5XX_GPMU_POWER_COUNTER_1_HI 0x0000a86f - -#define REG_A5XX_GPMU_POWER_COUNTER_2_LO 0x0000a870 - -#define REG_A5XX_GPMU_POWER_COUNTER_2_HI 0x0000a871 - -#define REG_A5XX_GPMU_POWER_COUNTER_3_LO 0x0000a872 - -#define REG_A5XX_GPMU_POWER_COUNTER_3_HI 0x0000a873 - -#define REG_A5XX_GPMU_POWER_COUNTER_4_LO 0x0000a874 - -#define REG_A5XX_GPMU_POWER_COUNTER_4_HI 0x0000a875 - -#define REG_A5XX_GPMU_POWER_COUNTER_5_LO 0x0000a876 - -#define REG_A5XX_GPMU_POWER_COUNTER_5_HI 0x0000a877 - -#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE 0x0000a878 - -#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO 0x0000a879 - -#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI 0x0000a87a - -#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0x0000a87b - -#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0 0x0000a87c - -#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d - -#define REG_A5XX_GPMU_GPMU_SP_CLOCK_CONTROL 0x0000a880 - -#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881 - -#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886 - -#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887 - -#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b -#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000 - -#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d -#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000 - -#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891 - -#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892 - -#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893 - -#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894 - -#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3 - -#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8 - -#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1 - -#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6 - -#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8 - -#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0 - -#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1 - -#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00 - -#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01 - -#define REG_A5XX_GPMU_TEMP_VAL 0x0000ac02 - -#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD 0x0000ac03 - -#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS 0x0000ac05 - -#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK 0x0000ac06 - -#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1 0x0000ac40 - -#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3 0x0000ac41 - -#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1 0x0000ac42 - -#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3 0x0000ac43 - -#define REG_A5XX_GPMU_BASE_LEAKAGE 0x0000ac46 - -#define REG_A5XX_GPMU_GPMU_VOLTAGE 0x0000ac60 - -#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS 0x0000ac61 - -#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK 0x0000ac62 - -#define 
REG_A5XX_GPMU_GPMU_PWR_THRESHOLD 0x0000ac80 - -#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL 0x0000acc4 - -#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS 0x0000acc5 - -#define REG_A5XX_GDPM_CONFIG1 0x0000b80c - -#define REG_A5XX_GDPM_CONFIG2 0x0000b80d - -#define REG_A5XX_GDPM_INT_EN 0x0000b80f - -#define REG_A5XX_GDPM_INT_MASK 0x0000b811 - -#define REG_A5XX_GPMU_BEC_ENABLE 0x0000b9a0 - -#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000c41a - -#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000c41d - -#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000c41f - -#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x0000c421 - -#define REG_A5XX_GPU_CS_ENABLE_REG 0x0000c520 - -#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557 - -#define REG_A5XX_GRAS_CL_CNTL 0x0000e000 -#define A5XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040 - -#define REG_A5XX_GRAS_VS_CL_CNTL 0x0000e001 -#define A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK 0x000000ff -#define A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT 0 -static inline uint32_t A5XX_GRAS_VS_CL_CNTL_CLIP_MASK(uint32_t val) -{ - return ((val) << A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT) & A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK; -} -#define A5XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK 0x0000ff00 -#define A5XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT 8 -static inline uint32_t A5XX_GRAS_VS_CL_CNTL_CULL_MASK(uint32_t val) -{ - return ((val) << A5XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT) & A5XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK; -} - -#define REG_A5XX_UNKNOWN_E004 0x0000e004 - -#define REG_A5XX_GRAS_CNTL 0x0000e005 -#define A5XX_GRAS_CNTL_IJ_PERSP_PIXEL 0x00000001 -#define A5XX_GRAS_CNTL_IJ_PERSP_CENTROID 0x00000002 -#define A5XX_GRAS_CNTL_IJ_PERSP_SAMPLE 0x00000004 -#define A5XX_GRAS_CNTL_IJ_LINEAR_PIXEL 0x00000008 -#define A5XX_GRAS_CNTL_IJ_LINEAR_CENTROID 0x00000010 -#define A5XX_GRAS_CNTL_IJ_LINEAR_SAMPLE 0x00000020 -#define A5XX_GRAS_CNTL_COORD_MASK__MASK 0x000003c0 -#define A5XX_GRAS_CNTL_COORD_MASK__SHIFT 6 -static inline uint32_t A5XX_GRAS_CNTL_COORD_MASK(uint32_t val) -{ - return ((val) << A5XX_GRAS_CNTL_COORD_MASK__SHIFT) & A5XX_GRAS_CNTL_COORD_MASK__MASK; -} - -#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006 -#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff -#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0 -static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val) -{ - return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK; -} -#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00 -#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10 -static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val) -{ - return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK; -} - -#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0 0x0000e010 -#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff -#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0 -static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val) -{ - return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK; -} - -#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0 0x0000e011 -#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff -#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0 -static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val) -{ - return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK; -} - -#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0 0x0000e012 -#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff -#define 
A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0 -static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val) -{ - return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK; -} - -#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0 0x0000e013 -#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff -#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0 -static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val) -{ - return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK; -} - -#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000e014 -#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff -#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0 -static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val) -{ - return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK; -} - -#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0 0x0000e015 -#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff -#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0 -static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val) -{ - return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK; -} - -#define REG_A5XX_GRAS_SU_CNTL 0x0000e090 -#define A5XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001 -#define A5XX_GRAS_SU_CNTL_CULL_BACK 0x00000002 -#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004 -#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8 -#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3 -static inline uint32_t A5XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val) -{ - return ((((int32_t)(val * 4.0))) << A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK; -} -#define A5XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800 -#define A5XX_GRAS_SU_CNTL_LINE_MODE__MASK 0x00002000 -#define A5XX_GRAS_SU_CNTL_LINE_MODE__SHIFT 13 -static inline uint32_t A5XX_GRAS_SU_CNTL_LINE_MODE(enum a5xx_line_mode val) -{ - return ((val) << A5XX_GRAS_SU_CNTL_LINE_MODE__SHIFT) & A5XX_GRAS_SU_CNTL_LINE_MODE__MASK; -} - -#define REG_A5XX_GRAS_SU_POINT_MINMAX 0x0000e091 -#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff -#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0 -static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val) -{ - return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK; -} -#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000 -#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16 -static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val) -{ - return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK; -} - -#define REG_A5XX_GRAS_SU_POINT_SIZE 0x0000e092 -#define A5XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff -#define A5XX_GRAS_SU_POINT_SIZE__SHIFT 0 -static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val) -{ - return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK; -} - -#define REG_A5XX_GRAS_SU_LAYERED 0x0000e093 - -#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094 -#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001 -#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_UNK1 0x00000002 - -#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095 -#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff -#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0 -static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val) -{ - return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK; -} - -#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000e096 
-#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff -#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0 -static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val) -{ - return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK; -} - -#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x0000e097 -#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff -#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0 -static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val) -{ - return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK; -} - -#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO 0x0000e098 -#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007 -#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0 -static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val) -{ - return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK; -} - -#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099 - -#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0 -#define A5XX_GRAS_SC_CNTL_BINNING_PASS 0x00000001 -#define A5XX_GRAS_SC_CNTL_SAMPLES_PASSED 0x00008000 - -#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1 - -#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL 0x0000e0a2 -#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 -#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 -static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK; -} - -#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL 0x0000e0a3 -#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003 -#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT 0 -static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK; -} -#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004 - -#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL 0x0000e0a4 - -#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x0000e0aa -#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000 -#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff -#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0 -static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val) -{ - return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK; -} -#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000 -#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16 -static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val) -{ - return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK; -} - -#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x0000e0ab -#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000 -#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff -#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0 -static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val) -{ - return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK; -} -#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000 -#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16 -static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val) -{ - 
return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK; -} - -#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x0000e0ca -#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000 -#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff -#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0 -static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val) -{ - return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK; -} -#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000 -#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16 -static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val) -{ - return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK; -} - -#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x0000e0cb -#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000 -#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff -#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0 -static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val) -{ - return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK; -} -#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000 -#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16 -static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val) -{ - return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK; -} - -#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000e0ea -#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 -#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff -#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0 -static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val) -{ - return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK; -} -#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000 -#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16 -static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val) -{ - return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK; -} - -#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000e0eb -#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 -#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff -#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0 -static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val) -{ - return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK; -} -#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000 -#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16 -static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val) -{ - return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK; -} - -#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100 -#define A5XX_GRAS_LRZ_CNTL_ENABLE 0x00000001 -#define A5XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002 -#define A5XX_GRAS_LRZ_CNTL_GREATER 0x00000004 - -#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101 - -#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102 - -#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103 -#define A5XX_GRAS_LRZ_BUFFER_PITCH__MASK 0xffffffff -#define A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT 0 -static inline uint32_t A5XX_GRAS_LRZ_BUFFER_PITCH(uint32_t val) -{ 
-	assert(!(val & 0x1f));
-	return (((val >> 5)) << A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT) & A5XX_GRAS_LRZ_BUFFER_PITCH__MASK;
-}
-
-#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO	0x0000e104
-
-#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI	0x0000e105
-
-#define REG_A5XX_RB_CNTL	0x0000e140
-#define A5XX_RB_CNTL_WIDTH__MASK	0x000000ff
-#define A5XX_RB_CNTL_WIDTH__SHIFT	0
-static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
-{
-	assert(!(val & 0x1f));
-	return (((val >> 5)) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
-}
-#define A5XX_RB_CNTL_HEIGHT__MASK	0x0001fe00
-#define A5XX_RB_CNTL_HEIGHT__SHIFT	9
-static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
-{
-	assert(!(val & 0x1f));
-	return (((val >> 5)) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
-}
-#define A5XX_RB_CNTL_BYPASS	0x00020000
-
-#define REG_A5XX_RB_RENDER_CNTL	0x0000e141
-#define A5XX_RB_RENDER_CNTL_BINNING_PASS	0x00000001
-#define A5XX_RB_RENDER_CNTL_SAMPLES_PASSED	0x00000040
-#define A5XX_RB_RENDER_CNTL_DISABLE_COLOR_PIPE	0x00000080
-#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH	0x00004000
-#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH2	0x00008000
-#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK	0x00ff0000
-#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT	16
-static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
-{
-	return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
-}
-#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK	0xff000000
-#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT	24
-static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS2(uint32_t val)
-{
-	return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK;
-}
-
-#define REG_A5XX_RB_RAS_MSAA_CNTL	0x0000e142
-#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK	0x00000003
-#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT	0
-static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
-	return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
-}
-
-#define REG_A5XX_RB_DEST_MSAA_CNTL	0x0000e143
-#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK	0x00000003
-#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT	0
-static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
-{
-	return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
-}
-#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE	0x00000004
-
-#define REG_A5XX_RB_RENDER_CONTROL0	0x0000e144
-#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL	0x00000001
-#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID	0x00000002
-#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE	0x00000004
-#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_PIXEL	0x00000008
-#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_CENTROID	0x00000010
-#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_SAMPLE	0x00000020
-#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK	0x000003c0
-#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT	6
-static inline uint32_t A5XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val)
-{
-	return ((val) << A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK;
-}
-
-#define REG_A5XX_RB_RENDER_CONTROL1	0x0000e145
-#define A5XX_RB_RENDER_CONTROL1_SAMPLEMASK	0x00000001
-#define A5XX_RB_RENDER_CONTROL1_FACENESS	0x00000002
-#define A5XX_RB_RENDER_CONTROL1_SAMPLEID	0x00000004
-
-#define REG_A5XX_RB_FS_OUTPUT_CNTL	0x0000e146
-#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK	0x0000000f
-#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT	0
-static inline uint32_t
A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val) -{ - return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK; -} -#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z 0x00000020 - -#define REG_A5XX_RB_RENDER_COMPONENTS 0x0000e147 -#define A5XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f -#define A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0 -static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT0(uint32_t val) -{ - return ((val) << A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT0__MASK; -} -#define A5XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0 -#define A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4 -static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT1(uint32_t val) -{ - return ((val) << A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT1__MASK; -} -#define A5XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00 -#define A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8 -static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT2(uint32_t val) -{ - return ((val) << A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT2__MASK; -} -#define A5XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000 -#define A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12 -static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT3(uint32_t val) -{ - return ((val) << A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT3__MASK; -} -#define A5XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000 -#define A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16 -static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT4(uint32_t val) -{ - return ((val) << A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT4__MASK; -} -#define A5XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000 -#define A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20 -static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT5(uint32_t val) -{ - return ((val) << A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT5__MASK; -} -#define A5XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000 -#define A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24 -static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT6(uint32_t val) -{ - return ((val) << A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT6__MASK; -} -#define A5XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000 -#define A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28 -static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT7(uint32_t val) -{ - return ((val) << A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT7__MASK; -} - -#define REG_A5XX_RB_MRT(i0) (0x0000e150 + 0x7*(i0)) - -static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; } -#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001 -#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002 -#define A5XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004 -#define A5XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078 -#define A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3 -static inline uint32_t A5XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val) -{ - return ((val) << A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A5XX_RB_MRT_CONTROL_ROP_CODE__MASK; -} -#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780 -#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7 -static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val) -{ - return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK; -} - -static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; } -#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f -#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0 
-static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK; -} -#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0 -#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5 -static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) -{ - return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK; -} -#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00 -#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8 -static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK; -} -#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000 -#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16 -static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK; -} -#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000 -#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21 -static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) -{ - return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK; -} -#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000 -#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24 -static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK; -} - -static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; } -#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff -#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val) -{ - return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK; -} -#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300 -#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8 -static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val) -{ - return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK; -} -#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00001800 -#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 11 -static inline uint32_t A5XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val) -{ - return ((val) << A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK; -} -#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000 -#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13 -static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; -} -#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000 - -static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; } -#define A5XX_RB_MRT_PITCH__MASK 0xffffffff -#define 
A5XX_RB_MRT_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_MRT_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_MRT_PITCH__SHIFT) & A5XX_RB_MRT_PITCH__MASK; -} - -static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; } -#define A5XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff -#define A5XX_RB_MRT_ARRAY_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_MRT_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH__MASK; -} - -static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; } - -static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; } - -#define REG_A5XX_RB_BLEND_RED 0x0000e1a0 -#define A5XX_RB_BLEND_RED_UINT__MASK 0x000000ff -#define A5XX_RB_BLEND_RED_UINT__SHIFT 0 -static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val) -{ - return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK; -} -#define A5XX_RB_BLEND_RED_SINT__MASK 0x0000ff00 -#define A5XX_RB_BLEND_RED_SINT__SHIFT 8 -static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val) -{ - return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK; -} -#define A5XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000 -#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16 -static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val) -{ - return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK; -} - -#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1 -#define A5XX_RB_BLEND_RED_F32__MASK 0xffffffff -#define A5XX_RB_BLEND_RED_F32__SHIFT 0 -static inline uint32_t A5XX_RB_BLEND_RED_F32(float val) -{ - return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK; -} - -#define REG_A5XX_RB_BLEND_GREEN 0x0000e1a2 -#define A5XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff -#define A5XX_RB_BLEND_GREEN_UINT__SHIFT 0 -static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val) -{ - return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK; -} -#define A5XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00 -#define A5XX_RB_BLEND_GREEN_SINT__SHIFT 8 -static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val) -{ - return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK; -} -#define A5XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000 -#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16 -static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val) -{ - return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK; -} - -#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3 -#define A5XX_RB_BLEND_GREEN_F32__MASK 0xffffffff -#define A5XX_RB_BLEND_GREEN_F32__SHIFT 0 -static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val) -{ - return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK; -} - -#define REG_A5XX_RB_BLEND_BLUE 0x0000e1a4 -#define A5XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff -#define A5XX_RB_BLEND_BLUE_UINT__SHIFT 0 -static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val) -{ - return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK; -} -#define A5XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00 -#define A5XX_RB_BLEND_BLUE_SINT__SHIFT 8 -static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val) -{ - return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK; -} -#define A5XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000 -#define 
A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16 -static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val) -{ - return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK; -} - -#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5 -#define A5XX_RB_BLEND_BLUE_F32__MASK 0xffffffff -#define A5XX_RB_BLEND_BLUE_F32__SHIFT 0 -static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val) -{ - return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK; -} - -#define REG_A5XX_RB_BLEND_ALPHA 0x0000e1a6 -#define A5XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff -#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT 0 -static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val) -{ - return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK; -} -#define A5XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00 -#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT 8 -static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val) -{ - return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK; -} -#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000 -#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16 -static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val) -{ - return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK; -} - -#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7 -#define A5XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff -#define A5XX_RB_BLEND_ALPHA_F32__SHIFT 0 -static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val) -{ - return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK; -} - -#define REG_A5XX_RB_ALPHA_CONTROL 0x0000e1a8 -#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff -#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0 -static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val) -{ - return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK; -} -#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100 -#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00 -#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9 -static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val) -{ - return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK; -} - -#define REG_A5XX_RB_BLEND_CNTL 0x0000e1a9 -#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff -#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0 -static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val) -{ - return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK; -} -#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100 -#define A5XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400 -#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000 -#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16 -static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val) -{ - return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK; -} - -#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0 -#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001 -#define A5XX_RB_DEPTH_PLANE_CNTL_UNK1 0x00000002 - -#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1 -#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000001 -#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002 -#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c -#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2 -static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val) -{ - return ((val) 
<< A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK; -} -#define A5XX_RB_DEPTH_CNTL_Z_READ_ENABLE 0x00000040 - -#define REG_A5XX_RB_DEPTH_BUFFER_INFO 0x0000e1b2 -#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007 -#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0 -static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val) -{ - return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK; -} - -#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO 0x0000e1b3 - -#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI 0x0000e1b4 - -#define REG_A5XX_RB_DEPTH_BUFFER_PITCH 0x0000e1b5 -#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff -#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK; -} - -#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6 -#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff -#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK; -} - -#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0 -#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001 -#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002 -#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004 -#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700 -#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8 -static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val) -{ - return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK; -} -#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800 -#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11 -static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val) -{ - return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK; -} -#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000 -#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14 -static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val) -{ - return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK; -} -#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000 -#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17 -static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val) -{ - return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK; -} -#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000 -#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20 -static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val) -{ - return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK; -} -#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000 -#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23 -static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val) -{ - return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK; -} -#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000 -#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26 -static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val) -{ - return ((val) << 
A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK; -} -#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000 -#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29 -static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val) -{ - return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK; -} - -#define REG_A5XX_RB_STENCIL_INFO 0x0000e1c1 -#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001 - -#define REG_A5XX_RB_STENCIL_BASE_LO 0x0000e1c2 - -#define REG_A5XX_RB_STENCIL_BASE_HI 0x0000e1c3 - -#define REG_A5XX_RB_STENCIL_PITCH 0x0000e1c4 -#define A5XX_RB_STENCIL_PITCH__MASK 0xffffffff -#define A5XX_RB_STENCIL_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_STENCIL_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_STENCIL_PITCH__SHIFT) & A5XX_RB_STENCIL_PITCH__MASK; -} - -#define REG_A5XX_RB_STENCIL_ARRAY_PITCH 0x0000e1c5 -#define A5XX_RB_STENCIL_ARRAY_PITCH__MASK 0xffffffff -#define A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_STENCIL_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT) & A5XX_RB_STENCIL_ARRAY_PITCH__MASK; -} - -#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6 -#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff -#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0 -static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val) -{ - return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK; -} -#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00 -#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8 -static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val) -{ - return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK; -} -#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000 -#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16 -static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val) -{ - return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK; -} - -#define REG_A5XX_RB_STENCILREFMASK_BF 0x0000e1c7 -#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff -#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0 -static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val) -{ - return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK; -} -#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00 -#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8 -static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val) -{ - return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK; -} -#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000 -#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16 -static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val) -{ - return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; -} - -#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0 -#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000 -#define A5XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff -#define A5XX_RB_WINDOW_OFFSET_X__SHIFT 0 -static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val) -{ - return ((val) << 
A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK; -} -#define A5XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000 -#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT 16 -static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val) -{ - return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK; -} - -#define REG_A5XX_RB_SAMPLE_COUNT_CONTROL 0x0000e1d1 -#define A5XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002 - -#define REG_A5XX_RB_BLIT_CNTL 0x0000e210 -#define A5XX_RB_BLIT_CNTL_BUF__MASK 0x0000000f -#define A5XX_RB_BLIT_CNTL_BUF__SHIFT 0 -static inline uint32_t A5XX_RB_BLIT_CNTL_BUF(enum a5xx_blit_buf val) -{ - return ((val) << A5XX_RB_BLIT_CNTL_BUF__SHIFT) & A5XX_RB_BLIT_CNTL_BUF__MASK; -} - -#define REG_A5XX_RB_RESOLVE_CNTL_1 0x0000e211 -#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000 -#define A5XX_RB_RESOLVE_CNTL_1_X__MASK 0x00007fff -#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT 0 -static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val) -{ - return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK; -} -#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK 0x7fff0000 -#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT 16 -static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val) -{ - return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK; -} - -#define REG_A5XX_RB_RESOLVE_CNTL_2 0x0000e212 -#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000 -#define A5XX_RB_RESOLVE_CNTL_2_X__MASK 0x00007fff -#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT 0 -static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val) -{ - return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK; -} -#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK 0x7fff0000 -#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT 16 -static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val) -{ - return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK; -} - -#define REG_A5XX_RB_RESOLVE_CNTL_3 0x0000e213 -#define A5XX_RB_RESOLVE_CNTL_3_TILED 0x00000001 - -#define REG_A5XX_RB_BLIT_DST_LO 0x0000e214 - -#define REG_A5XX_RB_BLIT_DST_HI 0x0000e215 - -#define REG_A5XX_RB_BLIT_DST_PITCH 0x0000e216 -#define A5XX_RB_BLIT_DST_PITCH__MASK 0xffffffff -#define A5XX_RB_BLIT_DST_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_BLIT_DST_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_BLIT_DST_PITCH__SHIFT) & A5XX_RB_BLIT_DST_PITCH__MASK; -} - -#define REG_A5XX_RB_BLIT_DST_ARRAY_PITCH 0x0000e217 -#define A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff -#define A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK; -} - -#define REG_A5XX_RB_CLEAR_COLOR_DW0 0x0000e218 - -#define REG_A5XX_RB_CLEAR_COLOR_DW1 0x0000e219 - -#define REG_A5XX_RB_CLEAR_COLOR_DW2 0x0000e21a - -#define REG_A5XX_RB_CLEAR_COLOR_DW3 0x0000e21b - -#define REG_A5XX_RB_CLEAR_CNTL 0x0000e21c -#define A5XX_RB_CLEAR_CNTL_FAST_CLEAR 0x00000002 -#define A5XX_RB_CLEAR_CNTL_MSAA_RESOLVE 0x00000004 -#define A5XX_RB_CLEAR_CNTL_MASK__MASK 0x000000f0 -#define A5XX_RB_CLEAR_CNTL_MASK__SHIFT 4 -static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val) -{ - return ((val) << A5XX_RB_CLEAR_CNTL_MASK__SHIFT) & A5XX_RB_CLEAR_CNTL_MASK__MASK; -} - -#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x0000e240 - -#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x0000e241 - -#define 
REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242 - -#define REG_A5XX_RB_MRT_FLAG_BUFFER(i0) (0x0000e243 + 0x4*(i0)) - -static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x0000e243 + 0x4*i0; } - -static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x0000e244 + 0x4*i0; } - -static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x0000e245 + 0x4*i0; } -#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK 0xffffffff -#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK; -} - -static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t i0) { return 0x0000e246 + 0x4*i0; } -#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK 0xffffffff -#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK; -} - -#define REG_A5XX_RB_BLIT_FLAG_DST_LO 0x0000e263 - -#define REG_A5XX_RB_BLIT_FLAG_DST_HI 0x0000e264 - -#define REG_A5XX_RB_BLIT_FLAG_DST_PITCH 0x0000e265 -#define A5XX_RB_BLIT_FLAG_DST_PITCH__MASK 0xffffffff -#define A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_BLIT_FLAG_DST_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_PITCH__MASK; -} - -#define REG_A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH 0x0000e266 -#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK 0xffffffff -#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK; -} - -#define REG_A5XX_RB_SAMPLE_COUNT_ADDR_LO 0x0000e267 - -#define REG_A5XX_RB_SAMPLE_COUNT_ADDR_HI 0x0000e268 - -#define REG_A5XX_VPC_CNTL_0 0x0000e280 -#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f -#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0 -static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val) -{ - return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK; -} -#define A5XX_VPC_CNTL_0_VARYING 0x00000800 - -#define REG_A5XX_VPC_VARYING_INTERP(i0) (0x0000e282 + 0x1*(i0)) - -static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; } - -#define REG_A5XX_VPC_VARYING_PS_REPL(i0) (0x0000e28a + 0x1*(i0)) - -static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; } - -#define REG_A5XX_UNKNOWN_E292 0x0000e292 - -#define REG_A5XX_UNKNOWN_E293 0x0000e293 - -#define REG_A5XX_VPC_VAR(i0) (0x0000e294 + 0x1*(i0)) - -static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; } - -#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298 - -#define REG_A5XX_VPC_CLIP_CNTL 0x0000e29a -#define A5XX_VPC_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff -#define A5XX_VPC_CLIP_CNTL_CLIP_MASK__SHIFT 0 -static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_MASK(uint32_t val) -{ - return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_MASK__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_MASK__MASK; -} -#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00 -#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8 
-static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val) -{ - return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__MASK; -} -#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000 -#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16 -static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val) -{ - return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__MASK; -} - -#define REG_A5XX_VPC_PACK 0x0000e29d -#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff -#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT 0 -static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val) -{ - return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK; -} -#define A5XX_VPC_PACK_PSIZELOC__MASK 0x0000ff00 -#define A5XX_VPC_PACK_PSIZELOC__SHIFT 8 -static inline uint32_t A5XX_VPC_PACK_PSIZELOC(uint32_t val) -{ - return ((val) << A5XX_VPC_PACK_PSIZELOC__SHIFT) & A5XX_VPC_PACK_PSIZELOC__MASK; -} - -#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0 - -#define REG_A5XX_VPC_SO_BUF_CNTL 0x0000e2a1 -#define A5XX_VPC_SO_BUF_CNTL_BUF0 0x00000001 -#define A5XX_VPC_SO_BUF_CNTL_BUF1 0x00000008 -#define A5XX_VPC_SO_BUF_CNTL_BUF2 0x00000040 -#define A5XX_VPC_SO_BUF_CNTL_BUF3 0x00000200 -#define A5XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000 - -#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2 -#define A5XX_VPC_SO_OVERRIDE_SO_DISABLE 0x00000001 - -#define REG_A5XX_VPC_SO_CNTL 0x0000e2a3 -#define A5XX_VPC_SO_CNTL_ENABLE 0x00010000 - -#define REG_A5XX_VPC_SO_PROG 0x0000e2a4 -#define A5XX_VPC_SO_PROG_A_BUF__MASK 0x00000003 -#define A5XX_VPC_SO_PROG_A_BUF__SHIFT 0 -static inline uint32_t A5XX_VPC_SO_PROG_A_BUF(uint32_t val) -{ - return ((val) << A5XX_VPC_SO_PROG_A_BUF__SHIFT) & A5XX_VPC_SO_PROG_A_BUF__MASK; -} -#define A5XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc -#define A5XX_VPC_SO_PROG_A_OFF__SHIFT 2 -static inline uint32_t A5XX_VPC_SO_PROG_A_OFF(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A5XX_VPC_SO_PROG_A_OFF__SHIFT) & A5XX_VPC_SO_PROG_A_OFF__MASK; -} -#define A5XX_VPC_SO_PROG_A_EN 0x00000800 -#define A5XX_VPC_SO_PROG_B_BUF__MASK 0x00003000 -#define A5XX_VPC_SO_PROG_B_BUF__SHIFT 12 -static inline uint32_t A5XX_VPC_SO_PROG_B_BUF(uint32_t val) -{ - return ((val) << A5XX_VPC_SO_PROG_B_BUF__SHIFT) & A5XX_VPC_SO_PROG_B_BUF__MASK; -} -#define A5XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000 -#define A5XX_VPC_SO_PROG_B_OFF__SHIFT 14 -static inline uint32_t A5XX_VPC_SO_PROG_B_OFF(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A5XX_VPC_SO_PROG_B_OFF__SHIFT) & A5XX_VPC_SO_PROG_B_OFF__MASK; -} -#define A5XX_VPC_SO_PROG_B_EN 0x00800000 - -#define REG_A5XX_VPC_SO(i0) (0x0000e2a7 + 0x7*(i0)) - -static inline uint32_t REG_A5XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000e2a7 + 0x7*i0; } - -static inline uint32_t REG_A5XX_VPC_SO_BUFFER_BASE_HI(uint32_t i0) { return 0x0000e2a8 + 0x7*i0; } - -static inline uint32_t REG_A5XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000e2a9 + 0x7*i0; } - -static inline uint32_t REG_A5XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000e2aa + 0x7*i0; } - -static inline uint32_t REG_A5XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000e2ab + 0x7*i0; } - -static inline uint32_t REG_A5XX_VPC_SO_FLUSH_BASE_LO(uint32_t i0) { return 0x0000e2ac + 0x7*i0; } - -static inline uint32_t REG_A5XX_VPC_SO_FLUSH_BASE_HI(uint32_t i0) { return 0x0000e2ad + 0x7*i0; } - -#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384 -#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 
0x0000007f -#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0 -static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val) -{ - return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK; -} -#define A5XX_PC_PRIMITIVE_CNTL_PRIMITIVE_RESTART 0x00000100 -#define A5XX_PC_PRIMITIVE_CNTL_COUNT_PRIMITIVES 0x00000200 -#define A5XX_PC_PRIMITIVE_CNTL_PROVOKING_VTX_LAST 0x00000400 - -#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385 -#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800 - -#define REG_A5XX_PC_RASTER_CNTL 0x0000e388 -#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x00000007 -#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 0 -static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val) -{ - return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK; -} -#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000038 -#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT 3 -static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val) -{ - return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK; -} -#define A5XX_PC_RASTER_CNTL_POLYMODE_ENABLE 0x00000040 - -#define REG_A5XX_PC_CLIP_CNTL 0x0000e389 -#define A5XX_PC_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff -#define A5XX_PC_CLIP_CNTL_CLIP_MASK__SHIFT 0 -static inline uint32_t A5XX_PC_CLIP_CNTL_CLIP_MASK(uint32_t val) -{ - return ((val) << A5XX_PC_CLIP_CNTL_CLIP_MASK__SHIFT) & A5XX_PC_CLIP_CNTL_CLIP_MASK__MASK; -} - -#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c - -#define REG_A5XX_PC_GS_LAYERED 0x0000e38d - -#define REG_A5XX_PC_GS_PARAM 0x0000e38e -#define A5XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff -#define A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0 -static inline uint32_t A5XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val) -{ - return ((val) << A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A5XX_PC_GS_PARAM_MAX_VERTICES__MASK; -} -#define A5XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800 -#define A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11 -static inline uint32_t A5XX_PC_GS_PARAM_INVOCATIONS(uint32_t val) -{ - return ((val) << A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A5XX_PC_GS_PARAM_INVOCATIONS__MASK; -} -#define A5XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000 -#define A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23 -static inline uint32_t A5XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val) -{ - return ((val) << A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A5XX_PC_GS_PARAM_PRIMTYPE__MASK; -} - -#define REG_A5XX_PC_HS_PARAM 0x0000e38f -#define A5XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f -#define A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0 -static inline uint32_t A5XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val) -{ - return ((val) << A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A5XX_PC_HS_PARAM_VERTICES_OUT__MASK; -} -#define A5XX_PC_HS_PARAM_SPACING__MASK 0x00600000 -#define A5XX_PC_HS_PARAM_SPACING__SHIFT 21 -static inline uint32_t A5XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val) -{ - return ((val) << A5XX_PC_HS_PARAM_SPACING__SHIFT) & A5XX_PC_HS_PARAM_SPACING__MASK; -} -#define A5XX_PC_HS_PARAM_CW 0x00800000 -#define A5XX_PC_HS_PARAM_CONNECTED 0x01000000 - -#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0 - -#define REG_A5XX_VFD_CONTROL_0 0x0000e400 -#define A5XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f -#define A5XX_VFD_CONTROL_0_VTXCNT__SHIFT 0 -static inline uint32_t A5XX_VFD_CONTROL_0_VTXCNT(uint32_t val) -{ - return ((val) << 
A5XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A5XX_VFD_CONTROL_0_VTXCNT__MASK; -} - -#define REG_A5XX_VFD_CONTROL_1 0x0000e401 -#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff -#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0 -static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val) -{ - return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK; -} -#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00 -#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8 -static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val) -{ - return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK; -} -#define A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000 -#define A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16 -static inline uint32_t A5XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val) -{ - return ((val) << A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK; -} - -#define REG_A5XX_VFD_CONTROL_2 0x0000e402 -#define A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK 0x000000ff -#define A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT 0 -static inline uint32_t A5XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val) -{ - return ((val) << A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK; -} - -#define REG_A5XX_VFD_CONTROL_3 0x0000e403 -#define A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK 0x0000ff00 -#define A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT 8 -static inline uint32_t A5XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val) -{ - return ((val) << A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK; -} -#define A5XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000 -#define A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16 -static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val) -{ - return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSX__MASK; -} -#define A5XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000 -#define A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24 -static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val) -{ - return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSY__MASK; -} - -#define REG_A5XX_VFD_CONTROL_4 0x0000e404 - -#define REG_A5XX_VFD_CONTROL_5 0x0000e405 - -#define REG_A5XX_VFD_INDEX_OFFSET 0x0000e408 - -#define REG_A5XX_VFD_INSTANCE_START_OFFSET 0x0000e409 - -#define REG_A5XX_VFD_FETCH(i0) (0x0000e40a + 0x4*(i0)) - -static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; } - -static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; } - -static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; } - -static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; } - -#define REG_A5XX_VFD_DECODE(i0) (0x0000e48a + 0x2*(i0)) - -static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; } -#define A5XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f -#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT 0 -static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val) -{ - return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK; -} -#define A5XX_VFD_DECODE_INSTR_INSTANCED 0x00020000 -#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000 -#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20 -static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val) -{ - return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK; -} 
-#define A5XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000 -#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT 28 -static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK; -} -#define A5XX_VFD_DECODE_INSTR_UNK30 0x40000000 -#define A5XX_VFD_DECODE_INSTR_FLOAT 0x80000000 - -static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; } - -#define REG_A5XX_VFD_DEST_CNTL(i0) (0x0000e4ca + 0x1*(i0)) - -static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; } -#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f -#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0 -static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val) -{ - return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK; -} -#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0 -#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4 -static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val) -{ - return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK; -} - -#define REG_A5XX_VFD_POWER_CNTL 0x0000e4f0 - -#define REG_A5XX_SP_SP_CNTL 0x0000e580 - -#define REG_A5XX_SP_VS_CONFIG 0x0000e584 -#define A5XX_SP_VS_CONFIG_ENABLED 0x00000001 -#define A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe -#define A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 -static inline uint32_t A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__MASK; -} -#define A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 -#define A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__SHIFT 8 -static inline uint32_t A5XX_SP_VS_CONFIG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__MASK; -} - -#define REG_A5XX_SP_FS_CONFIG 0x0000e585 -#define A5XX_SP_FS_CONFIG_ENABLED 0x00000001 -#define A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe -#define A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 -static inline uint32_t A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__MASK; -} -#define A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 -#define A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__SHIFT 8 -static inline uint32_t A5XX_SP_FS_CONFIG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__MASK; -} - -#define REG_A5XX_SP_HS_CONFIG 0x0000e586 -#define A5XX_SP_HS_CONFIG_ENABLED 0x00000001 -#define A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe -#define A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 -static inline uint32_t A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__MASK; -} -#define A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 -#define A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__SHIFT 8 -static inline uint32_t A5XX_SP_HS_CONFIG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__MASK; -} - -#define REG_A5XX_SP_DS_CONFIG 0x0000e587 -#define A5XX_SP_DS_CONFIG_ENABLED 0x00000001 -#define A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe -#define 
A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 -static inline uint32_t A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__MASK; -} -#define A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 -#define A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__SHIFT 8 -static inline uint32_t A5XX_SP_DS_CONFIG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__MASK; -} - -#define REG_A5XX_SP_GS_CONFIG 0x0000e588 -#define A5XX_SP_GS_CONFIG_ENABLED 0x00000001 -#define A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe -#define A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 -static inline uint32_t A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__MASK; -} -#define A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 -#define A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__SHIFT 8 -static inline uint32_t A5XX_SP_GS_CONFIG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__MASK; -} - -#define REG_A5XX_SP_CS_CONFIG 0x0000e589 -#define A5XX_SP_CS_CONFIG_ENABLED 0x00000001 -#define A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe -#define A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 -static inline uint32_t A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__MASK; -} -#define A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 -#define A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__SHIFT 8 -static inline uint32_t A5XX_SP_CS_CONFIG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__MASK; -} - -#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a - -#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b - -#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590 -#define A5XX_SP_VS_CTRL_REG0_BUFFER 0x00000004 -#define A5XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00000008 -#define A5XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 3 -static inline uint32_t A5XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A5XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_VS_CTRL_REG0_THREADSIZE__MASK; -} -#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 -#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 -static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 -#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 -static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000 -#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000 -#define A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000 -#define A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 25 -static inline uint32_t A5XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val) -{ - return ((val) << A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK; -} - -#define REG_A5XX_SP_PRIMITIVE_CNTL 0x0000e592 -#define A5XX_SP_PRIMITIVE_CNTL_VSOUT__MASK 0x0000001f -#define 
A5XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT 0 -static inline uint32_t A5XX_SP_PRIMITIVE_CNTL_VSOUT(uint32_t val) -{ - return ((val) << A5XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT) & A5XX_SP_PRIMITIVE_CNTL_VSOUT__MASK; -} - -#define REG_A5XX_SP_VS_OUT(i0) (0x0000e593 + 0x1*(i0)) - -static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; } -#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff -#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT 0 -static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val) -{ - return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK; -} -#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00 -#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8 -static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val) -{ - return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK; -} -#define A5XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000 -#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT 16 -static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val) -{ - return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK; -} -#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000 -#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24 -static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val) -{ - return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK; -} - -#define REG_A5XX_SP_VS_VPC_DST(i0) (0x0000e5a3 + 0x1*(i0)) - -static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; } -#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff -#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0 -static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val) -{ - return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK; -} -#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 -#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8 -static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val) -{ - return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK; -} -#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 -#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16 -static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val) -{ - return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK; -} -#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 -#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24 -static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val) -{ - return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK; -} - -#define REG_A5XX_UNKNOWN_E5AB 0x0000e5ab - -#define REG_A5XX_SP_VS_OBJ_START_LO 0x0000e5ac - -#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad - -#define REG_A5XX_SP_VS_PVT_MEM_PARAM 0x0000e5ae -#define A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff -#define A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0 -static inline uint32_t A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val) -{ - assert(!(val & 0x1ff)); - return (((val >> 9)) << A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK; -} -#define A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00 -#define A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8 -static inline uint32_t A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val) -{ - assert(!(val & 0x7ff)); - return (((val >> 11)) << 
A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK; -} -#define A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A5XX_SP_VS_PVT_MEM_ADDR 0x0000e5af - -#define REG_A5XX_SP_VS_PVT_MEM_SIZE 0x0000e5b1 -#define A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff -#define A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0 -static inline uint32_t A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK; -} - -#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0 -#define A5XX_SP_FS_CTRL_REG0_BUFFER 0x00000004 -#define A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00000008 -#define A5XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 3 -static inline uint32_t A5XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A5XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK; -} -#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 -#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 -static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 -#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 -static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000 -#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000 -#define A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000 -#define A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 25 -static inline uint32_t A5XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val) -{ - return ((val) << A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK; -} - -#define REG_A5XX_UNKNOWN_E5C2 0x0000e5c2 - -#define REG_A5XX_SP_FS_OBJ_START_LO 0x0000e5c3 - -#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4 - -#define REG_A5XX_SP_FS_PVT_MEM_PARAM 0x0000e5c5 -#define A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff -#define A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0 -static inline uint32_t A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val) -{ - assert(!(val & 0x1ff)); - return (((val >> 9)) << A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK; -} -#define A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00 -#define A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8 -static inline uint32_t A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val) -{ - assert(!(val & 0x7ff)); - return (((val >> 11)) << A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK; -} -#define A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & 
A5XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A5XX_SP_FS_PVT_MEM_ADDR 0x0000e5c6 - -#define REG_A5XX_SP_FS_PVT_MEM_SIZE 0x0000e5c8 -#define A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff -#define A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0 -static inline uint32_t A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK; -} - -#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9 -#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff -#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT 0 -static inline uint32_t A5XX_SP_BLEND_CNTL_ENABLE_BLEND(uint32_t val) -{ - return ((val) << A5XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK; -} -#define A5XX_SP_BLEND_CNTL_UNK8 0x00000100 -#define A5XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400 - -#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca -#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f -#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT 0 -static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val) -{ - return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK; -} -#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK 0x00001fe0 -#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT 5 -static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val) -{ - return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK; -} -#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK 0x001fe000 -#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT 13 -static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val) -{ - return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK; -} - -#define REG_A5XX_SP_FS_OUTPUT(i0) (0x0000e5cb + 0x1*(i0)) - -static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; } -#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff -#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0 -static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val) -{ - return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK; -} -#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100 - -#define REG_A5XX_SP_FS_MRT(i0) (0x0000e5d3 + 0x1*(i0)) - -static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; } -#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff -#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val) -{ - return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK; -} -#define A5XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100 -#define A5XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200 -#define A5XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400 - -#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db - -#define REG_A5XX_SP_CS_CTRL_REG0 0x0000e5f0 -#define A5XX_SP_CS_CTRL_REG0_BUFFER 0x00000004 -#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00000008 -#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 3 -static inline uint32_t A5XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK; -} -#define A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 -#define A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 -static inline uint32_t 
A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 -#define A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 -static inline uint32_t A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A5XX_SP_CS_CTRL_REG0_VARYING 0x00010000 -#define A5XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x00100000 -#define A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000 -#define A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 25 -static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val) -{ - return ((val) << A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK; -} - -#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2 - -#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3 - -#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4 - -#define REG_A5XX_SP_CS_PVT_MEM_PARAM 0x0000e5f5 -#define A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff -#define A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0 -static inline uint32_t A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val) -{ - assert(!(val & 0x1ff)); - return (((val >> 9)) << A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK; -} -#define A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00 -#define A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8 -static inline uint32_t A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val) -{ - assert(!(val & 0x7ff)); - return (((val >> 11)) << A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK; -} -#define A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A5XX_SP_CS_PVT_MEM_ADDR 0x0000e5f6 - -#define REG_A5XX_SP_CS_PVT_MEM_SIZE 0x0000e5f8 -#define A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff -#define A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0 -static inline uint32_t A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK; -} - -#define REG_A5XX_SP_HS_CTRL_REG0 0x0000e600 -#define A5XX_SP_HS_CTRL_REG0_BUFFER 0x00000004 -#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00000008 -#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 3 -static inline uint32_t A5XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK; -} -#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 -#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 -static inline uint32_t A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 -#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 -static inline uint32_t A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << 
A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A5XX_SP_HS_CTRL_REG0_VARYING 0x00010000 -#define A5XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x00100000 -#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000 -#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 25 -static inline uint32_t A5XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val) -{ - return ((val) << A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK; -} - -#define REG_A5XX_UNKNOWN_E602 0x0000e602 - -#define REG_A5XX_SP_HS_OBJ_START_LO 0x0000e603 - -#define REG_A5XX_SP_HS_OBJ_START_HI 0x0000e604 - -#define REG_A5XX_SP_HS_PVT_MEM_PARAM 0x0000e605 -#define A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff -#define A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0 -static inline uint32_t A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val) -{ - assert(!(val & 0x1ff)); - return (((val >> 9)) << A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK; -} -#define A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00 -#define A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8 -static inline uint32_t A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val) -{ - assert(!(val & 0x7ff)); - return (((val >> 11)) << A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK; -} -#define A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A5XX_SP_HS_PVT_MEM_ADDR 0x0000e606 - -#define REG_A5XX_SP_HS_PVT_MEM_SIZE 0x0000e608 -#define A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff -#define A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0 -static inline uint32_t A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK; -} - -#define REG_A5XX_SP_DS_CTRL_REG0 0x0000e610 -#define A5XX_SP_DS_CTRL_REG0_BUFFER 0x00000004 -#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00000008 -#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 3 -static inline uint32_t A5XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK; -} -#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 -#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 -static inline uint32_t A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 -#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 -static inline uint32_t A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A5XX_SP_DS_CTRL_REG0_VARYING 0x00010000 -#define A5XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x00100000 -#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000 -#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 25 -static inline uint32_t A5XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val) -{ - return ((val) << 
A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK; -} - -#define REG_A5XX_UNKNOWN_E62B 0x0000e62b - -#define REG_A5XX_SP_DS_OBJ_START_LO 0x0000e62c - -#define REG_A5XX_SP_DS_OBJ_START_HI 0x0000e62d - -#define REG_A5XX_SP_DS_PVT_MEM_PARAM 0x0000e62e -#define A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff -#define A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0 -static inline uint32_t A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val) -{ - assert(!(val & 0x1ff)); - return (((val >> 9)) << A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK; -} -#define A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00 -#define A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8 -static inline uint32_t A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val) -{ - assert(!(val & 0x7ff)); - return (((val >> 11)) << A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK; -} -#define A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A5XX_SP_DS_PVT_MEM_ADDR 0x0000e62f - -#define REG_A5XX_SP_DS_PVT_MEM_SIZE 0x0000e631 -#define A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff -#define A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0 -static inline uint32_t A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK; -} - -#define REG_A5XX_SP_GS_CTRL_REG0 0x0000e640 -#define A5XX_SP_GS_CTRL_REG0_BUFFER 0x00000004 -#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00000008 -#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 3 -static inline uint32_t A5XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK; -} -#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 -#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 -static inline uint32_t A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 -#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 -static inline uint32_t A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A5XX_SP_GS_CTRL_REG0_VARYING 0x00010000 -#define A5XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x00100000 -#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000 -#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 25 -static inline uint32_t A5XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val) -{ - return ((val) << A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK; -} - -#define REG_A5XX_UNKNOWN_E65B 0x0000e65b - -#define REG_A5XX_SP_GS_OBJ_START_LO 0x0000e65c - -#define REG_A5XX_SP_GS_OBJ_START_HI 0x0000e65d - -#define REG_A5XX_SP_GS_PVT_MEM_PARAM 0x0000e65e -#define A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff -#define A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0 -static inline 
uint32_t A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val) -{ - assert(!(val & 0x1ff)); - return (((val >> 9)) << A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A5XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK; -} -#define A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK 0x00ffff00 -#define A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT 8 -static inline uint32_t A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET(uint32_t val) -{ - assert(!(val & 0x7ff)); - return (((val >> 11)) << A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET__SHIFT) & A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKOFFSET__MASK; -} -#define A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A5XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A5XX_SP_GS_PVT_MEM_ADDR 0x0000e65f - -#define REG_A5XX_SP_GS_PVT_MEM_SIZE 0x0000e661 -#define A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff -#define A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0 -static inline uint32_t A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A5XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK; -} - -#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704 -#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 -#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 -static inline uint32_t A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK; -} - -#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL 0x0000e705 -#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003 -#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0 -static inline uint32_t A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK; -} -#define A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004 - -#define REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_LO 0x0000e706 - -#define REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_HI 0x0000e707 - -#define REG_A5XX_TPL1_VS_TEX_COUNT 0x0000e700 - -#define REG_A5XX_TPL1_HS_TEX_COUNT 0x0000e701 - -#define REG_A5XX_TPL1_DS_TEX_COUNT 0x0000e702 - -#define REG_A5XX_TPL1_GS_TEX_COUNT 0x0000e703 - -#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722 - -#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723 - -#define REG_A5XX_TPL1_HS_TEX_SAMP_LO 0x0000e724 - -#define REG_A5XX_TPL1_HS_TEX_SAMP_HI 0x0000e725 - -#define REG_A5XX_TPL1_DS_TEX_SAMP_LO 0x0000e726 - -#define REG_A5XX_TPL1_DS_TEX_SAMP_HI 0x0000e727 - -#define REG_A5XX_TPL1_GS_TEX_SAMP_LO 0x0000e728 - -#define REG_A5XX_TPL1_GS_TEX_SAMP_HI 0x0000e729 - -#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a - -#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b - -#define REG_A5XX_TPL1_HS_TEX_CONST_LO 0x0000e72c - -#define REG_A5XX_TPL1_HS_TEX_CONST_HI 0x0000e72d - -#define REG_A5XX_TPL1_DS_TEX_CONST_LO 0x0000e72e - -#define REG_A5XX_TPL1_DS_TEX_CONST_HI 0x0000e72f - -#define REG_A5XX_TPL1_GS_TEX_CONST_LO 0x0000e730 - -#define REG_A5XX_TPL1_GS_TEX_CONST_HI 0x0000e731 - -#define REG_A5XX_TPL1_FS_TEX_COUNT 0x0000e750 - -#define REG_A5XX_TPL1_CS_TEX_COUNT 0x0000e751 - -#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75a - -#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75b - -#define 
REG_A5XX_TPL1_CS_TEX_SAMP_LO 0x0000e75c - -#define REG_A5XX_TPL1_CS_TEX_SAMP_HI 0x0000e75d - -#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75e - -#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75f - -#define REG_A5XX_TPL1_CS_TEX_CONST_LO 0x0000e760 - -#define REG_A5XX_TPL1_CS_TEX_CONST_HI 0x0000e761 - -#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764 - -#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784 -#define A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000001 -#define A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 0 -static inline uint32_t A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK; -} -#define A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__MASK 0x00000004 -#define A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__SHIFT 2 -static inline uint32_t A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE(enum a3xx_threadsize val) -{ - return ((val) << A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__SHIFT) & A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__MASK; -} - -#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785 -#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x0000003f -#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0 -static inline uint32_t A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK; -} - -#define REG_A5XX_HLSQ_CONTROL_2_REG 0x0000e786 -#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff -#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0 -static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK; -} -#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00 -#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8 -static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK; -} -#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000 -#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16 -static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK; -} -#define A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000 -#define A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24 -static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK; -} - -#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787 -#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff -#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0 -static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK; -} -#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00 -#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8 -static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK; -} -#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000 -#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16 -static inline uint32_t 
A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK; -} -#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000 -#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24 -static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK; -} - -#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788 -#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff -#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0 -static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK; -} -#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00 -#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8 -static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK; -} -#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000 -#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16 -static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK; -} -#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000 -#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24 -static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK; -} - -#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a - -#define REG_A5XX_HLSQ_VS_CONFIG 0x0000e78b -#define A5XX_HLSQ_VS_CONFIG_ENABLED 0x00000001 -#define A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe -#define A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 -static inline uint32_t A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__MASK; -} -#define A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 -#define A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__SHIFT 8 -static inline uint32_t A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__MASK; -} - -#define REG_A5XX_HLSQ_FS_CONFIG 0x0000e78c -#define A5XX_HLSQ_FS_CONFIG_ENABLED 0x00000001 -#define A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe -#define A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 -static inline uint32_t A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__MASK; -} -#define A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 -#define A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__SHIFT 8 -static inline uint32_t A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__MASK; -} - -#define REG_A5XX_HLSQ_HS_CONFIG 0x0000e78d -#define A5XX_HLSQ_HS_CONFIG_ENABLED 0x00000001 -#define A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe -#define A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 -static inline uint32_t 
A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__MASK; -} -#define A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 -#define A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__SHIFT 8 -static inline uint32_t A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__MASK; -} - -#define REG_A5XX_HLSQ_DS_CONFIG 0x0000e78e -#define A5XX_HLSQ_DS_CONFIG_ENABLED 0x00000001 -#define A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe -#define A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 -static inline uint32_t A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__MASK; -} -#define A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 -#define A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__SHIFT 8 -static inline uint32_t A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__MASK; -} - -#define REG_A5XX_HLSQ_GS_CONFIG 0x0000e78f -#define A5XX_HLSQ_GS_CONFIG_ENABLED 0x00000001 -#define A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe -#define A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 -static inline uint32_t A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__MASK; -} -#define A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 -#define A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__SHIFT 8 -static inline uint32_t A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__MASK; -} - -#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790 -#define A5XX_HLSQ_CS_CONFIG_ENABLED 0x00000001 -#define A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe -#define A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 -static inline uint32_t A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__MASK; -} -#define A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 -#define A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__SHIFT 8 -static inline uint32_t A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__MASK; -} - -#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791 -#define A5XX_HLSQ_VS_CNTL_SSBO_ENABLE 0x00000001 -#define A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK 0xfffffffe -#define A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT 1 -static inline uint32_t A5XX_HLSQ_VS_CNTL_INSTRLEN(uint32_t val) -{ - return ((val) << A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK; -} - -#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792 -#define A5XX_HLSQ_FS_CNTL_SSBO_ENABLE 0x00000001 -#define A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK 0xfffffffe -#define A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT 1 -static inline uint32_t A5XX_HLSQ_FS_CNTL_INSTRLEN(uint32_t val) -{ - return ((val) << A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK; -} - -#define REG_A5XX_HLSQ_HS_CNTL 0x0000e793 -#define A5XX_HLSQ_HS_CNTL_SSBO_ENABLE 0x00000001 -#define A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK 0xfffffffe -#define A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT 1 -static inline uint32_t 
A5XX_HLSQ_HS_CNTL_INSTRLEN(uint32_t val) -{ - return ((val) << A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK; -} - -#define REG_A5XX_HLSQ_DS_CNTL 0x0000e794 -#define A5XX_HLSQ_DS_CNTL_SSBO_ENABLE 0x00000001 -#define A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK 0xfffffffe -#define A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT 1 -static inline uint32_t A5XX_HLSQ_DS_CNTL_INSTRLEN(uint32_t val) -{ - return ((val) << A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK; -} - -#define REG_A5XX_HLSQ_GS_CNTL 0x0000e795 -#define A5XX_HLSQ_GS_CNTL_SSBO_ENABLE 0x00000001 -#define A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK 0xfffffffe -#define A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT 1 -static inline uint32_t A5XX_HLSQ_GS_CNTL_INSTRLEN(uint32_t val) -{ - return ((val) << A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK; -} - -#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796 -#define A5XX_HLSQ_CS_CNTL_SSBO_ENABLE 0x00000001 -#define A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK 0xfffffffe -#define A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT 1 -static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK; -} - -#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X 0x0000e7b9 - -#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000e7ba - -#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb - -#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0 -#define A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003 -#define A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0 -static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK; -} -#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc -#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2 -static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK; -} -#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000 -#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12 -static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK; -} -#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000 -#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22 -static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK; -} - -#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1 -#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff -#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0 -static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK; -} - -#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2 -#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff -#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0 -static inline uint32_t A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK; -} - -#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3 -#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff -#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0 -static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val) -{ - return ((val) << 
A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK; -} - -#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4 -#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff -#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0 -static inline uint32_t A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK; -} - -#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5 -#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff -#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0 -static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK; -} - -#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6 -#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff -#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0 -static inline uint32_t A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK; -} - -#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7 -#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff -#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0 -static inline uint32_t A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK; -} -#define A5XX_HLSQ_CS_CNTL_0_UNK0__MASK 0x0000ff00 -#define A5XX_HLSQ_CS_CNTL_0_UNK0__SHIFT 8 -static inline uint32_t A5XX_HLSQ_CS_CNTL_0_UNK0(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_CNTL_0_UNK0__SHIFT) & A5XX_HLSQ_CS_CNTL_0_UNK0__MASK; -} -#define A5XX_HLSQ_CS_CNTL_0_UNK1__MASK 0x00ff0000 -#define A5XX_HLSQ_CS_CNTL_0_UNK1__SHIFT 16 -static inline uint32_t A5XX_HLSQ_CS_CNTL_0_UNK1(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_CNTL_0_UNK1__SHIFT) & A5XX_HLSQ_CS_CNTL_0_UNK1__MASK; -} -#define A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000 -#define A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24 -static inline uint32_t A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val) -{ - return ((val) << A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK; -} - -#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8 - -#define REG_A5XX_UNKNOWN_E7C0 0x0000e7c0 - -#define REG_A5XX_HLSQ_VS_CONSTLEN 0x0000e7c3 - -#define REG_A5XX_HLSQ_VS_INSTRLEN 0x0000e7c4 - -#define REG_A5XX_UNKNOWN_E7C5 0x0000e7c5 - -#define REG_A5XX_HLSQ_HS_CONSTLEN 0x0000e7c8 - -#define REG_A5XX_HLSQ_HS_INSTRLEN 0x0000e7c9 - -#define REG_A5XX_UNKNOWN_E7CA 0x0000e7ca - -#define REG_A5XX_HLSQ_DS_CONSTLEN 0x0000e7cd - -#define REG_A5XX_HLSQ_DS_INSTRLEN 0x0000e7ce - -#define REG_A5XX_UNKNOWN_E7CF 0x0000e7cf - -#define REG_A5XX_HLSQ_GS_CONSTLEN 0x0000e7d2 - -#define REG_A5XX_HLSQ_GS_INSTRLEN 0x0000e7d3 - -#define REG_A5XX_UNKNOWN_E7D4 0x0000e7d4 - -#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7 - -#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8 - -#define REG_A5XX_UNKNOWN_E7D9 0x0000e7d9 - -#define REG_A5XX_HLSQ_CS_CONSTLEN 0x0000e7dc - -#define REG_A5XX_HLSQ_CS_INSTRLEN 0x0000e7dd - -#define REG_A5XX_RB_2D_BLIT_CNTL 0x00002100 - -#define REG_A5XX_RB_2D_SRC_SOLID_DW0 0x00002101 - -#define REG_A5XX_RB_2D_SRC_SOLID_DW1 0x00002102 - -#define REG_A5XX_RB_2D_SRC_SOLID_DW2 0x00002103 - -#define REG_A5XX_RB_2D_SRC_SOLID_DW3 0x00002104 - -#define REG_A5XX_RB_2D_SRC_INFO 0x00002107 -#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff -#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0 
-static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val) -{ - return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK; -} -#define A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK 0x00000300 -#define A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT 8 -static inline uint32_t A5XX_RB_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val) -{ - return ((val) << A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK; -} -#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00 -#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT 10 -static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK; -} -#define A5XX_RB_2D_SRC_INFO_FLAGS 0x00001000 -#define A5XX_RB_2D_SRC_INFO_SRGB 0x00002000 - -#define REG_A5XX_RB_2D_SRC_LO 0x00002108 - -#define REG_A5XX_RB_2D_SRC_HI 0x00002109 - -#define REG_A5XX_RB_2D_SRC_SIZE 0x0000210a -#define A5XX_RB_2D_SRC_SIZE_PITCH__MASK 0x0000ffff -#define A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_2D_SRC_SIZE_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_PITCH__MASK; -} -#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK 0xffff0000 -#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT 16 -static inline uint32_t A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK; -} - -#define REG_A5XX_RB_2D_DST_INFO 0x00002110 -#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff -#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val) -{ - return ((val) << A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK; -} -#define A5XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300 -#define A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8 -static inline uint32_t A5XX_RB_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val) -{ - return ((val) << A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_DST_INFO_TILE_MODE__MASK; -} -#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00 -#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10 -static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK; -} -#define A5XX_RB_2D_DST_INFO_FLAGS 0x00001000 -#define A5XX_RB_2D_DST_INFO_SRGB 0x00002000 - -#define REG_A5XX_RB_2D_DST_LO 0x00002111 - -#define REG_A5XX_RB_2D_DST_HI 0x00002112 - -#define REG_A5XX_RB_2D_DST_SIZE 0x00002113 -#define A5XX_RB_2D_DST_SIZE_PITCH__MASK 0x0000ffff -#define A5XX_RB_2D_DST_SIZE_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_2D_DST_SIZE_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_PITCH__MASK; -} -#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK 0xffff0000 -#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT 16 -static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK; -} - -#define REG_A5XX_RB_2D_SRC_FLAGS_LO 0x00002140 - -#define REG_A5XX_RB_2D_SRC_FLAGS_HI 0x00002141 - -#define REG_A5XX_RB_2D_SRC_FLAGS_PITCH 0x00002142 -#define A5XX_RB_2D_SRC_FLAGS_PITCH__MASK 0xffffffff 
-#define A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_2D_SRC_FLAGS_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_SRC_FLAGS_PITCH__MASK; -} - -#define REG_A5XX_RB_2D_DST_FLAGS_LO 0x00002143 - -#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144 - -#define REG_A5XX_RB_2D_DST_FLAGS_PITCH 0x00002145 -#define A5XX_RB_2D_DST_FLAGS_PITCH__MASK 0xffffffff -#define A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT 0 -static inline uint32_t A5XX_RB_2D_DST_FLAGS_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_DST_FLAGS_PITCH__MASK; -} - -#define REG_A5XX_GRAS_2D_BLIT_CNTL 0x00002180 - -#define REG_A5XX_GRAS_2D_SRC_INFO 0x00002181 -#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff -#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val) -{ - return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK; -} -#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300 -#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT 8 -static inline uint32_t A5XX_GRAS_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val) -{ - return ((val) << A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK; -} -#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00 -#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10 -static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK; -} -#define A5XX_GRAS_2D_SRC_INFO_FLAGS 0x00001000 -#define A5XX_GRAS_2D_SRC_INFO_SRGB 0x00002000 - -#define REG_A5XX_GRAS_2D_DST_INFO 0x00002182 -#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff -#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val) -{ - return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK; -} -#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK 0x00000300 -#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT 8 -static inline uint32_t A5XX_GRAS_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val) -{ - return ((val) << A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK; -} -#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00 -#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT 10 -static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK; -} -#define A5XX_GRAS_2D_DST_INFO_FLAGS 0x00001000 -#define A5XX_GRAS_2D_DST_INFO_SRGB 0x00002000 - -#define REG_A5XX_UNKNOWN_2184 0x00002184 - -#define REG_A5XX_TEX_SAMP_0 0x00000000 -#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001 -#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006 -#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT 1 -static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val) -{ - return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK; -} -#define A5XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018 -#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT 3 -static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val) -{ - return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK; -} -#define A5XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0 -#define 
A5XX_TEX_SAMP_0_WRAP_S__SHIFT 5 -static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val) -{ - return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK; -} -#define A5XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700 -#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT 8 -static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val) -{ - return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK; -} -#define A5XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800 -#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT 11 -static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val) -{ - return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK; -} -#define A5XX_TEX_SAMP_0_ANISO__MASK 0x0001c000 -#define A5XX_TEX_SAMP_0_ANISO__SHIFT 14 -static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val) -{ - return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK; -} -#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000 -#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19 -static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val) -{ - return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK; -} - -#define REG_A5XX_TEX_SAMP_1 0x00000001 -#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e -#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1 -static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val) -{ - return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK; -} -#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010 -#define A5XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020 -#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040 -#define A5XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00 -#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT 8 -static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val) -{ - return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK; -} -#define A5XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000 -#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT 20 -static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val) -{ - return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK; -} - -#define REG_A5XX_TEX_SAMP_2 0x00000002 -#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xffffff80 -#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 7 -static inline uint32_t A5XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val) -{ - return ((val) << A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK; -} - -#define REG_A5XX_TEX_SAMP_3 0x00000003 - -#define REG_A5XX_TEX_CONST_0 0x00000000 -#define A5XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003 -#define A5XX_TEX_CONST_0_TILE_MODE__SHIFT 0 -static inline uint32_t A5XX_TEX_CONST_0_TILE_MODE(enum a5xx_tile_mode val) -{ - return ((val) << A5XX_TEX_CONST_0_TILE_MODE__SHIFT) & A5XX_TEX_CONST_0_TILE_MODE__MASK; -} -#define A5XX_TEX_CONST_0_SRGB 0x00000004 -#define A5XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070 -#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT 4 -static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val) -{ - return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK; -} -#define A5XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380 -#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT 7 -static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val) -{ - return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK; -} -#define A5XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00 -#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT 10 -static 
inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val) -{ - return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK; -} -#define A5XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000 -#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT 13 -static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val) -{ - return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK; -} -#define A5XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000 -#define A5XX_TEX_CONST_0_MIPLVLS__SHIFT 16 -static inline uint32_t A5XX_TEX_CONST_0_MIPLVLS(uint32_t val) -{ - return ((val) << A5XX_TEX_CONST_0_MIPLVLS__SHIFT) & A5XX_TEX_CONST_0_MIPLVLS__MASK; -} -#define A5XX_TEX_CONST_0_SAMPLES__MASK 0x00300000 -#define A5XX_TEX_CONST_0_SAMPLES__SHIFT 20 -static inline uint32_t A5XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A5XX_TEX_CONST_0_SAMPLES__SHIFT) & A5XX_TEX_CONST_0_SAMPLES__MASK; -} -#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000 -#define A5XX_TEX_CONST_0_FMT__SHIFT 22 -static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val) -{ - return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK; -} -#define A5XX_TEX_CONST_0_SWAP__MASK 0xc0000000 -#define A5XX_TEX_CONST_0_SWAP__SHIFT 30 -static inline uint32_t A5XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A5XX_TEX_CONST_0_SWAP__SHIFT) & A5XX_TEX_CONST_0_SWAP__MASK; -} - -#define REG_A5XX_TEX_CONST_1 0x00000001 -#define A5XX_TEX_CONST_1_WIDTH__MASK 0x00007fff -#define A5XX_TEX_CONST_1_WIDTH__SHIFT 0 -static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val) -{ - return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK; -} -#define A5XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000 -#define A5XX_TEX_CONST_1_HEIGHT__SHIFT 15 -static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val) -{ - return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK; -} - -#define REG_A5XX_TEX_CONST_2 0x00000002 -#define A5XX_TEX_CONST_2_BUFFER 0x00000010 -#define A5XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f -#define A5XX_TEX_CONST_2_PITCHALIGN__SHIFT 0 -static inline uint32_t A5XX_TEX_CONST_2_PITCHALIGN(uint32_t val) -{ - return ((val) << A5XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A5XX_TEX_CONST_2_PITCHALIGN__MASK; -} -#define A5XX_TEX_CONST_2_PITCH__MASK 0x1fffff80 -#define A5XX_TEX_CONST_2_PITCH__SHIFT 7 -static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val) -{ - return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK; -} -#define A5XX_TEX_CONST_2_TYPE__MASK 0xe0000000 -#define A5XX_TEX_CONST_2_TYPE__SHIFT 29 -static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val) -{ - return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK; -} - -#define REG_A5XX_TEX_CONST_3 0x00000003 -#define A5XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff -#define A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0 -static inline uint32_t A5XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A5XX_TEX_CONST_3_ARRAY_PITCH__MASK; -} -#define A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK 0x07800000 -#define A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT 23 -static inline uint32_t A5XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK; -} -#define A5XX_TEX_CONST_3_TILE_ALL 0x08000000 -#define A5XX_TEX_CONST_3_FLAG 0x10000000 - -#define REG_A5XX_TEX_CONST_4 0x00000004 
-#define A5XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0 -#define A5XX_TEX_CONST_4_BASE_LO__SHIFT 5 -static inline uint32_t A5XX_TEX_CONST_4_BASE_LO(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A5XX_TEX_CONST_4_BASE_LO__SHIFT) & A5XX_TEX_CONST_4_BASE_LO__MASK; -} - -#define REG_A5XX_TEX_CONST_5 0x00000005 -#define A5XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff -#define A5XX_TEX_CONST_5_BASE_HI__SHIFT 0 -static inline uint32_t A5XX_TEX_CONST_5_BASE_HI(uint32_t val) -{ - return ((val) << A5XX_TEX_CONST_5_BASE_HI__SHIFT) & A5XX_TEX_CONST_5_BASE_HI__MASK; -} -#define A5XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000 -#define A5XX_TEX_CONST_5_DEPTH__SHIFT 17 -static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val) -{ - return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK; -} - -#define REG_A5XX_TEX_CONST_6 0x00000006 - -#define REG_A5XX_TEX_CONST_7 0x00000007 - -#define REG_A5XX_TEX_CONST_8 0x00000008 - -#define REG_A5XX_TEX_CONST_9 0x00000009 - -#define REG_A5XX_TEX_CONST_10 0x0000000a - -#define REG_A5XX_TEX_CONST_11 0x0000000b - -#define REG_A5XX_SSBO_0_0 0x00000000 -#define A5XX_SSBO_0_0_BASE_LO__MASK 0xffffffe0 -#define A5XX_SSBO_0_0_BASE_LO__SHIFT 5 -static inline uint32_t A5XX_SSBO_0_0_BASE_LO(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A5XX_SSBO_0_0_BASE_LO__SHIFT) & A5XX_SSBO_0_0_BASE_LO__MASK; -} - -#define REG_A5XX_SSBO_0_1 0x00000001 -#define A5XX_SSBO_0_1_PITCH__MASK 0x003fffff -#define A5XX_SSBO_0_1_PITCH__SHIFT 0 -static inline uint32_t A5XX_SSBO_0_1_PITCH(uint32_t val) -{ - return ((val) << A5XX_SSBO_0_1_PITCH__SHIFT) & A5XX_SSBO_0_1_PITCH__MASK; -} - -#define REG_A5XX_SSBO_0_2 0x00000002 -#define A5XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000 -#define A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12 -static inline uint32_t A5XX_SSBO_0_2_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A5XX_SSBO_0_2_ARRAY_PITCH__MASK; -} - -#define REG_A5XX_SSBO_0_3 0x00000003 -#define A5XX_SSBO_0_3_CPP__MASK 0x0000003f -#define A5XX_SSBO_0_3_CPP__SHIFT 0 -static inline uint32_t A5XX_SSBO_0_3_CPP(uint32_t val) -{ - return ((val) << A5XX_SSBO_0_3_CPP__SHIFT) & A5XX_SSBO_0_3_CPP__MASK; -} - -#define REG_A5XX_SSBO_1_0 0x00000000 -#define A5XX_SSBO_1_0_FMT__MASK 0x0000ff00 -#define A5XX_SSBO_1_0_FMT__SHIFT 8 -static inline uint32_t A5XX_SSBO_1_0_FMT(enum a5xx_tex_fmt val) -{ - return ((val) << A5XX_SSBO_1_0_FMT__SHIFT) & A5XX_SSBO_1_0_FMT__MASK; -} -#define A5XX_SSBO_1_0_WIDTH__MASK 0xffff0000 -#define A5XX_SSBO_1_0_WIDTH__SHIFT 16 -static inline uint32_t A5XX_SSBO_1_0_WIDTH(uint32_t val) -{ - return ((val) << A5XX_SSBO_1_0_WIDTH__SHIFT) & A5XX_SSBO_1_0_WIDTH__MASK; -} - -#define REG_A5XX_SSBO_1_1 0x00000001 -#define A5XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff -#define A5XX_SSBO_1_1_HEIGHT__SHIFT 0 -static inline uint32_t A5XX_SSBO_1_1_HEIGHT(uint32_t val) -{ - return ((val) << A5XX_SSBO_1_1_HEIGHT__SHIFT) & A5XX_SSBO_1_1_HEIGHT__MASK; -} -#define A5XX_SSBO_1_1_DEPTH__MASK 0xffff0000 -#define A5XX_SSBO_1_1_DEPTH__SHIFT 16 -static inline uint32_t A5XX_SSBO_1_1_DEPTH(uint32_t val) -{ - return ((val) << A5XX_SSBO_1_1_DEPTH__SHIFT) & A5XX_SSBO_1_1_DEPTH__MASK; -} - -#define REG_A5XX_SSBO_2_0 0x00000000 -#define A5XX_SSBO_2_0_BASE_LO__MASK 0xffffffff -#define A5XX_SSBO_2_0_BASE_LO__SHIFT 0 -static inline uint32_t A5XX_SSBO_2_0_BASE_LO(uint32_t val) -{ - return ((val) << A5XX_SSBO_2_0_BASE_LO__SHIFT) & A5XX_SSBO_2_0_BASE_LO__MASK; -} - -#define REG_A5XX_SSBO_2_1 0x00000001 -#define 
A5XX_SSBO_2_1_BASE_HI__MASK 0xffffffff
-#define A5XX_SSBO_2_1_BASE_HI__SHIFT 0
-static inline uint32_t A5XX_SSBO_2_1_BASE_HI(uint32_t val)
-{
-	return ((val) << A5XX_SSBO_2_1_BASE_HI__SHIFT) & A5XX_SSBO_2_1_BASE_HI__MASK;
-}
-
-#define REG_A5XX_UBO_0 0x00000000
-#define A5XX_UBO_0_BASE_LO__MASK 0xffffffff
-#define A5XX_UBO_0_BASE_LO__SHIFT 0
-static inline uint32_t A5XX_UBO_0_BASE_LO(uint32_t val)
-{
-	return ((val) << A5XX_UBO_0_BASE_LO__SHIFT) & A5XX_UBO_0_BASE_LO__MASK;
-}
-
-#define REG_A5XX_UBO_1 0x00000001
-#define A5XX_UBO_1_BASE_HI__MASK 0x0001ffff
-#define A5XX_UBO_1_BASE_HI__SHIFT 0
-static inline uint32_t A5XX_UBO_1_BASE_HI(uint32_t val)
-{
-	return ((val) << A5XX_UBO_1_BASE_HI__SHIFT) & A5XX_UBO_1_BASE_HI__MASK;
-}
-
-#ifdef __cplusplus
-#endif
-
-#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
deleted file mode 100644
index 92e23bf245..0000000000
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ /dev/null
@@ -1,11858 +0,0 @@
-#ifndef A6XX_XML
-#define A6XX_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
-http://gitlab.freedesktop.org/mesa/mesa/
-git clone https://gitlab.freedesktop.org/mesa/mesa.git
-
-The rules-ng-ng source files this header was generated from are:
-
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 243381 bytes, from Sat Feb 24 09:06:40 2024)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85856 bytes, from Fri Feb 23 13:07:00 2024)
-
-Copyright (C) 2013-2024 by the following authors:
-- Rob Clark Rob Clark
-- Ilia Mirkin Ilia Mirkin
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- -*/ - -#ifdef __KERNEL__ -#include -#define assert(x) BUG_ON(!(x)) -#else -#include -#endif - -#ifdef __cplusplus -#define __struct_cast(X) -#else -#define __struct_cast(X) (struct X) -#endif - -enum a6xx_tile_mode { - TILE6_LINEAR = 0, - TILE6_2 = 2, - TILE6_3 = 3, -}; - -enum a6xx_format { - FMT6_A8_UNORM = 2, - FMT6_8_UNORM = 3, - FMT6_8_SNORM = 4, - FMT6_8_UINT = 5, - FMT6_8_SINT = 6, - FMT6_4_4_4_4_UNORM = 8, - FMT6_5_5_5_1_UNORM = 10, - FMT6_1_5_5_5_UNORM = 12, - FMT6_5_6_5_UNORM = 14, - FMT6_8_8_UNORM = 15, - FMT6_8_8_SNORM = 16, - FMT6_8_8_UINT = 17, - FMT6_8_8_SINT = 18, - FMT6_L8_A8_UNORM = 19, - FMT6_16_UNORM = 21, - FMT6_16_SNORM = 22, - FMT6_16_FLOAT = 23, - FMT6_16_UINT = 24, - FMT6_16_SINT = 25, - FMT6_8_8_8_UNORM = 33, - FMT6_8_8_8_SNORM = 34, - FMT6_8_8_8_UINT = 35, - FMT6_8_8_8_SINT = 36, - FMT6_8_8_8_8_UNORM = 48, - FMT6_8_8_8_X8_UNORM = 49, - FMT6_8_8_8_8_SNORM = 50, - FMT6_8_8_8_8_UINT = 51, - FMT6_8_8_8_8_SINT = 52, - FMT6_9_9_9_E5_FLOAT = 53, - FMT6_10_10_10_2_UNORM = 54, - FMT6_10_10_10_2_UNORM_DEST = 55, - FMT6_10_10_10_2_SNORM = 57, - FMT6_10_10_10_2_UINT = 58, - FMT6_10_10_10_2_SINT = 59, - FMT6_11_11_10_FLOAT = 66, - FMT6_16_16_UNORM = 67, - FMT6_16_16_SNORM = 68, - FMT6_16_16_FLOAT = 69, - FMT6_16_16_UINT = 70, - FMT6_16_16_SINT = 71, - FMT6_32_UNORM = 72, - FMT6_32_SNORM = 73, - FMT6_32_FLOAT = 74, - FMT6_32_UINT = 75, - FMT6_32_SINT = 76, - FMT6_32_FIXED = 77, - FMT6_16_16_16_UNORM = 88, - FMT6_16_16_16_SNORM = 89, - FMT6_16_16_16_FLOAT = 90, - FMT6_16_16_16_UINT = 91, - FMT6_16_16_16_SINT = 92, - FMT6_16_16_16_16_UNORM = 96, - FMT6_16_16_16_16_SNORM = 97, - FMT6_16_16_16_16_FLOAT = 98, - FMT6_16_16_16_16_UINT = 99, - FMT6_16_16_16_16_SINT = 100, - FMT6_32_32_UNORM = 101, - FMT6_32_32_SNORM = 102, - FMT6_32_32_FLOAT = 103, - FMT6_32_32_UINT = 104, - FMT6_32_32_SINT = 105, - FMT6_32_32_FIXED = 106, - FMT6_32_32_32_UNORM = 112, - FMT6_32_32_32_SNORM = 113, - FMT6_32_32_32_UINT = 114, - FMT6_32_32_32_SINT = 115, - FMT6_32_32_32_FLOAT = 116, - FMT6_32_32_32_FIXED = 117, - FMT6_32_32_32_32_UNORM = 128, - FMT6_32_32_32_32_SNORM = 129, - FMT6_32_32_32_32_FLOAT = 130, - FMT6_32_32_32_32_UINT = 131, - FMT6_32_32_32_32_SINT = 132, - FMT6_32_32_32_32_FIXED = 133, - FMT6_G8R8B8R8_422_UNORM = 140, - FMT6_R8G8R8B8_422_UNORM = 141, - FMT6_R8_G8B8_2PLANE_420_UNORM = 142, - FMT6_NV21 = 143, - FMT6_R8_G8_B8_3PLANE_420_UNORM = 144, - FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8 = 145, - FMT6_NV12_Y = 148, - FMT6_NV12_UV = 149, - FMT6_NV12_VU = 150, - FMT6_NV12_4R = 151, - FMT6_NV12_4R_Y = 152, - FMT6_NV12_4R_UV = 153, - FMT6_P010 = 154, - FMT6_P010_Y = 155, - FMT6_P010_UV = 156, - FMT6_TP10 = 157, - FMT6_TP10_Y = 158, - FMT6_TP10_UV = 159, - FMT6_Z24_UNORM_S8_UINT = 160, - FMT6_ETC2_RG11_UNORM = 171, - FMT6_ETC2_RG11_SNORM = 172, - FMT6_ETC2_R11_UNORM = 173, - FMT6_ETC2_R11_SNORM = 174, - FMT6_ETC1 = 175, - FMT6_ETC2_RGB8 = 176, - FMT6_ETC2_RGBA8 = 177, - FMT6_ETC2_RGB8A1 = 178, - FMT6_DXT1 = 179, - FMT6_DXT3 = 180, - FMT6_DXT5 = 181, - FMT6_RGTC1_UNORM = 183, - FMT6_RGTC1_SNORM = 184, - FMT6_RGTC2_UNORM = 187, - FMT6_RGTC2_SNORM = 188, - FMT6_BPTC_UFLOAT = 190, - FMT6_BPTC_FLOAT = 191, - FMT6_BPTC = 192, - FMT6_ASTC_4x4 = 193, - FMT6_ASTC_5x4 = 194, - FMT6_ASTC_5x5 = 195, - FMT6_ASTC_6x5 = 196, - FMT6_ASTC_6x6 = 197, - FMT6_ASTC_8x5 = 198, - FMT6_ASTC_8x6 = 199, - FMT6_ASTC_8x8 = 200, - FMT6_ASTC_10x5 = 201, - FMT6_ASTC_10x6 = 202, - FMT6_ASTC_10x8 = 203, - FMT6_ASTC_10x10 = 204, - FMT6_ASTC_12x10 = 205, - FMT6_ASTC_12x12 = 206, - FMT6_Z24_UINT_S8_UINT = 234, - FMT6_NONE = 255, -}; - 
-enum a6xx_polygon_mode { - POLYMODE6_POINTS = 1, - POLYMODE6_LINES = 2, - POLYMODE6_TRIANGLES = 3, -}; - -enum a6xx_depth_format { - DEPTH6_NONE = 0, - DEPTH6_16 = 1, - DEPTH6_24_8 = 2, - DEPTH6_32 = 4, -}; - -enum a6xx_shader_id { - A6XX_TP0_TMO_DATA = 9, - A6XX_TP0_SMO_DATA = 10, - A6XX_TP0_MIPMAP_BASE_DATA = 11, - A6XX_TP1_TMO_DATA = 25, - A6XX_TP1_SMO_DATA = 26, - A6XX_TP1_MIPMAP_BASE_DATA = 27, - A6XX_SP_INST_DATA = 41, - A6XX_SP_LB_0_DATA = 42, - A6XX_SP_LB_1_DATA = 43, - A6XX_SP_LB_2_DATA = 44, - A6XX_SP_LB_3_DATA = 45, - A6XX_SP_LB_4_DATA = 46, - A6XX_SP_LB_5_DATA = 47, - A6XX_SP_CB_BINDLESS_DATA = 48, - A6XX_SP_CB_LEGACY_DATA = 49, - A6XX_SP_UAV_DATA = 50, - A6XX_SP_INST_TAG = 51, - A6XX_SP_CB_BINDLESS_TAG = 52, - A6XX_SP_TMO_UMO_TAG = 53, - A6XX_SP_SMO_TAG = 54, - A6XX_SP_STATE_DATA = 55, - A6XX_HLSQ_CHUNK_CVS_RAM = 73, - A6XX_HLSQ_CHUNK_CPS_RAM = 74, - A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 75, - A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 76, - A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 77, - A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 78, - A6XX_HLSQ_CVS_MISC_RAM = 80, - A6XX_HLSQ_CPS_MISC_RAM = 81, - A6XX_HLSQ_INST_RAM = 82, - A6XX_HLSQ_GFX_CVS_CONST_RAM = 83, - A6XX_HLSQ_GFX_CPS_CONST_RAM = 84, - A6XX_HLSQ_CVS_MISC_RAM_TAG = 85, - A6XX_HLSQ_CPS_MISC_RAM_TAG = 86, - A6XX_HLSQ_INST_RAM_TAG = 87, - A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 88, - A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 89, - A6XX_HLSQ_PWR_REST_RAM = 90, - A6XX_HLSQ_PWR_REST_TAG = 91, - A6XX_HLSQ_DATAPATH_META = 96, - A6XX_HLSQ_FRONTEND_META = 97, - A6XX_HLSQ_INDIRECT_META = 98, - A6XX_HLSQ_BACKEND_META = 99, - A6XX_SP_LB_6_DATA = 112, - A6XX_SP_LB_7_DATA = 113, - A6XX_HLSQ_INST_RAM_1 = 115, -}; - -enum a7xx_statetype_id { - A7XX_TP0_NCTX_REG = 0, - A7XX_TP0_CTX0_3D_CVS_REG = 1, - A7XX_TP0_CTX0_3D_CPS_REG = 2, - A7XX_TP0_CTX1_3D_CVS_REG = 3, - A7XX_TP0_CTX1_3D_CPS_REG = 4, - A7XX_TP0_CTX2_3D_CPS_REG = 5, - A7XX_TP0_CTX3_3D_CPS_REG = 6, - A7XX_TP0_TMO_DATA = 9, - A7XX_TP0_SMO_DATA = 10, - A7XX_TP0_MIPMAP_BASE_DATA = 11, - A7XX_SP_NCTX_REG = 32, - A7XX_SP_CTX0_3D_CVS_REG = 33, - A7XX_SP_CTX0_3D_CPS_REG = 34, - A7XX_SP_CTX1_3D_CVS_REG = 35, - A7XX_SP_CTX1_3D_CPS_REG = 36, - A7XX_SP_CTX2_3D_CPS_REG = 37, - A7XX_SP_CTX3_3D_CPS_REG = 38, - A7XX_SP_INST_DATA = 39, - A7XX_SP_INST_DATA_1 = 40, - A7XX_SP_LB_0_DATA = 41, - A7XX_SP_LB_1_DATA = 42, - A7XX_SP_LB_2_DATA = 43, - A7XX_SP_LB_3_DATA = 44, - A7XX_SP_LB_4_DATA = 45, - A7XX_SP_LB_5_DATA = 46, - A7XX_SP_LB_6_DATA = 47, - A7XX_SP_LB_7_DATA = 48, - A7XX_SP_CB_RAM = 49, - A7XX_SP_LB_13_DATA = 50, - A7XX_SP_LB_14_DATA = 51, - A7XX_SP_INST_TAG = 52, - A7XX_SP_INST_DATA_2 = 53, - A7XX_SP_TMO_TAG = 54, - A7XX_SP_SMO_TAG = 55, - A7XX_SP_STATE_DATA = 56, - A7XX_SP_HWAVE_RAM = 57, - A7XX_SP_L0_INST_BUF = 58, - A7XX_SP_LB_8_DATA = 59, - A7XX_SP_LB_9_DATA = 60, - A7XX_SP_LB_10_DATA = 61, - A7XX_SP_LB_11_DATA = 62, - A7XX_SP_LB_12_DATA = 63, - A7XX_HLSQ_DATAPATH_DSTR_META = 64, - A7XX_HLSQ_L2STC_TAG_RAM = 67, - A7XX_HLSQ_L2STC_INFO_CMD = 68, - A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG = 69, - A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG = 70, - A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM = 71, - A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM = 72, - A7XX_HLSQ_CHUNK_CVS_RAM = 73, - A7XX_HLSQ_CHUNK_CPS_RAM = 74, - A7XX_HLSQ_CHUNK_CVS_RAM_TAG = 75, - A7XX_HLSQ_CHUNK_CPS_RAM_TAG = 76, - A7XX_HLSQ_ICB_CVS_CB_BASE_TAG = 77, - A7XX_HLSQ_ICB_CPS_CB_BASE_TAG = 78, - A7XX_HLSQ_CVS_MISC_RAM = 79, - A7XX_HLSQ_CPS_MISC_RAM = 80, - A7XX_HLSQ_CPS_MISC_RAM_1 = 81, - A7XX_HLSQ_INST_RAM = 82, - A7XX_HLSQ_GFX_CVS_CONST_RAM = 83, - A7XX_HLSQ_GFX_CPS_CONST_RAM = 84, - A7XX_HLSQ_CVS_MISC_RAM_TAG = 85, 
- A7XX_HLSQ_CPS_MISC_RAM_TAG = 86, - A7XX_HLSQ_INST_RAM_TAG = 87, - A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 88, - A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 89, - A7XX_HLSQ_GFX_LOCAL_MISC_RAM = 90, - A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG = 91, - A7XX_HLSQ_INST_RAM_1 = 92, - A7XX_HLSQ_STPROC_META = 93, - A7XX_HLSQ_BV_BE_META = 94, - A7XX_HLSQ_INST_RAM_2 = 95, - A7XX_HLSQ_DATAPATH_META = 96, - A7XX_HLSQ_FRONTEND_META = 97, - A7XX_HLSQ_INDIRECT_META = 98, - A7XX_HLSQ_BACKEND_META = 99, -}; - -enum a6xx_debugbus_id { - A6XX_DBGBUS_CP = 1, - A6XX_DBGBUS_RBBM = 2, - A6XX_DBGBUS_VBIF = 3, - A6XX_DBGBUS_HLSQ = 4, - A6XX_DBGBUS_UCHE = 5, - A6XX_DBGBUS_DPM = 6, - A6XX_DBGBUS_TESS = 7, - A6XX_DBGBUS_PC = 8, - A6XX_DBGBUS_VFDP = 9, - A6XX_DBGBUS_VPC = 10, - A6XX_DBGBUS_TSE = 11, - A6XX_DBGBUS_RAS = 12, - A6XX_DBGBUS_VSC = 13, - A6XX_DBGBUS_COM = 14, - A6XX_DBGBUS_LRZ = 16, - A6XX_DBGBUS_A2D = 17, - A6XX_DBGBUS_CCUFCHE = 18, - A6XX_DBGBUS_GMU_CX = 19, - A6XX_DBGBUS_RBP = 20, - A6XX_DBGBUS_DCS = 21, - A6XX_DBGBUS_DBGC = 22, - A6XX_DBGBUS_CX = 23, - A6XX_DBGBUS_GMU_GX = 24, - A6XX_DBGBUS_TPFCHE = 25, - A6XX_DBGBUS_GBIF_GX = 26, - A6XX_DBGBUS_GPC = 29, - A6XX_DBGBUS_LARC = 30, - A6XX_DBGBUS_HLSQ_SPTP = 31, - A6XX_DBGBUS_RB_0 = 32, - A6XX_DBGBUS_RB_1 = 33, - A6XX_DBGBUS_RB_2 = 34, - A6XX_DBGBUS_UCHE_WRAPPER = 36, - A6XX_DBGBUS_CCU_0 = 40, - A6XX_DBGBUS_CCU_1 = 41, - A6XX_DBGBUS_CCU_2 = 42, - A6XX_DBGBUS_VFD_0 = 56, - A6XX_DBGBUS_VFD_1 = 57, - A6XX_DBGBUS_VFD_2 = 58, - A6XX_DBGBUS_VFD_3 = 59, - A6XX_DBGBUS_VFD_4 = 60, - A6XX_DBGBUS_VFD_5 = 61, - A6XX_DBGBUS_SP_0 = 64, - A6XX_DBGBUS_SP_1 = 65, - A6XX_DBGBUS_SP_2 = 66, - A6XX_DBGBUS_TPL1_0 = 72, - A6XX_DBGBUS_TPL1_1 = 73, - A6XX_DBGBUS_TPL1_2 = 74, - A6XX_DBGBUS_TPL1_3 = 75, - A6XX_DBGBUS_TPL1_4 = 76, - A6XX_DBGBUS_TPL1_5 = 77, - A6XX_DBGBUS_SPTP_0 = 88, - A6XX_DBGBUS_SPTP_1 = 89, - A6XX_DBGBUS_SPTP_2 = 90, - A6XX_DBGBUS_SPTP_3 = 91, - A6XX_DBGBUS_SPTP_4 = 92, - A6XX_DBGBUS_SPTP_5 = 93, -}; - -enum a7xx_state_location { - A7XX_HLSQ_STATE = 0, - A7XX_HLSQ_DP = 1, - A7XX_SP_TOP = 2, - A7XX_USPTP = 3, -}; - -enum a7xx_pipe { - A7XX_PIPE_NONE = 0, - A7XX_PIPE_BR = 1, - A7XX_PIPE_BV = 2, - A7XX_PIPE_LPAC = 3, -}; - -enum a7xx_cluster { - A7XX_CLUSTER_NONE = 0, - A7XX_CLUSTER_FE = 1, - A7XX_CLUSTER_SP_VS = 2, - A7XX_CLUSTER_PC_VS = 3, - A7XX_CLUSTER_GRAS = 4, - A7XX_CLUSTER_SP_PS = 5, - A7XX_CLUSTER_VPC_PS = 6, - A7XX_CLUSTER_PS = 7, -}; - -enum a7xx_debugbus_id { - A7XX_DBGBUS_CP_0_0 = 1, - A7XX_DBGBUS_CP_0_1 = 2, - A7XX_DBGBUS_RBBM = 3, - A7XX_DBGBUS_GBIF_GX = 5, - A7XX_DBGBUS_GBIF_CX = 6, - A7XX_DBGBUS_HLSQ = 7, - A7XX_DBGBUS_UCHE_0 = 9, - A7XX_DBGBUS_UCHE_1 = 10, - A7XX_DBGBUS_TESS_BR = 13, - A7XX_DBGBUS_TESS_BV = 14, - A7XX_DBGBUS_PC_BR = 17, - A7XX_DBGBUS_PC_BV = 18, - A7XX_DBGBUS_VFDP_BR = 21, - A7XX_DBGBUS_VFDP_BV = 22, - A7XX_DBGBUS_VPC_BR = 25, - A7XX_DBGBUS_VPC_BV = 26, - A7XX_DBGBUS_TSE_BR = 29, - A7XX_DBGBUS_TSE_BV = 30, - A7XX_DBGBUS_RAS_BR = 33, - A7XX_DBGBUS_RAS_BV = 34, - A7XX_DBGBUS_VSC = 37, - A7XX_DBGBUS_COM_0 = 39, - A7XX_DBGBUS_LRZ_BR = 43, - A7XX_DBGBUS_LRZ_BV = 44, - A7XX_DBGBUS_UFC_0 = 47, - A7XX_DBGBUS_UFC_1 = 48, - A7XX_DBGBUS_GMU_GX = 55, - A7XX_DBGBUS_DBGC = 59, - A7XX_DBGBUS_CX = 60, - A7XX_DBGBUS_GMU_CX = 61, - A7XX_DBGBUS_GPC_BR = 62, - A7XX_DBGBUS_GPC_BV = 63, - A7XX_DBGBUS_LARC = 66, - A7XX_DBGBUS_HLSQ_SPTP = 68, - A7XX_DBGBUS_RB_0 = 70, - A7XX_DBGBUS_RB_1 = 71, - A7XX_DBGBUS_RB_2 = 72, - A7XX_DBGBUS_RB_3 = 73, - A7XX_DBGBUS_RB_4 = 74, - A7XX_DBGBUS_RB_5 = 75, - A7XX_DBGBUS_UCHE_WRAPPER = 102, - A7XX_DBGBUS_CCU_0 = 106, - A7XX_DBGBUS_CCU_1 = 107, - 
A7XX_DBGBUS_CCU_2 = 108, - A7XX_DBGBUS_CCU_3 = 109, - A7XX_DBGBUS_CCU_4 = 110, - A7XX_DBGBUS_CCU_5 = 111, - A7XX_DBGBUS_VFD_BR_0 = 138, - A7XX_DBGBUS_VFD_BR_1 = 139, - A7XX_DBGBUS_VFD_BR_2 = 140, - A7XX_DBGBUS_VFD_BR_3 = 141, - A7XX_DBGBUS_VFD_BR_4 = 142, - A7XX_DBGBUS_VFD_BR_5 = 143, - A7XX_DBGBUS_VFD_BR_6 = 144, - A7XX_DBGBUS_VFD_BR_7 = 145, - A7XX_DBGBUS_VFD_BV_0 = 202, - A7XX_DBGBUS_VFD_BV_1 = 203, - A7XX_DBGBUS_VFD_BV_2 = 204, - A7XX_DBGBUS_VFD_BV_3 = 205, - A7XX_DBGBUS_USP_0 = 234, - A7XX_DBGBUS_USP_1 = 235, - A7XX_DBGBUS_USP_2 = 236, - A7XX_DBGBUS_USP_3 = 237, - A7XX_DBGBUS_USP_4 = 238, - A7XX_DBGBUS_USP_5 = 239, - A7XX_DBGBUS_TP_0 = 266, - A7XX_DBGBUS_TP_1 = 267, - A7XX_DBGBUS_TP_2 = 268, - A7XX_DBGBUS_TP_3 = 269, - A7XX_DBGBUS_TP_4 = 270, - A7XX_DBGBUS_TP_5 = 271, - A7XX_DBGBUS_TP_6 = 272, - A7XX_DBGBUS_TP_7 = 273, - A7XX_DBGBUS_TP_8 = 274, - A7XX_DBGBUS_TP_9 = 275, - A7XX_DBGBUS_TP_10 = 276, - A7XX_DBGBUS_TP_11 = 277, - A7XX_DBGBUS_USPTP_0 = 330, - A7XX_DBGBUS_USPTP_1 = 331, - A7XX_DBGBUS_USPTP_2 = 332, - A7XX_DBGBUS_USPTP_3 = 333, - A7XX_DBGBUS_USPTP_4 = 334, - A7XX_DBGBUS_USPTP_5 = 335, - A7XX_DBGBUS_USPTP_6 = 336, - A7XX_DBGBUS_USPTP_7 = 337, - A7XX_DBGBUS_USPTP_8 = 338, - A7XX_DBGBUS_USPTP_9 = 339, - A7XX_DBGBUS_USPTP_10 = 340, - A7XX_DBGBUS_USPTP_11 = 341, - A7XX_DBGBUS_CCHE_0 = 396, - A7XX_DBGBUS_CCHE_1 = 397, - A7XX_DBGBUS_CCHE_2 = 398, - A7XX_DBGBUS_VPC_DSTR_0 = 408, - A7XX_DBGBUS_VPC_DSTR_1 = 409, - A7XX_DBGBUS_VPC_DSTR_2 = 410, - A7XX_DBGBUS_HLSQ_DP_STR_0 = 411, - A7XX_DBGBUS_HLSQ_DP_STR_1 = 412, - A7XX_DBGBUS_HLSQ_DP_STR_2 = 413, - A7XX_DBGBUS_HLSQ_DP_STR_3 = 414, - A7XX_DBGBUS_HLSQ_DP_STR_4 = 415, - A7XX_DBGBUS_HLSQ_DP_STR_5 = 416, - A7XX_DBGBUS_UFC_DSTR_0 = 443, - A7XX_DBGBUS_UFC_DSTR_1 = 444, - A7XX_DBGBUS_UFC_DSTR_2 = 445, - A7XX_DBGBUS_CGC_SUBCORE = 446, - A7XX_DBGBUS_CGC_CORE = 447, -}; - -enum a6xx_cp_perfcounter_select { - PERF_CP_ALWAYS_COUNT = 0, - PERF_CP_BUSY_GFX_CORE_IDLE = 1, - PERF_CP_BUSY_CYCLES = 2, - PERF_CP_NUM_PREEMPTIONS = 3, - PERF_CP_PREEMPTION_REACTION_DELAY = 4, - PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 5, - PERF_CP_PREEMPTION_SWITCH_IN_TIME = 6, - PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 7, - PERF_CP_PREDICATED_DRAWS_KILLED = 8, - PERF_CP_MODE_SWITCH = 9, - PERF_CP_ZPASS_DONE = 10, - PERF_CP_CONTEXT_DONE = 11, - PERF_CP_CACHE_FLUSH = 12, - PERF_CP_LONG_PREEMPTIONS = 13, - PERF_CP_SQE_I_CACHE_STARVE = 14, - PERF_CP_SQE_IDLE = 15, - PERF_CP_SQE_PM4_STARVE_RB_IB = 16, - PERF_CP_SQE_PM4_STARVE_SDS = 17, - PERF_CP_SQE_MRB_STARVE = 18, - PERF_CP_SQE_RRB_STARVE = 19, - PERF_CP_SQE_VSD_STARVE = 20, - PERF_CP_VSD_DECODE_STARVE = 21, - PERF_CP_SQE_PIPE_OUT_STALL = 22, - PERF_CP_SQE_SYNC_STALL = 23, - PERF_CP_SQE_PM4_WFI_STALL = 24, - PERF_CP_SQE_SYS_WFI_STALL = 25, - PERF_CP_SQE_T4_EXEC = 26, - PERF_CP_SQE_LOAD_STATE_EXEC = 27, - PERF_CP_SQE_SAVE_SDS_STATE = 28, - PERF_CP_SQE_DRAW_EXEC = 29, - PERF_CP_SQE_CTXT_REG_BUNCH_EXEC = 30, - PERF_CP_SQE_EXEC_PROFILED = 31, - PERF_CP_MEMORY_POOL_EMPTY = 32, - PERF_CP_MEMORY_POOL_SYNC_STALL = 33, - PERF_CP_MEMORY_POOL_ABOVE_THRESH = 34, - PERF_CP_AHB_WR_STALL_PRE_DRAWS = 35, - PERF_CP_AHB_STALL_SQE_GMU = 36, - PERF_CP_AHB_STALL_SQE_WR_OTHER = 37, - PERF_CP_AHB_STALL_SQE_RD_OTHER = 38, - PERF_CP_CLUSTER0_EMPTY = 39, - PERF_CP_CLUSTER1_EMPTY = 40, - PERF_CP_CLUSTER2_EMPTY = 41, - PERF_CP_CLUSTER3_EMPTY = 42, - PERF_CP_CLUSTER4_EMPTY = 43, - PERF_CP_CLUSTER5_EMPTY = 44, - PERF_CP_PM4_DATA = 45, - PERF_CP_PM4_HEADERS = 46, - PERF_CP_VBIF_READ_BEATS = 47, - PERF_CP_VBIF_WRITE_BEATS = 48, - PERF_CP_SQE_INSTR_COUNTER = 49, -}; - 
-enum a6xx_rbbm_perfcounter_select { - PERF_RBBM_ALWAYS_COUNT = 0, - PERF_RBBM_ALWAYS_ON = 1, - PERF_RBBM_TSE_BUSY = 2, - PERF_RBBM_RAS_BUSY = 3, - PERF_RBBM_PC_DCALL_BUSY = 4, - PERF_RBBM_PC_VSD_BUSY = 5, - PERF_RBBM_STATUS_MASKED = 6, - PERF_RBBM_COM_BUSY = 7, - PERF_RBBM_DCOM_BUSY = 8, - PERF_RBBM_VBIF_BUSY = 9, - PERF_RBBM_VSC_BUSY = 10, - PERF_RBBM_TESS_BUSY = 11, - PERF_RBBM_UCHE_BUSY = 12, - PERF_RBBM_HLSQ_BUSY = 13, -}; - -enum a6xx_pc_perfcounter_select { - PERF_PC_BUSY_CYCLES = 0, - PERF_PC_WORKING_CYCLES = 1, - PERF_PC_STALL_CYCLES_VFD = 2, - PERF_PC_STALL_CYCLES_TSE = 3, - PERF_PC_STALL_CYCLES_VPC = 4, - PERF_PC_STALL_CYCLES_UCHE = 5, - PERF_PC_STALL_CYCLES_TESS = 6, - PERF_PC_STALL_CYCLES_TSE_ONLY = 7, - PERF_PC_STALL_CYCLES_VPC_ONLY = 8, - PERF_PC_PASS1_TF_STALL_CYCLES = 9, - PERF_PC_STARVE_CYCLES_FOR_INDEX = 10, - PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11, - PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12, - PERF_PC_STARVE_CYCLES_FOR_POSITION = 13, - PERF_PC_STARVE_CYCLES_DI = 14, - PERF_PC_VIS_STREAMS_LOADED = 15, - PERF_PC_INSTANCES = 16, - PERF_PC_VPC_PRIMITIVES = 17, - PERF_PC_DEAD_PRIM = 18, - PERF_PC_LIVE_PRIM = 19, - PERF_PC_VERTEX_HITS = 20, - PERF_PC_IA_VERTICES = 21, - PERF_PC_IA_PRIMITIVES = 22, - PERF_PC_GS_PRIMITIVES = 23, - PERF_PC_HS_INVOCATIONS = 24, - PERF_PC_DS_INVOCATIONS = 25, - PERF_PC_VS_INVOCATIONS = 26, - PERF_PC_GS_INVOCATIONS = 27, - PERF_PC_DS_PRIMITIVES = 28, - PERF_PC_VPC_POS_DATA_TRANSACTION = 29, - PERF_PC_3D_DRAWCALLS = 30, - PERF_PC_2D_DRAWCALLS = 31, - PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32, - PERF_TESS_BUSY_CYCLES = 33, - PERF_TESS_WORKING_CYCLES = 34, - PERF_TESS_STALL_CYCLES_PC = 35, - PERF_TESS_STARVE_CYCLES_PC = 36, - PERF_PC_TSE_TRANSACTION = 37, - PERF_PC_TSE_VERTEX = 38, - PERF_PC_TESS_PC_UV_TRANS = 39, - PERF_PC_TESS_PC_UV_PATCHES = 40, - PERF_PC_TESS_FACTOR_TRANS = 41, -}; - -enum a6xx_vfd_perfcounter_select { - PERF_VFD_BUSY_CYCLES = 0, - PERF_VFD_STALL_CYCLES_UCHE = 1, - PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2, - PERF_VFD_STALL_CYCLES_SP_INFO = 3, - PERF_VFD_STALL_CYCLES_SP_ATTR = 4, - PERF_VFD_STARVE_CYCLES_UCHE = 5, - PERF_VFD_RBUFFER_FULL = 6, - PERF_VFD_ATTR_INFO_FIFO_FULL = 7, - PERF_VFD_DECODED_ATTRIBUTE_BYTES = 8, - PERF_VFD_NUM_ATTRIBUTES = 9, - PERF_VFD_UPPER_SHADER_FIBERS = 10, - PERF_VFD_LOWER_SHADER_FIBERS = 11, - PERF_VFD_MODE_0_FIBERS = 12, - PERF_VFD_MODE_1_FIBERS = 13, - PERF_VFD_MODE_2_FIBERS = 14, - PERF_VFD_MODE_3_FIBERS = 15, - PERF_VFD_MODE_4_FIBERS = 16, - PERF_VFD_TOTAL_VERTICES = 17, - PERF_VFDP_STALL_CYCLES_VFD = 18, - PERF_VFDP_STALL_CYCLES_VFD_INDEX = 19, - PERF_VFDP_STALL_CYCLES_VFD_PROG = 20, - PERF_VFDP_STARVE_CYCLES_PC = 21, - PERF_VFDP_VS_STAGE_WAVES = 22, -}; - -enum a6xx_hlsq_perfcounter_select { - PERF_HLSQ_BUSY_CYCLES = 0, - PERF_HLSQ_STALL_CYCLES_UCHE = 1, - PERF_HLSQ_STALL_CYCLES_SP_STATE = 2, - PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3, - PERF_HLSQ_UCHE_LATENCY_CYCLES = 4, - PERF_HLSQ_UCHE_LATENCY_COUNT = 5, - PERF_HLSQ_FS_STAGE_1X_WAVES = 6, - PERF_HLSQ_FS_STAGE_2X_WAVES = 7, - PERF_HLSQ_QUADS = 8, - PERF_HLSQ_CS_INVOCATIONS = 9, - PERF_HLSQ_COMPUTE_DRAWCALLS = 10, - PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING = 11, - PERF_HLSQ_DUAL_FS_PROG_ACTIVE = 12, - PERF_HLSQ_DUAL_VS_PROG_ACTIVE = 13, - PERF_HLSQ_FS_BATCH_COUNT_ZERO = 14, - PERF_HLSQ_VS_BATCH_COUNT_ZERO = 15, - PERF_HLSQ_WAVE_PENDING_NO_QUAD = 16, - PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE = 17, - PERF_HLSQ_STALL_CYCLES_VPC = 18, - PERF_HLSQ_PIXELS = 19, - PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC = 20, -}; - -enum a6xx_vpc_perfcounter_select { - 
PERF_VPC_BUSY_CYCLES = 0, - PERF_VPC_WORKING_CYCLES = 1, - PERF_VPC_STALL_CYCLES_UCHE = 2, - PERF_VPC_STALL_CYCLES_VFD_WACK = 3, - PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4, - PERF_VPC_STALL_CYCLES_PC = 5, - PERF_VPC_STALL_CYCLES_SP_LM = 6, - PERF_VPC_STARVE_CYCLES_SP = 7, - PERF_VPC_STARVE_CYCLES_LRZ = 8, - PERF_VPC_PC_PRIMITIVES = 9, - PERF_VPC_SP_COMPONENTS = 10, - PERF_VPC_STALL_CYCLES_VPCRAM_POS = 11, - PERF_VPC_LRZ_ASSIGN_PRIMITIVES = 12, - PERF_VPC_RB_VISIBLE_PRIMITIVES = 13, - PERF_VPC_LM_TRANSACTION = 14, - PERF_VPC_STREAMOUT_TRANSACTION = 15, - PERF_VPC_VS_BUSY_CYCLES = 16, - PERF_VPC_PS_BUSY_CYCLES = 17, - PERF_VPC_VS_WORKING_CYCLES = 18, - PERF_VPC_PS_WORKING_CYCLES = 19, - PERF_VPC_STARVE_CYCLES_RB = 20, - PERF_VPC_NUM_VPCRAM_READ_POS = 21, - PERF_VPC_WIT_FULL_CYCLES = 22, - PERF_VPC_VPCRAM_FULL_CYCLES = 23, - PERF_VPC_LM_FULL_WAIT_FOR_INTP_END = 24, - PERF_VPC_NUM_VPCRAM_WRITE = 25, - PERF_VPC_NUM_VPCRAM_READ_SO = 26, - PERF_VPC_NUM_ATTR_REQ_LM = 27, -}; - -enum a6xx_tse_perfcounter_select { - PERF_TSE_BUSY_CYCLES = 0, - PERF_TSE_CLIPPING_CYCLES = 1, - PERF_TSE_STALL_CYCLES_RAS = 2, - PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3, - PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4, - PERF_TSE_STARVE_CYCLES_PC = 5, - PERF_TSE_INPUT_PRIM = 6, - PERF_TSE_INPUT_NULL_PRIM = 7, - PERF_TSE_TRIVAL_REJ_PRIM = 8, - PERF_TSE_CLIPPED_PRIM = 9, - PERF_TSE_ZERO_AREA_PRIM = 10, - PERF_TSE_FACENESS_CULLED_PRIM = 11, - PERF_TSE_ZERO_PIXEL_PRIM = 12, - PERF_TSE_OUTPUT_NULL_PRIM = 13, - PERF_TSE_OUTPUT_VISIBLE_PRIM = 14, - PERF_TSE_CINVOCATION = 15, - PERF_TSE_CPRIMITIVES = 16, - PERF_TSE_2D_INPUT_PRIM = 17, - PERF_TSE_2D_ALIVE_CYCLES = 18, - PERF_TSE_CLIP_PLANES = 19, -}; - -enum a6xx_ras_perfcounter_select { - PERF_RAS_BUSY_CYCLES = 0, - PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1, - PERF_RAS_STALL_CYCLES_LRZ = 2, - PERF_RAS_STARVE_CYCLES_TSE = 3, - PERF_RAS_SUPER_TILES = 4, - PERF_RAS_8X4_TILES = 5, - PERF_RAS_MASKGEN_ACTIVE = 6, - PERF_RAS_FULLY_COVERED_SUPER_TILES = 7, - PERF_RAS_FULLY_COVERED_8X4_TILES = 8, - PERF_RAS_PRIM_KILLED_INVISILBE = 9, - PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES = 10, - PERF_RAS_LRZ_INTF_WORKING_CYCLES = 11, - PERF_RAS_BLOCKS = 12, -}; - -enum a6xx_uche_perfcounter_select { - PERF_UCHE_BUSY_CYCLES = 0, - PERF_UCHE_STALL_CYCLES_ARBITER = 1, - PERF_UCHE_VBIF_LATENCY_CYCLES = 2, - PERF_UCHE_VBIF_LATENCY_SAMPLES = 3, - PERF_UCHE_VBIF_READ_BEATS_TP = 4, - PERF_UCHE_VBIF_READ_BEATS_VFD = 5, - PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6, - PERF_UCHE_VBIF_READ_BEATS_LRZ = 7, - PERF_UCHE_VBIF_READ_BEATS_SP = 8, - PERF_UCHE_READ_REQUESTS_TP = 9, - PERF_UCHE_READ_REQUESTS_VFD = 10, - PERF_UCHE_READ_REQUESTS_HLSQ = 11, - PERF_UCHE_READ_REQUESTS_LRZ = 12, - PERF_UCHE_READ_REQUESTS_SP = 13, - PERF_UCHE_WRITE_REQUESTS_LRZ = 14, - PERF_UCHE_WRITE_REQUESTS_SP = 15, - PERF_UCHE_WRITE_REQUESTS_VPC = 16, - PERF_UCHE_WRITE_REQUESTS_VSC = 17, - PERF_UCHE_EVICTS = 18, - PERF_UCHE_BANK_REQ0 = 19, - PERF_UCHE_BANK_REQ1 = 20, - PERF_UCHE_BANK_REQ2 = 21, - PERF_UCHE_BANK_REQ3 = 22, - PERF_UCHE_BANK_REQ4 = 23, - PERF_UCHE_BANK_REQ5 = 24, - PERF_UCHE_BANK_REQ6 = 25, - PERF_UCHE_BANK_REQ7 = 26, - PERF_UCHE_VBIF_READ_BEATS_CH0 = 27, - PERF_UCHE_VBIF_READ_BEATS_CH1 = 28, - PERF_UCHE_GMEM_READ_BEATS = 29, - PERF_UCHE_TPH_REF_FULL = 30, - PERF_UCHE_TPH_VICTIM_FULL = 31, - PERF_UCHE_TPH_EXT_FULL = 32, - PERF_UCHE_VBIF_STALL_WRITE_DATA = 33, - PERF_UCHE_DCMP_LATENCY_SAMPLES = 34, - PERF_UCHE_DCMP_LATENCY_CYCLES = 35, - PERF_UCHE_VBIF_READ_BEATS_PC = 36, - PERF_UCHE_READ_REQUESTS_PC = 37, - PERF_UCHE_RAM_READ_REQ = 38, - 
PERF_UCHE_RAM_WRITE_REQ = 39, -}; - -enum a6xx_tp_perfcounter_select { - PERF_TP_BUSY_CYCLES = 0, - PERF_TP_STALL_CYCLES_UCHE = 1, - PERF_TP_LATENCY_CYCLES = 2, - PERF_TP_LATENCY_TRANS = 3, - PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4, - PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5, - PERF_TP_L1_CACHELINE_REQUESTS = 6, - PERF_TP_L1_CACHELINE_MISSES = 7, - PERF_TP_SP_TP_TRANS = 8, - PERF_TP_TP_SP_TRANS = 9, - PERF_TP_OUTPUT_PIXELS = 10, - PERF_TP_FILTER_WORKLOAD_16BIT = 11, - PERF_TP_FILTER_WORKLOAD_32BIT = 12, - PERF_TP_QUADS_RECEIVED = 13, - PERF_TP_QUADS_OFFSET = 14, - PERF_TP_QUADS_SHADOW = 15, - PERF_TP_QUADS_ARRAY = 16, - PERF_TP_QUADS_GRADIENT = 17, - PERF_TP_QUADS_1D = 18, - PERF_TP_QUADS_2D = 19, - PERF_TP_QUADS_BUFFER = 20, - PERF_TP_QUADS_3D = 21, - PERF_TP_QUADS_CUBE = 22, - PERF_TP_DIVERGENT_QUADS_RECEIVED = 23, - PERF_TP_PRT_NON_RESIDENT_EVENTS = 24, - PERF_TP_OUTPUT_PIXELS_POINT = 25, - PERF_TP_OUTPUT_PIXELS_BILINEAR = 26, - PERF_TP_OUTPUT_PIXELS_MIP = 27, - PERF_TP_OUTPUT_PIXELS_ANISO = 28, - PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 29, - PERF_TP_FLAG_CACHE_REQUESTS = 30, - PERF_TP_FLAG_CACHE_MISSES = 31, - PERF_TP_L1_5_L2_REQUESTS = 32, - PERF_TP_2D_OUTPUT_PIXELS = 33, - PERF_TP_2D_OUTPUT_PIXELS_POINT = 34, - PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 35, - PERF_TP_2D_FILTER_WORKLOAD_16BIT = 36, - PERF_TP_2D_FILTER_WORKLOAD_32BIT = 37, - PERF_TP_TPA2TPC_TRANS = 38, - PERF_TP_L1_MISSES_ASTC_1TILE = 39, - PERF_TP_L1_MISSES_ASTC_2TILE = 40, - PERF_TP_L1_MISSES_ASTC_4TILE = 41, - PERF_TP_L1_5_L2_COMPRESS_REQS = 42, - PERF_TP_L1_5_L2_COMPRESS_MISS = 43, - PERF_TP_L1_BANK_CONFLICT = 44, - PERF_TP_L1_5_MISS_LATENCY_CYCLES = 45, - PERF_TP_L1_5_MISS_LATENCY_TRANS = 46, - PERF_TP_QUADS_CONSTANT_MULTIPLIED = 47, - PERF_TP_FRONTEND_WORKING_CYCLES = 48, - PERF_TP_L1_TAG_WORKING_CYCLES = 49, - PERF_TP_L1_DATA_WRITE_WORKING_CYCLES = 50, - PERF_TP_PRE_L1_DECOM_WORKING_CYCLES = 51, - PERF_TP_BACKEND_WORKING_CYCLES = 52, - PERF_TP_FLAG_CACHE_WORKING_CYCLES = 53, - PERF_TP_L1_5_CACHE_WORKING_CYCLES = 54, - PERF_TP_STARVE_CYCLES_SP = 55, - PERF_TP_STARVE_CYCLES_UCHE = 56, -}; - -enum a6xx_sp_perfcounter_select { - PERF_SP_BUSY_CYCLES = 0, - PERF_SP_ALU_WORKING_CYCLES = 1, - PERF_SP_EFU_WORKING_CYCLES = 2, - PERF_SP_STALL_CYCLES_VPC = 3, - PERF_SP_STALL_CYCLES_TP = 4, - PERF_SP_STALL_CYCLES_UCHE = 5, - PERF_SP_STALL_CYCLES_RB = 6, - PERF_SP_NON_EXECUTION_CYCLES = 7, - PERF_SP_WAVE_CONTEXTS = 8, - PERF_SP_WAVE_CONTEXT_CYCLES = 9, - PERF_SP_FS_STAGE_WAVE_CYCLES = 10, - PERF_SP_FS_STAGE_WAVE_SAMPLES = 11, - PERF_SP_VS_STAGE_WAVE_CYCLES = 12, - PERF_SP_VS_STAGE_WAVE_SAMPLES = 13, - PERF_SP_FS_STAGE_DURATION_CYCLES = 14, - PERF_SP_VS_STAGE_DURATION_CYCLES = 15, - PERF_SP_WAVE_CTRL_CYCLES = 16, - PERF_SP_WAVE_LOAD_CYCLES = 17, - PERF_SP_WAVE_EMIT_CYCLES = 18, - PERF_SP_WAVE_NOP_CYCLES = 19, - PERF_SP_WAVE_WAIT_CYCLES = 20, - PERF_SP_WAVE_FETCH_CYCLES = 21, - PERF_SP_WAVE_IDLE_CYCLES = 22, - PERF_SP_WAVE_END_CYCLES = 23, - PERF_SP_WAVE_LONG_SYNC_CYCLES = 24, - PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25, - PERF_SP_WAVE_JOIN_CYCLES = 26, - PERF_SP_LM_LOAD_INSTRUCTIONS = 27, - PERF_SP_LM_STORE_INSTRUCTIONS = 28, - PERF_SP_LM_ATOMICS = 29, - PERF_SP_GM_LOAD_INSTRUCTIONS = 30, - PERF_SP_GM_STORE_INSTRUCTIONS = 31, - PERF_SP_GM_ATOMICS = 32, - PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33, - PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 34, - PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 35, - PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 36, - PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 37, - PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 38, - PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 
39, - PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 40, - PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 41, - PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 42, - PERF_SP_VS_INSTRUCTIONS = 43, - PERF_SP_FS_INSTRUCTIONS = 44, - PERF_SP_ADDR_LOCK_COUNT = 45, - PERF_SP_UCHE_READ_TRANS = 46, - PERF_SP_UCHE_WRITE_TRANS = 47, - PERF_SP_EXPORT_VPC_TRANS = 48, - PERF_SP_EXPORT_RB_TRANS = 49, - PERF_SP_PIXELS_KILLED = 50, - PERF_SP_ICL1_REQUESTS = 51, - PERF_SP_ICL1_MISSES = 52, - PERF_SP_HS_INSTRUCTIONS = 53, - PERF_SP_DS_INSTRUCTIONS = 54, - PERF_SP_GS_INSTRUCTIONS = 55, - PERF_SP_CS_INSTRUCTIONS = 56, - PERF_SP_GPR_READ = 57, - PERF_SP_GPR_WRITE = 58, - PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS = 59, - PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS = 60, - PERF_SP_LM_BANK_CONFLICTS = 61, - PERF_SP_TEX_CONTROL_WORKING_CYCLES = 62, - PERF_SP_LOAD_CONTROL_WORKING_CYCLES = 63, - PERF_SP_FLOW_CONTROL_WORKING_CYCLES = 64, - PERF_SP_LM_WORKING_CYCLES = 65, - PERF_SP_DISPATCHER_WORKING_CYCLES = 66, - PERF_SP_SEQUENCER_WORKING_CYCLES = 67, - PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP = 68, - PERF_SP_STARVE_CYCLES_HLSQ = 69, - PERF_SP_NON_EXECUTION_LS_CYCLES = 70, - PERF_SP_WORKING_EU = 71, - PERF_SP_ANY_EU_WORKING = 72, - PERF_SP_WORKING_EU_FS_STAGE = 73, - PERF_SP_ANY_EU_WORKING_FS_STAGE = 74, - PERF_SP_WORKING_EU_VS_STAGE = 75, - PERF_SP_ANY_EU_WORKING_VS_STAGE = 76, - PERF_SP_WORKING_EU_CS_STAGE = 77, - PERF_SP_ANY_EU_WORKING_CS_STAGE = 78, - PERF_SP_GPR_READ_PREFETCH = 79, - PERF_SP_GPR_READ_CONFLICT = 80, - PERF_SP_GPR_WRITE_CONFLICT = 81, - PERF_SP_GM_LOAD_LATENCY_CYCLES = 82, - PERF_SP_GM_LOAD_LATENCY_SAMPLES = 83, - PERF_SP_EXECUTABLE_WAVES = 84, -}; - -enum a6xx_rb_perfcounter_select { - PERF_RB_BUSY_CYCLES = 0, - PERF_RB_STALL_CYCLES_HLSQ = 1, - PERF_RB_STALL_CYCLES_FIFO0_FULL = 2, - PERF_RB_STALL_CYCLES_FIFO1_FULL = 3, - PERF_RB_STALL_CYCLES_FIFO2_FULL = 4, - PERF_RB_STARVE_CYCLES_SP = 5, - PERF_RB_STARVE_CYCLES_LRZ_TILE = 6, - PERF_RB_STARVE_CYCLES_CCU = 7, - PERF_RB_STARVE_CYCLES_Z_PLANE = 8, - PERF_RB_STARVE_CYCLES_BARY_PLANE = 9, - PERF_RB_Z_WORKLOAD = 10, - PERF_RB_HLSQ_ACTIVE = 11, - PERF_RB_Z_READ = 12, - PERF_RB_Z_WRITE = 13, - PERF_RB_C_READ = 14, - PERF_RB_C_WRITE = 15, - PERF_RB_TOTAL_PASS = 16, - PERF_RB_Z_PASS = 17, - PERF_RB_Z_FAIL = 18, - PERF_RB_S_FAIL = 19, - PERF_RB_BLENDED_FXP_COMPONENTS = 20, - PERF_RB_BLENDED_FP16_COMPONENTS = 21, - PERF_RB_PS_INVOCATIONS = 22, - PERF_RB_2D_ALIVE_CYCLES = 23, - PERF_RB_2D_STALL_CYCLES_A2D = 24, - PERF_RB_2D_STARVE_CYCLES_SRC = 25, - PERF_RB_2D_STARVE_CYCLES_SP = 26, - PERF_RB_2D_STARVE_CYCLES_DST = 27, - PERF_RB_2D_VALID_PIXELS = 28, - PERF_RB_3D_PIXELS = 29, - PERF_RB_BLENDER_WORKING_CYCLES = 30, - PERF_RB_ZPROC_WORKING_CYCLES = 31, - PERF_RB_CPROC_WORKING_CYCLES = 32, - PERF_RB_SAMPLER_WORKING_CYCLES = 33, - PERF_RB_STALL_CYCLES_CCU_COLOR_READ = 34, - PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE = 35, - PERF_RB_STALL_CYCLES_CCU_DEPTH_READ = 36, - PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE = 37, - PERF_RB_STALL_CYCLES_VPC = 38, - PERF_RB_2D_INPUT_TRANS = 39, - PERF_RB_2D_OUTPUT_RB_DST_TRANS = 40, - PERF_RB_2D_OUTPUT_RB_SRC_TRANS = 41, - PERF_RB_BLENDED_FP32_COMPONENTS = 42, - PERF_RB_COLOR_PIX_TILES = 43, - PERF_RB_STALL_CYCLES_CCU = 44, - PERF_RB_EARLY_Z_ARB3_GRANT = 45, - PERF_RB_LATE_Z_ARB3_GRANT = 46, - PERF_RB_EARLY_Z_SKIP_GRANT = 47, -}; - -enum a6xx_vsc_perfcounter_select { - PERF_VSC_BUSY_CYCLES = 0, - PERF_VSC_WORKING_CYCLES = 1, - PERF_VSC_STALL_CYCLES_UCHE = 2, - PERF_VSC_EOT_NUM = 3, - PERF_VSC_INPUT_TILES = 4, -}; - -enum a6xx_ccu_perfcounter_select { - PERF_CCU_BUSY_CYCLES = 
0, - PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1, - PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2, - PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3, - PERF_CCU_DEPTH_BLOCKS = 4, - PERF_CCU_COLOR_BLOCKS = 5, - PERF_CCU_DEPTH_BLOCK_HIT = 6, - PERF_CCU_COLOR_BLOCK_HIT = 7, - PERF_CCU_PARTIAL_BLOCK_READ = 8, - PERF_CCU_GMEM_READ = 9, - PERF_CCU_GMEM_WRITE = 10, - PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11, - PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12, - PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13, - PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14, - PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15, - PERF_CCU_DEPTH_READ_FLAG5_COUNT = 16, - PERF_CCU_DEPTH_READ_FLAG6_COUNT = 17, - PERF_CCU_DEPTH_READ_FLAG8_COUNT = 18, - PERF_CCU_COLOR_READ_FLAG0_COUNT = 19, - PERF_CCU_COLOR_READ_FLAG1_COUNT = 20, - PERF_CCU_COLOR_READ_FLAG2_COUNT = 21, - PERF_CCU_COLOR_READ_FLAG3_COUNT = 22, - PERF_CCU_COLOR_READ_FLAG4_COUNT = 23, - PERF_CCU_COLOR_READ_FLAG5_COUNT = 24, - PERF_CCU_COLOR_READ_FLAG6_COUNT = 25, - PERF_CCU_COLOR_READ_FLAG8_COUNT = 26, - PERF_CCU_2D_RD_REQ = 27, - PERF_CCU_2D_WR_REQ = 28, -}; - -enum a6xx_lrz_perfcounter_select { - PERF_LRZ_BUSY_CYCLES = 0, - PERF_LRZ_STARVE_CYCLES_RAS = 1, - PERF_LRZ_STALL_CYCLES_RB = 2, - PERF_LRZ_STALL_CYCLES_VSC = 3, - PERF_LRZ_STALL_CYCLES_VPC = 4, - PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5, - PERF_LRZ_STALL_CYCLES_UCHE = 6, - PERF_LRZ_LRZ_READ = 7, - PERF_LRZ_LRZ_WRITE = 8, - PERF_LRZ_READ_LATENCY = 9, - PERF_LRZ_MERGE_CACHE_UPDATING = 10, - PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11, - PERF_LRZ_PRIM_KILLED_BY_LRZ = 12, - PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13, - PERF_LRZ_FULL_8X8_TILES = 14, - PERF_LRZ_PARTIAL_8X8_TILES = 15, - PERF_LRZ_TILE_KILLED = 16, - PERF_LRZ_TOTAL_PIXEL = 17, - PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18, - PERF_LRZ_FULLY_COVERED_TILES = 19, - PERF_LRZ_PARTIAL_COVERED_TILES = 20, - PERF_LRZ_FEEDBACK_ACCEPT = 21, - PERF_LRZ_FEEDBACK_DISCARD = 22, - PERF_LRZ_FEEDBACK_STALL = 23, - PERF_LRZ_STALL_CYCLES_RB_ZPLANE = 24, - PERF_LRZ_STALL_CYCLES_RB_BPLANE = 25, - PERF_LRZ_STALL_CYCLES_VC = 26, - PERF_LRZ_RAS_MASK_TRANS = 27, -}; - -enum a6xx_cmp_perfcounter_select { - PERF_CMPDECMP_STALL_CYCLES_ARB = 0, - PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1, - PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2, - PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3, - PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4, - PERF_CMPDECMP_VBIF_READ_REQUEST = 5, - PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6, - PERF_CMPDECMP_VBIF_READ_DATA = 7, - PERF_CMPDECMP_VBIF_WRITE_DATA = 8, - PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9, - PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10, - PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11, - PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12, - PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13, - PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14, - PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT = 15, - PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT = 16, - PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT = 17, - PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 18, - PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 19, - PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 20, - PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 21, - PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT = 22, - PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT = 23, - PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT = 24, - PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 25, - PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 26, - PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 27, - PERF_CMPDECMP_2D_RD_DATA = 28, - PERF_CMPDECMP_2D_WR_DATA = 29, - PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0 = 30, - PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1 = 31, - PERF_CMPDECMP_2D_OUTPUT_TRANS = 32, - PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE = 33, - 
PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT = 34, - PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT = 35, - PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT = 36, - PERF_CMPDECMP_2D_BUSY_CYCLES = 37, - PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES = 38, - PERF_CMPDECMP_2D_PIXELS = 39, -}; - -enum a6xx_2d_ifmt { - R2D_UNORM8 = 16, - R2D_INT32 = 7, - R2D_INT16 = 6, - R2D_INT8 = 5, - R2D_FLOAT32 = 4, - R2D_FLOAT16 = 3, - R2D_UNORM8_SRGB = 1, - R2D_RAW = 0, -}; - -enum a6xx_ztest_mode { - A6XX_EARLY_Z = 0, - A6XX_LATE_Z = 1, - A6XX_EARLY_LRZ_LATE_Z = 2, - A6XX_INVALID_ZTEST = 3, -}; - -enum a6xx_tess_spacing { - TESS_EQUAL = 0, - TESS_FRACTIONAL_ODD = 2, - TESS_FRACTIONAL_EVEN = 3, -}; - -enum a6xx_tess_output { - TESS_POINTS = 0, - TESS_LINES = 1, - TESS_CW_TRIS = 2, - TESS_CCW_TRIS = 3, -}; - -enum a6xx_sequenced_thread_dist { - DIST_SCREEN_COORD = 0, - DIST_ALL_TO_RB0 = 1, -}; - -enum a6xx_single_prim_mode { - NO_FLUSH = 0, - FLUSH_PER_OVERLAP_AND_OVERWRITE = 1, - FLUSH_PER_OVERLAP = 3, -}; - -enum a6xx_raster_mode { - TYPE_TILED = 0, - TYPE_WRITER = 1, -}; - -enum a6xx_raster_direction { - LR_TB = 0, - RL_TB = 1, - LR_BT = 2, - RB_BT = 3, -}; - -enum a6xx_render_mode { - RENDERING_PASS = 0, - BINNING_PASS = 1, -}; - -enum a6xx_buffers_location { - BUFFERS_IN_GMEM = 0, - BUFFERS_IN_SYSMEM = 3, -}; - -enum a6xx_lrz_dir_status { - LRZ_DIR_LE = 1, - LRZ_DIR_GE = 2, - LRZ_DIR_INVALID = 3, -}; - -enum a6xx_fragcoord_sample_mode { - FRAGCOORD_CENTER = 0, - FRAGCOORD_SAMPLE = 3, -}; - -enum a6xx_rotation { - ROTATE_0 = 0, - ROTATE_90 = 1, - ROTATE_180 = 2, - ROTATE_270 = 3, - ROTATE_HFLIP = 4, - ROTATE_VFLIP = 5, -}; - -enum a6xx_ccu_cache_size { - CCU_CACHE_SIZE_FULL = 0, - CCU_CACHE_SIZE_HALF = 1, - CCU_CACHE_SIZE_QUARTER = 2, - CCU_CACHE_SIZE_EIGHTH = 3, -}; - -enum a6xx_varying_interp_mode { - INTERP_SMOOTH = 0, - INTERP_FLAT = 1, - INTERP_ZERO = 2, - INTERP_ONE = 3, -}; - -enum a6xx_varying_ps_repl_mode { - PS_REPL_NONE = 0, - PS_REPL_S = 1, - PS_REPL_T = 2, - PS_REPL_ONE_MINUS_T = 3, -}; - -enum a6xx_threadsize { - THREAD64 = 0, - THREAD128 = 1, -}; - -enum a6xx_bindless_descriptor_size { - BINDLESS_DESCRIPTOR_16B = 1, - BINDLESS_DESCRIPTOR_64B = 3, -}; - -enum a6xx_isam_mode { - ISAMMODE_CL = 1, - ISAMMODE_GL = 2, -}; - -enum a7xx_cs_yalign { - CS_YALIGN_1 = 8, - CS_YALIGN_2 = 4, - CS_YALIGN_4 = 2, - CS_YALIGN_8 = 1, -}; - -enum a6xx_tex_filter { - A6XX_TEX_NEAREST = 0, - A6XX_TEX_LINEAR = 1, - A6XX_TEX_ANISO = 2, - A6XX_TEX_CUBIC = 3, -}; - -enum a6xx_tex_clamp { - A6XX_TEX_REPEAT = 0, - A6XX_TEX_CLAMP_TO_EDGE = 1, - A6XX_TEX_MIRROR_REPEAT = 2, - A6XX_TEX_CLAMP_TO_BORDER = 3, - A6XX_TEX_MIRROR_CLAMP = 4, -}; - -enum a6xx_tex_aniso { - A6XX_TEX_ANISO_1 = 0, - A6XX_TEX_ANISO_2 = 1, - A6XX_TEX_ANISO_4 = 2, - A6XX_TEX_ANISO_8 = 3, - A6XX_TEX_ANISO_16 = 4, -}; - -enum a6xx_reduction_mode { - A6XX_REDUCTION_MODE_AVERAGE = 0, - A6XX_REDUCTION_MODE_MIN = 1, - A6XX_REDUCTION_MODE_MAX = 2, -}; - -enum a6xx_tex_swiz { - A6XX_TEX_X = 0, - A6XX_TEX_Y = 1, - A6XX_TEX_Z = 2, - A6XX_TEX_W = 3, - A6XX_TEX_ZERO = 4, - A6XX_TEX_ONE = 5, -}; - -enum a6xx_tex_type { - A6XX_TEX_1D = 0, - A6XX_TEX_2D = 1, - A6XX_TEX_CUBE = 2, - A6XX_TEX_3D = 3, - A6XX_TEX_BUFFER = 4, -}; - -#define A6XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001 -#define A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR 0x00000002 -#define A6XX_RBBM_INT_0_MASK_CP_IPC_INTR_0 0x00000010 -#define A6XX_RBBM_INT_0_MASK_CP_IPC_INTR_1 0x00000020 -#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW 0x00000040 -#define A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080 -#define 
A6XX_RBBM_INT_0_MASK_CP_SW 0x00000100 -#define A6XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200 -#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400 -#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800 -#define A6XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000 -#define A6XX_RBBM_INT_0_MASK_CP_IB2 0x00002000 -#define A6XX_RBBM_INT_0_MASK_CP_IB1 0x00004000 -#define A6XX_RBBM_INT_0_MASK_CP_RB 0x00008000 -#define A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT 0x00008000 -#define A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPTLPAC 0x00010000 -#define A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000 -#define A6XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000 -#define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000 -#define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS_LPAC 0x00200000 -#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000 -#define A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT 0x00800000 -#define A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000 -#define A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000 -#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000 -#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000 -#define A6XX_RBBM_INT_0_MASK_TSBWRITEERROR 0x10000000 -#define A6XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000 -#define A6XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000 - -#define A6XX_CP_INT_CP_OPCODE_ERROR 0x00000001 -#define A6XX_CP_INT_CP_UCODE_ERROR 0x00000002 -#define A6XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004 -#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010 -#define A6XX_CP_INT_CP_AHB_ERROR 0x00000020 -#define A6XX_CP_INT_CP_VSD_PARITY_ERROR 0x00000040 -#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR 0x00000080 -#define A6XX_CP_INT_CP_OPCODE_ERROR_LPAC 0x00000100 -#define A6XX_CP_INT_CP_UCODE_ERROR_LPAC 0x00000200 -#define A6XX_CP_INT_CP_HW_FAULT_ERROR_LPAC 0x00000400 -#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR_LPAC 0x00000800 -#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR_LPAC 0x00001000 -#define A6XX_CP_INT_CP_OPCODE_ERROR_BV 0x00002000 -#define A6XX_CP_INT_CP_UCODE_ERROR_BV 0x00004000 -#define A6XX_CP_INT_CP_HW_FAULT_ERROR_BV 0x00008000 -#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR_BV 0x00010000 -#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR_BV 0x00020000 - -#define REG_A6XX_CP_RB_BASE 0x00000800 - -#define REG_A6XX_CP_RB_CNTL 0x00000802 - -#define REG_A6XX_CP_RB_RPTR_ADDR 0x00000804 - -#define REG_A6XX_CP_RB_RPTR 0x00000806 - -#define REG_A6XX_CP_RB_WPTR 0x00000807 - -#define REG_A6XX_CP_SQE_CNTL 0x00000808 - -#define REG_A6XX_CP_CP2GMU_STATUS 0x00000812 -#define A6XX_CP_CP2GMU_STATUS_IFPC 0x00000001 - -#define REG_A6XX_CP_HW_FAULT 0x00000821 - -#define REG_A6XX_CP_INTERRUPT_STATUS 0x00000823 -#define REG_A6XX_CP_PROTECT_STATUS 0x00000824 - -#define REG_A6XX_CP_STATUS_1 0x00000825 - -#define REG_A6XX_CP_SQE_INSTR_BASE 0x00000830 - -#define REG_A6XX_CP_MISC_CNTL 0x00000840 - -#define REG_A6XX_CP_APRIV_CNTL 0x00000844 -#define A6XX_CP_APRIV_CNTL_CDWRITE 0x00000040 -#define A6XX_CP_APRIV_CNTL_CDREAD 0x00000020 -#define A6XX_CP_APRIV_CNTL_RBRPWB 0x00000008 -#define A6XX_CP_APRIV_CNTL_RBPRIVLEVEL 0x00000004 -#define A6XX_CP_APRIV_CNTL_RBFETCH 0x00000002 -#define A6XX_CP_APRIV_CNTL_ICACHE 0x00000001 - -#define REG_A6XX_CP_PREEMPT_THRESHOLD 0x000008c0 - -#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1 -#define A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__MASK 0x000000ff -#define A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__SHIFT 0 -static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_MRB_START(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__SHIFT) & 
A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__MASK; -} -#define A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__MASK 0x0000ff00 -#define A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__SHIFT 8 -static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_VSD_START(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__MASK; -} -#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK 0x00ff0000 -#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB1_START(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK; -} -#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK 0xff000000 -#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT 24 -static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB2_START(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK; -} - -#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2 -#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK 0x000001ff -#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT 0 -static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_SDS_START(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK; -} -#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK 0xffff0000 -#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK; -} - -#define REG_A6XX_CP_MEM_POOL_SIZE 0x000008c3 - -#define REG_A6XX_CP_CHICKEN_DBG 0x00000841 - -#define REG_A6XX_CP_ADDR_MODE_CNTL 0x00000842 - -#define REG_A6XX_CP_DBG_ECO_CNTL 0x00000843 - -#define REG_A6XX_CP_PROTECT_CNTL 0x0000084f -#define A6XX_CP_PROTECT_CNTL_LAST_SPAN_INF_RANGE 0x00000008 -#define A6XX_CP_PROTECT_CNTL_ACCESS_FAULT_ON_VIOL_EN 0x00000002 -#define A6XX_CP_PROTECT_CNTL_ACCESS_PROT_EN 0x00000001 - -#define REG_A6XX_CP_SCRATCH(i0) (0x00000883 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000883 + 0x1*i0; } - -#define REG_A6XX_CP_PROTECT(i0) (0x00000850 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000850 + 0x1*i0; } -#define A6XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0003ffff -#define A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0 -static inline uint32_t A6XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val) -{ - return ((val) << A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A6XX_CP_PROTECT_REG_BASE_ADDR__MASK; -} -#define A6XX_CP_PROTECT_REG_MASK_LEN__MASK 0x7ffc0000 -#define A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT 18 -static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val) -{ - return ((val) << A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A6XX_CP_PROTECT_REG_MASK_LEN__MASK; -} -#define A6XX_CP_PROTECT_REG_READ 0x80000000 - -#define REG_A6XX_CP_CONTEXT_SWITCH_CNTL 0x000008a0 - -#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO 0x000008a1 - -#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR 0x000008a3 - -#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR 0x000008a5 - -#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR 0x000008a7 - -#define REG_A7XX_CP_CONTEXT_SWITCH_LEVEL_STATUS 0x000008ab - -#define REG_A6XX_CP_PERFCTR_CP_SEL(i0) (0x000008d0 + 0x1*(i0)) - -#define 
REG_A7XX_CP_BV_PERFCTR_CP_SEL(i0) (0x000008e0 + 0x1*(i0)) - -#define REG_A6XX_CP_CRASH_SCRIPT_BASE 0x00000900 - -#define REG_A6XX_CP_CRASH_DUMP_CNTL 0x00000902 - -#define REG_A6XX_CP_CRASH_DUMP_STATUS 0x00000903 - -#define REG_A6XX_CP_SQE_STAT_ADDR 0x00000908 - -#define REG_A6XX_CP_SQE_STAT_DATA 0x00000909 - -#define REG_A6XX_CP_DRAW_STATE_ADDR 0x0000090a - -#define REG_A6XX_CP_DRAW_STATE_DATA 0x0000090b - -#define REG_A6XX_CP_ROQ_DBG_ADDR 0x0000090c - -#define REG_A6XX_CP_ROQ_DBG_DATA 0x0000090d - -#define REG_A6XX_CP_MEM_POOL_DBG_ADDR 0x0000090e - -#define REG_A6XX_CP_MEM_POOL_DBG_DATA 0x0000090f - -#define REG_A6XX_CP_SQE_UCODE_DBG_ADDR 0x00000910 - -#define REG_A6XX_CP_SQE_UCODE_DBG_DATA 0x00000911 - -#define REG_A6XX_CP_IB1_BASE 0x00000928 - -#define REG_A6XX_CP_IB1_REM_SIZE 0x0000092a - -#define REG_A6XX_CP_IB2_BASE 0x0000092b - -#define REG_A6XX_CP_IB2_REM_SIZE 0x0000092d - -#define REG_A6XX_CP_SDS_BASE 0x0000092e - -#define REG_A6XX_CP_SDS_REM_SIZE 0x00000930 - -#define REG_A6XX_CP_MRB_BASE 0x00000931 - -#define REG_A6XX_CP_MRB_REM_SIZE 0x00000933 - -#define REG_A6XX_CP_VSD_BASE 0x00000934 - -#define REG_A6XX_CP_ROQ_RB_STAT 0x00000939 -#define A6XX_CP_ROQ_RB_STAT_RPTR__MASK 0x000003ff -#define A6XX_CP_ROQ_RB_STAT_RPTR__SHIFT 0 -static inline uint32_t A6XX_CP_ROQ_RB_STAT_RPTR(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_RB_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_RB_STAT_RPTR__MASK; -} -#define A6XX_CP_ROQ_RB_STAT_WPTR__MASK 0x03ff0000 -#define A6XX_CP_ROQ_RB_STAT_WPTR__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_RB_STAT_WPTR(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_RB_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_RB_STAT_WPTR__MASK; -} - -#define REG_A6XX_CP_ROQ_IB1_STAT 0x0000093a -#define A6XX_CP_ROQ_IB1_STAT_RPTR__MASK 0x000003ff -#define A6XX_CP_ROQ_IB1_STAT_RPTR__SHIFT 0 -static inline uint32_t A6XX_CP_ROQ_IB1_STAT_RPTR(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_IB1_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_IB1_STAT_RPTR__MASK; -} -#define A6XX_CP_ROQ_IB1_STAT_WPTR__MASK 0x03ff0000 -#define A6XX_CP_ROQ_IB1_STAT_WPTR__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_IB1_STAT_WPTR(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_IB1_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_IB1_STAT_WPTR__MASK; -} - -#define REG_A6XX_CP_ROQ_IB2_STAT 0x0000093b -#define A6XX_CP_ROQ_IB2_STAT_RPTR__MASK 0x000003ff -#define A6XX_CP_ROQ_IB2_STAT_RPTR__SHIFT 0 -static inline uint32_t A6XX_CP_ROQ_IB2_STAT_RPTR(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_IB2_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_IB2_STAT_RPTR__MASK; -} -#define A6XX_CP_ROQ_IB2_STAT_WPTR__MASK 0x03ff0000 -#define A6XX_CP_ROQ_IB2_STAT_WPTR__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_IB2_STAT_WPTR(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_IB2_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_IB2_STAT_WPTR__MASK; -} - -#define REG_A6XX_CP_ROQ_SDS_STAT 0x0000093c -#define A6XX_CP_ROQ_SDS_STAT_RPTR__MASK 0x000003ff -#define A6XX_CP_ROQ_SDS_STAT_RPTR__SHIFT 0 -static inline uint32_t A6XX_CP_ROQ_SDS_STAT_RPTR(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_SDS_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_SDS_STAT_RPTR__MASK; -} -#define A6XX_CP_ROQ_SDS_STAT_WPTR__MASK 0x03ff0000 -#define A6XX_CP_ROQ_SDS_STAT_WPTR__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_SDS_STAT_WPTR(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_SDS_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_SDS_STAT_WPTR__MASK; -} - -#define REG_A6XX_CP_ROQ_MRB_STAT 0x0000093d -#define A6XX_CP_ROQ_MRB_STAT_RPTR__MASK 0x000003ff -#define A6XX_CP_ROQ_MRB_STAT_RPTR__SHIFT 0 -static inline uint32_t A6XX_CP_ROQ_MRB_STAT_RPTR(uint32_t val) -{ - return ((val) 
<< A6XX_CP_ROQ_MRB_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_MRB_STAT_RPTR__MASK; -} -#define A6XX_CP_ROQ_MRB_STAT_WPTR__MASK 0x03ff0000 -#define A6XX_CP_ROQ_MRB_STAT_WPTR__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_MRB_STAT_WPTR(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_MRB_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_MRB_STAT_WPTR__MASK; -} - -#define REG_A6XX_CP_ROQ_VSD_STAT 0x0000093e -#define A6XX_CP_ROQ_VSD_STAT_RPTR__MASK 0x000003ff -#define A6XX_CP_ROQ_VSD_STAT_RPTR__SHIFT 0 -static inline uint32_t A6XX_CP_ROQ_VSD_STAT_RPTR(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_VSD_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_VSD_STAT_RPTR__MASK; -} -#define A6XX_CP_ROQ_VSD_STAT_WPTR__MASK 0x03ff0000 -#define A6XX_CP_ROQ_VSD_STAT_WPTR__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_VSD_STAT_WPTR(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_VSD_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_VSD_STAT_WPTR__MASK; -} - -#define REG_A6XX_CP_IB1_DWORDS 0x00000943 - -#define REG_A6XX_CP_IB2_DWORDS 0x00000944 - -#define REG_A6XX_CP_SDS_DWORDS 0x00000945 - -#define REG_A6XX_CP_MRB_DWORDS 0x00000946 - -#define REG_A6XX_CP_VSD_DWORDS 0x00000947 - -#define REG_A6XX_CP_ROQ_AVAIL_RB 0x00000948 -#define A6XX_CP_ROQ_AVAIL_RB_REM__MASK 0xffff0000 -#define A6XX_CP_ROQ_AVAIL_RB_REM__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_AVAIL_RB_REM(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_AVAIL_RB_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_RB_REM__MASK; -} - -#define REG_A6XX_CP_ROQ_AVAIL_IB1 0x00000949 -#define A6XX_CP_ROQ_AVAIL_IB1_REM__MASK 0xffff0000 -#define A6XX_CP_ROQ_AVAIL_IB1_REM__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_AVAIL_IB1_REM(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_AVAIL_IB1_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_IB1_REM__MASK; -} - -#define REG_A6XX_CP_ROQ_AVAIL_IB2 0x0000094a -#define A6XX_CP_ROQ_AVAIL_IB2_REM__MASK 0xffff0000 -#define A6XX_CP_ROQ_AVAIL_IB2_REM__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_AVAIL_IB2_REM(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_AVAIL_IB2_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_IB2_REM__MASK; -} - -#define REG_A6XX_CP_ROQ_AVAIL_SDS 0x0000094b -#define A6XX_CP_ROQ_AVAIL_SDS_REM__MASK 0xffff0000 -#define A6XX_CP_ROQ_AVAIL_SDS_REM__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_AVAIL_SDS_REM(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_AVAIL_SDS_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_SDS_REM__MASK; -} - -#define REG_A6XX_CP_ROQ_AVAIL_MRB 0x0000094c -#define A6XX_CP_ROQ_AVAIL_MRB_REM__MASK 0xffff0000 -#define A6XX_CP_ROQ_AVAIL_MRB_REM__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_AVAIL_MRB_REM(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_AVAIL_MRB_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_MRB_REM__MASK; -} - -#define REG_A6XX_CP_ROQ_AVAIL_VSD 0x0000094d -#define A6XX_CP_ROQ_AVAIL_VSD_REM__MASK 0xffff0000 -#define A6XX_CP_ROQ_AVAIL_VSD_REM__SHIFT 16 -static inline uint32_t A6XX_CP_ROQ_AVAIL_VSD_REM(uint32_t val) -{ - return ((val) << A6XX_CP_ROQ_AVAIL_VSD_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_VSD_REM__MASK; -} - -#define REG_A6XX_CP_ALWAYS_ON_COUNTER 0x00000980 - -#define REG_A6XX_CP_AHB_CNTL 0x0000098d - -#define REG_A6XX_CP_APERTURE_CNTL_HOST 0x00000a00 - -#define REG_A7XX_CP_APERTURE_CNTL_HOST 0x00000a00 -#define A7XX_CP_APERTURE_CNTL_HOST_PIPE__MASK 0x00003000 -#define A7XX_CP_APERTURE_CNTL_HOST_PIPE__SHIFT 12 -static inline uint32_t A7XX_CP_APERTURE_CNTL_HOST_PIPE(enum a7xx_pipe val) -{ - return ((val) << A7XX_CP_APERTURE_CNTL_HOST_PIPE__SHIFT) & A7XX_CP_APERTURE_CNTL_HOST_PIPE__MASK; -} -#define A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__MASK 0x00000700 -#define A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__SHIFT 8 -static inline uint32_t 
A7XX_CP_APERTURE_CNTL_HOST_CLUSTER(enum a7xx_cluster val) -{ - return ((val) << A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__SHIFT) & A7XX_CP_APERTURE_CNTL_HOST_CLUSTER__MASK; -} -#define A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__MASK 0x00000030 -#define A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__SHIFT 4 -static inline uint32_t A7XX_CP_APERTURE_CNTL_HOST_CONTEXT(uint32_t val) -{ - return ((val) << A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__SHIFT) & A7XX_CP_APERTURE_CNTL_HOST_CONTEXT__MASK; -} - -#define REG_A6XX_CP_APERTURE_CNTL_CD 0x00000a03 - -#define REG_A7XX_CP_APERTURE_CNTL_CD 0x00000a03 -#define A7XX_CP_APERTURE_CNTL_CD_PIPE__MASK 0x00003000 -#define A7XX_CP_APERTURE_CNTL_CD_PIPE__SHIFT 12 -static inline uint32_t A7XX_CP_APERTURE_CNTL_CD_PIPE(enum a7xx_pipe val) -{ - return ((val) << A7XX_CP_APERTURE_CNTL_CD_PIPE__SHIFT) & A7XX_CP_APERTURE_CNTL_CD_PIPE__MASK; -} -#define A7XX_CP_APERTURE_CNTL_CD_CLUSTER__MASK 0x00000700 -#define A7XX_CP_APERTURE_CNTL_CD_CLUSTER__SHIFT 8 -static inline uint32_t A7XX_CP_APERTURE_CNTL_CD_CLUSTER(enum a7xx_cluster val) -{ - return ((val) << A7XX_CP_APERTURE_CNTL_CD_CLUSTER__SHIFT) & A7XX_CP_APERTURE_CNTL_CD_CLUSTER__MASK; -} -#define A7XX_CP_APERTURE_CNTL_CD_CONTEXT__MASK 0x00000030 -#define A7XX_CP_APERTURE_CNTL_CD_CONTEXT__SHIFT 4 -static inline uint32_t A7XX_CP_APERTURE_CNTL_CD_CONTEXT(uint32_t val) -{ - return ((val) << A7XX_CP_APERTURE_CNTL_CD_CONTEXT__SHIFT) & A7XX_CP_APERTURE_CNTL_CD_CONTEXT__MASK; -} - -#define REG_A7XX_CP_BV_PROTECT_STATUS 0x00000a61 - -#define REG_A7XX_CP_BV_HW_FAULT 0x00000a64 - -#define REG_A7XX_CP_BV_DRAW_STATE_ADDR 0x00000a81 - -#define REG_A7XX_CP_BV_DRAW_STATE_DATA 0x00000a82 - -#define REG_A7XX_CP_BV_ROQ_DBG_ADDR 0x00000a83 - -#define REG_A7XX_CP_BV_ROQ_DBG_DATA 0x00000a84 - -#define REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR 0x00000a85 - -#define REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA 0x00000a86 - -#define REG_A7XX_CP_BV_SQE_STAT_ADDR 0x00000a87 - -#define REG_A7XX_CP_BV_SQE_STAT_DATA 0x00000a88 - -#define REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR 0x00000a96 - -#define REG_A7XX_CP_BV_MEM_POOL_DBG_DATA 0x00000a97 - -#define REG_A7XX_CP_BV_RB_RPTR_ADDR 0x00000a98 - -#define REG_A7XX_CP_RESOURCE_TBL_DBG_ADDR 0x00000a9a - -#define REG_A7XX_CP_RESOURCE_TBL_DBG_DATA 0x00000a9b - -#define REG_A7XX_CP_BV_APRIV_CNTL 0x00000ad0 - -#define REG_A7XX_CP_BV_CHICKEN_DBG 0x00000ada - -#define REG_A7XX_CP_LPAC_DRAW_STATE_ADDR 0x00000b0a - -#define REG_A7XX_CP_LPAC_DRAW_STATE_DATA 0x00000b0b - -#define REG_A7XX_CP_LPAC_ROQ_DBG_ADDR 0x00000b0c - -#define REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR 0x00000b27 - -#define REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA 0x00000b28 - -#define REG_A7XX_CP_SQE_AC_STAT_ADDR 0x00000b29 - -#define REG_A7XX_CP_SQE_AC_STAT_DATA 0x00000b2a - -#define REG_A7XX_CP_LPAC_APRIV_CNTL 0x00000b31 - -#define REG_A6XX_CP_LPAC_PROG_FIFO_SIZE 0x00000b34 - -#define REG_A7XX_CP_LPAC_ROQ_DBG_DATA 0x00000b35 - -#define REG_A7XX_CP_LPAC_FIFO_DBG_DATA 0x00000b36 - -#define REG_A7XX_CP_LPAC_FIFO_DBG_ADDR 0x00000b40 - -#define REG_A6XX_CP_LPAC_SQE_INSTR_BASE 0x00000b82 - -#define REG_A6XX_VSC_ADDR_MODE_CNTL 0x00000c01 - -#define REG_A6XX_RBBM_GPR0_CNTL 0x00000018 - -#define REG_A6XX_RBBM_INT_0_STATUS 0x00000201 -#define REG_A6XX_RBBM_STATUS 0x00000210 -#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x00800000 -#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x00400000 -#define A6XX_RBBM_STATUS_HLSQ_BUSY 0x00200000 -#define A6XX_RBBM_STATUS_VSC_BUSY 0x00100000 -#define A6XX_RBBM_STATUS_TPL1_BUSY 0x00080000 -#define A6XX_RBBM_STATUS_SP_BUSY 0x00040000 -#define A6XX_RBBM_STATUS_UCHE_BUSY 0x00020000 
-#define A6XX_RBBM_STATUS_VPC_BUSY 0x00010000 -#define A6XX_RBBM_STATUS_VFD_BUSY 0x00008000 -#define A6XX_RBBM_STATUS_TESS_BUSY 0x00004000 -#define A6XX_RBBM_STATUS_PC_VSD_BUSY 0x00002000 -#define A6XX_RBBM_STATUS_PC_DCALL_BUSY 0x00001000 -#define A6XX_RBBM_STATUS_COM_DCOM_BUSY 0x00000800 -#define A6XX_RBBM_STATUS_LRZ_BUSY 0x00000400 -#define A6XX_RBBM_STATUS_A2D_BUSY 0x00000200 -#define A6XX_RBBM_STATUS_CCU_BUSY 0x00000100 -#define A6XX_RBBM_STATUS_RB_BUSY 0x00000080 -#define A6XX_RBBM_STATUS_RAS_BUSY 0x00000040 -#define A6XX_RBBM_STATUS_TSE_BUSY 0x00000020 -#define A6XX_RBBM_STATUS_VBIF_BUSY 0x00000010 -#define A6XX_RBBM_STATUS_GFX_DBGC_BUSY 0x00000008 -#define A6XX_RBBM_STATUS_CP_BUSY 0x00000004 -#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CP_MASTER 0x00000002 -#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER 0x00000001 - -#define REG_A6XX_RBBM_STATUS1 0x00000211 - -#define REG_A6XX_RBBM_STATUS2 0x00000212 - -#define REG_A6XX_RBBM_STATUS3 0x00000213 -#define A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT 0x01000000 - -#define REG_A6XX_RBBM_VBIF_GX_RESET_STATUS 0x00000215 - -#define REG_A7XX_RBBM_CLOCK_MODE_CP 0x00000260 - -#define REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ 0x00000284 - -#define REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS 0x00000285 - -#define REG_A7XX_RBBM_CLOCK_MODE2_GRAS 0x00000286 - -#define REG_A7XX_RBBM_CLOCK_MODE_BV_VFD 0x00000287 - -#define REG_A7XX_RBBM_CLOCK_MODE_BV_GPC 0x00000288 - -#define REG_A6XX_RBBM_PERFCTR_CP(i0) (0x00000400 + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_RBBM(i0) (0x0000041c + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_PC(i0) (0x00000424 + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_VFD(i0) (0x00000434 + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_HLSQ(i0) (0x00000444 + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_VPC(i0) (0x00000450 + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_CCU(i0) (0x0000045c + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_TSE(i0) (0x00000466 + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_RAS(i0) (0x0000046e + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_UCHE(i0) (0x00000476 + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_TP(i0) (0x0000048e + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_SP(i0) (0x000004a6 + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_RB(i0) (0x000004d6 + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_VSC(i0) (0x000004e6 + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_LRZ(i0) (0x000004ea + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_CMP(i0) (0x000004f2 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_CP(i0) (0x00000300 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_RBBM(i0) (0x0000031c + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_PC(i0) (0x00000324 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_VFD(i0) (0x00000334 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_HLSQ(i0) (0x00000344 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_VPC(i0) (0x00000350 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_CCU(i0) (0x0000035c + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_TSE(i0) (0x00000366 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_RAS(i0) (0x0000036e + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_UCHE(i0) (0x00000376 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_TP(i0) (0x0000038e + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_SP(i0) (0x000003a6 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_RB(i0) (0x000003d6 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_VSC(i0) (0x000003e6 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_LRZ(i0) (0x000003ea + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_CMP(i0) (0x000003f2 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_UFC(i0) (0x000003fa + 0x2*(i0)) - -#define 
REG_A7XX_RBBM_PERFCTR2_HLSQ(i0) (0x00000410 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR2_CP(i0) (0x0000041c + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR2_SP(i0) (0x0000042a + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR2_TP(i0) (0x00000442 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR2_UFC(i0) (0x0000044e + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_BV_PC(i0) (0x00000460 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_BV_VFD(i0) (0x00000470 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_BV_VPC(i0) (0x00000480 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_BV_TSE(i0) (0x0000048c + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_BV_RAS(i0) (0x00000494 + 0x2*(i0)) - -#define REG_A7XX_RBBM_PERFCTR_BV_LRZ(i0) (0x0000049c + 0x2*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_CNTL 0x00000500 - -#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD0 0x00000501 - -#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD1 0x00000502 - -#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD2 0x00000503 - -#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD3 0x00000504 - -#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000505 - -#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000506 - -#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL(i0) (0x00000507 + 0x1*(i0)) - -#define REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000050b - -#define REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD 0x0000050e - -#define REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS 0x0000050f - -#define REG_A6XX_RBBM_ISDB_CNT 0x00000533 - -#define REG_A7XX_RBBM_NC_MODE_CNTL 0x00000534 - -#define REG_A7XX_RBBM_SNAPSHOT_STATUS 0x00000535 - -#define REG_A6XX_RBBM_PRIMCTR_0_LO 0x00000540 - -#define REG_A6XX_RBBM_PRIMCTR_0_HI 0x00000541 - -#define REG_A6XX_RBBM_PRIMCTR_1_LO 0x00000542 - -#define REG_A6XX_RBBM_PRIMCTR_1_HI 0x00000543 - -#define REG_A6XX_RBBM_PRIMCTR_2_LO 0x00000544 - -#define REG_A6XX_RBBM_PRIMCTR_2_HI 0x00000545 - -#define REG_A6XX_RBBM_PRIMCTR_3_LO 0x00000546 - -#define REG_A6XX_RBBM_PRIMCTR_3_HI 0x00000547 - -#define REG_A6XX_RBBM_PRIMCTR_4_LO 0x00000548 - -#define REG_A6XX_RBBM_PRIMCTR_4_HI 0x00000549 - -#define REG_A6XX_RBBM_PRIMCTR_5_LO 0x0000054a - -#define REG_A6XX_RBBM_PRIMCTR_5_HI 0x0000054b - -#define REG_A6XX_RBBM_PRIMCTR_6_LO 0x0000054c - -#define REG_A6XX_RBBM_PRIMCTR_6_HI 0x0000054d - -#define REG_A6XX_RBBM_PRIMCTR_7_LO 0x0000054e - -#define REG_A6XX_RBBM_PRIMCTR_7_HI 0x0000054f - -#define REG_A6XX_RBBM_PRIMCTR_8_LO 0x00000550 - -#define REG_A6XX_RBBM_PRIMCTR_8_HI 0x00000551 - -#define REG_A6XX_RBBM_PRIMCTR_9_LO 0x00000552 - -#define REG_A6XX_RBBM_PRIMCTR_9_HI 0x00000553 - -#define REG_A6XX_RBBM_PRIMCTR_10_LO 0x00000554 - -#define REG_A6XX_RBBM_PRIMCTR_10_HI 0x00000555 - -#define REG_A6XX_RBBM_SECVID_TRUST_CNTL 0x0000f400 - -#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE 0x0000f800 - -#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802 - -#define REG_A6XX_RBBM_SECVID_TSB_CNTL 0x0000f803 - -#define REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810 - -#define REG_A7XX_RBBM_SECVID_TSB_STATUS 0x0000fc00 - -#define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010 - -#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011 - -#define REG_A6XX_RBBM_GBIF_HALT 0x00000016 - -#define REG_A6XX_RBBM_GBIF_HALT_ACK 0x00000017 - -#define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD 0x0000001c -#define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE 0x00000001 - -#define REG_A7XX_RBBM_GBIF_HALT 0x00000016 - -#define REG_A7XX_RBBM_GBIF_HALT_ACK 0x00000017 - -#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f - -#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037 -#define REG_A6XX_RBBM_INT_0_MASK 0x00000038 -#define REG_A7XX_RBBM_INT_2_MASK 0x0000003a - 
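
[Editorial aside, not part of the patch: nearly every multi-bit field in this hunk follows the same pattern, a FIELD__MASK/FIELD__SHIFT pair plus a small static inline packer that shifts the value into position and masks off out-of-range bits. The following is a minimal standalone C sketch of how such helpers compose a register value, reusing the CP_PROTECT_REG helpers defined earlier in this hunk; the base offset and length used below are made-up illustration values, not taken from any real protect-list setup.]

#include <stdint.h>
#include <stdio.h>

/* Field helpers copied from the removed header text above: each field has a
 * __MASK/__SHIFT pair and an inline packer that shifts and masks the value. */
#define A6XX_CP_PROTECT_REG_BASE_ADDR__MASK  0x0003ffff
#define A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
static inline uint32_t A6XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
{
	return ((val) << A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A6XX_CP_PROTECT_REG_BASE_ADDR__MASK;
}
#define A6XX_CP_PROTECT_REG_MASK_LEN__MASK   0x7ffc0000
#define A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT  18
static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
{
	return ((val) << A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A6XX_CP_PROTECT_REG_MASK_LEN__MASK;
}
#define A6XX_CP_PROTECT_REG_READ             0x80000000

int main(void)
{
	/* Hypothetical example: a protect entry covering a range starting at
	 * register offset 0x8000, with a length field of 0x100, marked readable. */
	uint32_t val = A6XX_CP_PROTECT_REG_BASE_ADDR(0x8000) |
		       A6XX_CP_PROTECT_REG_MASK_LEN(0x100) |
		       A6XX_CP_PROTECT_REG_READ;

	printf("CP_PROTECT entry: 0x%08x\n", val);	/* prints 0x84008000 */
	return 0;
}

[The packers deliberately mask after shifting, so an out-of-range value is silently truncated to the field width rather than corrupting neighbouring fields; the same convention applies to all the __MASK/__SHIFT helpers in this header.]
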
-#define REG_A6XX_RBBM_SP_HYST_CNT 0x00000042 - -#define REG_A6XX_RBBM_SW_RESET_CMD 0x00000043 - -#define REG_A6XX_RBBM_RAC_THRESHOLD_CNT 0x00000044 - -#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045 - -#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046 - -#define REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL 0x000000ad - -#define REG_A6XX_RBBM_CLOCK_CNTL 0x000000ae - -#define REG_A6XX_RBBM_CLOCK_CNTL_SP0 0x000000b0 - -#define REG_A6XX_RBBM_CLOCK_CNTL_SP1 0x000000b1 - -#define REG_A6XX_RBBM_CLOCK_CNTL_SP2 0x000000b2 - -#define REG_A6XX_RBBM_CLOCK_CNTL_SP3 0x000000b3 - -#define REG_A6XX_RBBM_CLOCK_CNTL2_SP0 0x000000b4 - -#define REG_A6XX_RBBM_CLOCK_CNTL2_SP1 0x000000b5 - -#define REG_A6XX_RBBM_CLOCK_CNTL2_SP2 0x000000b6 - -#define REG_A6XX_RBBM_CLOCK_CNTL2_SP3 0x000000b7 - -#define REG_A6XX_RBBM_CLOCK_DELAY_SP0 0x000000b8 - -#define REG_A6XX_RBBM_CLOCK_DELAY_SP1 0x000000b9 - -#define REG_A6XX_RBBM_CLOCK_DELAY_SP2 0x000000ba - -#define REG_A6XX_RBBM_CLOCK_DELAY_SP3 0x000000bb - -#define REG_A6XX_RBBM_CLOCK_HYST_SP0 0x000000bc - -#define REG_A6XX_RBBM_CLOCK_HYST_SP1 0x000000bd - -#define REG_A6XX_RBBM_CLOCK_HYST_SP2 0x000000be - -#define REG_A6XX_RBBM_CLOCK_HYST_SP3 0x000000bf - -#define REG_A6XX_RBBM_CLOCK_CNTL_TP0 0x000000c0 - -#define REG_A6XX_RBBM_CLOCK_CNTL_TP1 0x000000c1 - -#define REG_A6XX_RBBM_CLOCK_CNTL_TP2 0x000000c2 - -#define REG_A6XX_RBBM_CLOCK_CNTL_TP3 0x000000c3 - -#define REG_A6XX_RBBM_CLOCK_CNTL2_TP0 0x000000c4 - -#define REG_A6XX_RBBM_CLOCK_CNTL2_TP1 0x000000c5 - -#define REG_A6XX_RBBM_CLOCK_CNTL2_TP2 0x000000c6 - -#define REG_A6XX_RBBM_CLOCK_CNTL2_TP3 0x000000c7 - -#define REG_A6XX_RBBM_CLOCK_CNTL3_TP0 0x000000c8 - -#define REG_A6XX_RBBM_CLOCK_CNTL3_TP1 0x000000c9 - -#define REG_A6XX_RBBM_CLOCK_CNTL3_TP2 0x000000ca - -#define REG_A6XX_RBBM_CLOCK_CNTL3_TP3 0x000000cb - -#define REG_A6XX_RBBM_CLOCK_CNTL4_TP0 0x000000cc - -#define REG_A6XX_RBBM_CLOCK_CNTL4_TP1 0x000000cd - -#define REG_A6XX_RBBM_CLOCK_CNTL4_TP2 0x000000ce - -#define REG_A6XX_RBBM_CLOCK_CNTL4_TP3 0x000000cf - -#define REG_A6XX_RBBM_CLOCK_DELAY_TP0 0x000000d0 - -#define REG_A6XX_RBBM_CLOCK_DELAY_TP1 0x000000d1 - -#define REG_A6XX_RBBM_CLOCK_DELAY_TP2 0x000000d2 - -#define REG_A6XX_RBBM_CLOCK_DELAY_TP3 0x000000d3 - -#define REG_A6XX_RBBM_CLOCK_DELAY2_TP0 0x000000d4 - -#define REG_A6XX_RBBM_CLOCK_DELAY2_TP1 0x000000d5 - -#define REG_A6XX_RBBM_CLOCK_DELAY2_TP2 0x000000d6 - -#define REG_A6XX_RBBM_CLOCK_DELAY2_TP3 0x000000d7 - -#define REG_A6XX_RBBM_CLOCK_DELAY3_TP0 0x000000d8 - -#define REG_A6XX_RBBM_CLOCK_DELAY3_TP1 0x000000d9 - -#define REG_A6XX_RBBM_CLOCK_DELAY3_TP2 0x000000da - -#define REG_A6XX_RBBM_CLOCK_DELAY3_TP3 0x000000db - -#define REG_A6XX_RBBM_CLOCK_DELAY4_TP0 0x000000dc - -#define REG_A6XX_RBBM_CLOCK_DELAY4_TP1 0x000000dd - -#define REG_A6XX_RBBM_CLOCK_DELAY4_TP2 0x000000de - -#define REG_A6XX_RBBM_CLOCK_DELAY4_TP3 0x000000df - -#define REG_A6XX_RBBM_CLOCK_HYST_TP0 0x000000e0 - -#define REG_A6XX_RBBM_CLOCK_HYST_TP1 0x000000e1 - -#define REG_A6XX_RBBM_CLOCK_HYST_TP2 0x000000e2 - -#define REG_A6XX_RBBM_CLOCK_HYST_TP3 0x000000e3 - -#define REG_A6XX_RBBM_CLOCK_HYST2_TP0 0x000000e4 - -#define REG_A6XX_RBBM_CLOCK_HYST2_TP1 0x000000e5 - -#define REG_A6XX_RBBM_CLOCK_HYST2_TP2 0x000000e6 - -#define REG_A6XX_RBBM_CLOCK_HYST2_TP3 0x000000e7 - -#define REG_A6XX_RBBM_CLOCK_HYST3_TP0 0x000000e8 - -#define REG_A6XX_RBBM_CLOCK_HYST3_TP1 0x000000e9 - -#define REG_A6XX_RBBM_CLOCK_HYST3_TP2 0x000000ea - -#define REG_A6XX_RBBM_CLOCK_HYST3_TP3 0x000000eb - -#define REG_A6XX_RBBM_CLOCK_HYST4_TP0 0x000000ec - -#define 
REG_A6XX_RBBM_CLOCK_HYST4_TP1 0x000000ed - -#define REG_A6XX_RBBM_CLOCK_HYST4_TP2 0x000000ee - -#define REG_A6XX_RBBM_CLOCK_HYST4_TP3 0x000000ef - -#define REG_A6XX_RBBM_CLOCK_CNTL_RB0 0x000000f0 - -#define REG_A6XX_RBBM_CLOCK_CNTL_RB1 0x000000f1 - -#define REG_A6XX_RBBM_CLOCK_CNTL_RB2 0x000000f2 - -#define REG_A6XX_RBBM_CLOCK_CNTL_RB3 0x000000f3 - -#define REG_A6XX_RBBM_CLOCK_CNTL2_RB0 0x000000f4 - -#define REG_A6XX_RBBM_CLOCK_CNTL2_RB1 0x000000f5 - -#define REG_A6XX_RBBM_CLOCK_CNTL2_RB2 0x000000f6 - -#define REG_A6XX_RBBM_CLOCK_CNTL2_RB3 0x000000f7 - -#define REG_A6XX_RBBM_CLOCK_CNTL_CCU0 0x000000f8 - -#define REG_A6XX_RBBM_CLOCK_CNTL_CCU1 0x000000f9 - -#define REG_A6XX_RBBM_CLOCK_CNTL_CCU2 0x000000fa - -#define REG_A6XX_RBBM_CLOCK_CNTL_CCU3 0x000000fb - -#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000100 - -#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000101 - -#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000102 - -#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000103 - -#define REG_A6XX_RBBM_CLOCK_CNTL_RAC 0x00000104 - -#define REG_A6XX_RBBM_CLOCK_CNTL2_RAC 0x00000105 - -#define REG_A6XX_RBBM_CLOCK_DELAY_RAC 0x00000106 - -#define REG_A6XX_RBBM_CLOCK_HYST_RAC 0x00000107 - -#define REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000108 - -#define REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000109 - -#define REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000010a - -#define REG_A6XX_RBBM_CLOCK_CNTL_UCHE 0x0000010b - -#define REG_A6XX_RBBM_CLOCK_CNTL2_UCHE 0x0000010c - -#define REG_A6XX_RBBM_CLOCK_CNTL3_UCHE 0x0000010d - -#define REG_A6XX_RBBM_CLOCK_CNTL4_UCHE 0x0000010e - -#define REG_A6XX_RBBM_CLOCK_DELAY_UCHE 0x0000010f - -#define REG_A6XX_RBBM_CLOCK_HYST_UCHE 0x00000110 - -#define REG_A6XX_RBBM_CLOCK_MODE_VFD 0x00000111 - -#define REG_A6XX_RBBM_CLOCK_DELAY_VFD 0x00000112 - -#define REG_A6XX_RBBM_CLOCK_HYST_VFD 0x00000113 - -#define REG_A6XX_RBBM_CLOCK_MODE_GPC 0x00000114 - -#define REG_A6XX_RBBM_CLOCK_DELAY_GPC 0x00000115 - -#define REG_A6XX_RBBM_CLOCK_HYST_GPC 0x00000116 - -#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2 0x00000117 - -#define REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX 0x00000118 - -#define REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX 0x00000119 - -#define REG_A6XX_RBBM_CLOCK_HYST_GMU_GX 0x0000011a - -#define REG_A6XX_RBBM_CLOCK_MODE_HLSQ 0x0000011b - -#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ 0x0000011c - -#define REG_A6XX_RBBM_CLOCK_HYST_HLSQ 0x0000011d - -#define REG_A7XX_RBBM_CGC_GLOBAL_LOAD_CMD 0x0000011e - -#define REG_A7XX_RBBM_CGC_P2S_TRIG_CMD 0x0000011f - -#define REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE 0x00000120 - -#define REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE 0x00000121 - -#define REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE 0x00000122 - -#define REG_A7XX_RBBM_CGC_P2S_STATUS 0x00000122 -#define A7XX_RBBM_CGC_P2S_STATUS_TXDONE 0x00000001 - -#define REG_A6XX_RBBM_CLOCK_CNTL_FCHE 0x00000123 - -#define REG_A6XX_RBBM_CLOCK_DELAY_FCHE 0x00000124 - -#define REG_A6XX_RBBM_CLOCK_HYST_FCHE 0x00000125 - -#define REG_A6XX_RBBM_CLOCK_CNTL_MHUB 0x00000126 - -#define REG_A6XX_RBBM_CLOCK_DELAY_MHUB 0x00000127 - -#define REG_A6XX_RBBM_CLOCK_HYST_MHUB 0x00000128 - -#define REG_A6XX_RBBM_CLOCK_DELAY_GLC 0x00000129 - -#define REG_A6XX_RBBM_CLOCK_HYST_GLC 0x0000012a - -#define REG_A6XX_RBBM_CLOCK_CNTL_GLC 0x0000012b - -#define REG_A7XX_RBBM_CLOCK_HYST2_VFD 0x0000012f - -#define REG_A6XX_RBBM_LPAC_GBIF_CLIENT_QOS_CNTL 0x000005ff - -#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600 - -#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_B 0x00000601 - -#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_C 0x00000602 - -#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_D 0x00000603 -#define 
A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff -#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00 -#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK; -} - -#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLT 0x00000604 -#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f -#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000 -#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000 -#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK; -} - -#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLM 0x00000605 -#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000 -#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK; -} - -#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0 0x00000608 - -#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1 0x00000609 - -#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2 0x0000060a - -#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3 0x0000060b - -#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0 0x0000060c - -#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1 0x0000060d - -#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2 0x0000060e - -#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3 0x0000060f - -#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000610 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val) -{ - return ((val) << 
A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK; -} - -#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000611 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK; -} -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK; -} -#define 
A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000 -#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28 -static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val) -{ - return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK; -} - -#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000062f - -#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000630 - -#define REG_A6XX_VSC_PERFCTR_VSC_SEL(i0) (0x00000cd8 + 0x1*(i0)) - -#define REG_A7XX_VSC_UNKNOWN_0CD8 0x00000cd8 -#define A7XX_VSC_UNKNOWN_0CD8_BINNING 0x00000001 - -#define REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000c800 - -#define REG_A6XX_HLSQ_DBG_READ_SEL 0x0000d000 - -#define REG_A6XX_UCHE_ADDR_MODE_CNTL 0x00000e00 - -#define REG_A6XX_UCHE_MODE_CNTL 0x00000e01 - -#define REG_A6XX_UCHE_WRITE_RANGE_MAX 0x00000e05 - -#define REG_A6XX_UCHE_WRITE_THRU_BASE 0x00000e07 - -#define REG_A6XX_UCHE_TRAP_BASE 0x00000e09 - -#define REG_A6XX_UCHE_GMEM_RANGE_MIN 0x00000e0b - -#define REG_A6XX_UCHE_GMEM_RANGE_MAX 0x00000e0d - -#define REG_A6XX_UCHE_CACHE_WAYS 0x00000e17 - -#define REG_A6XX_UCHE_FILTER_CNTL 0x00000e18 - -#define REG_A6XX_UCHE_CLIENT_PF 0x00000e19 -#define A6XX_UCHE_CLIENT_PF_PERFSEL__MASK 0x000000ff -#define A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT 0 -static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val) -{ - return ((val) << A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT) & A6XX_UCHE_CLIENT_PF_PERFSEL__MASK; -} - -#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL(i0) (0x00000e1c + 0x1*(i0)) - -#define REG_A6XX_UCHE_GBIF_GX_CONFIG 0x00000e3a - -#define REG_A6XX_UCHE_CMDQ_CONFIG 0x00000e3c - -#define REG_A6XX_VBIF_VERSION 0x00003000 - -#define REG_A6XX_VBIF_CLKON 0x00003001 -#define A6XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000002 - -#define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a - -#define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080 - -#define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081 - -#define REG_A6XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084 - -#define REG_A6XX_VBIF_TEST_BUS1_CTRL0 0x00003085 - -#define REG_A6XX_VBIF_TEST_BUS1_CTRL1 0x00003086 -#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK 0x0000000f -#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT 0 -static inline uint32_t A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL(uint32_t val) -{ - return ((val) << A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK; -} - -#define REG_A6XX_VBIF_TEST_BUS2_CTRL0 0x00003087 - -#define REG_A6XX_VBIF_TEST_BUS2_CTRL1 0x00003088 -#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK 0x000001ff -#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT 0 -static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val) -{ - return ((val) << A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK; -} - -#define REG_A6XX_VBIF_TEST_BUS_OUT 0x0000308c - -#define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0 - -#define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1 - -#define REG_A6XX_VBIF_PERF_CNT_SEL2 0x000030d2 - -#define REG_A6XX_VBIF_PERF_CNT_SEL3 0x000030d3 - -#define REG_A6XX_VBIF_PERF_CNT_LOW0 0x000030d8 - -#define REG_A6XX_VBIF_PERF_CNT_LOW1 0x000030d9 - -#define REG_A6XX_VBIF_PERF_CNT_LOW2 0x000030da - -#define REG_A6XX_VBIF_PERF_CNT_LOW3 0x000030db - -#define REG_A6XX_VBIF_PERF_CNT_HIGH0 0x000030e0 - -#define REG_A6XX_VBIF_PERF_CNT_HIGH1 0x000030e1 - -#define REG_A6XX_VBIF_PERF_CNT_HIGH2 0x000030e2 - -#define REG_A6XX_VBIF_PERF_CNT_HIGH3 0x000030e3 - -#define REG_A6XX_VBIF_PERF_PWR_CNT_EN0 0x00003100 - -#define REG_A6XX_VBIF_PERF_PWR_CNT_EN1 0x00003101 - -#define 
REG_A6XX_VBIF_PERF_PWR_CNT_EN2 0x00003102 - -#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110 - -#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111 - -#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112 - -#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118 - -#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119 - -#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a - -#define REG_A6XX_GBIF_SCACHE_CNTL0 0x00003c01 - -#define REG_A6XX_GBIF_SCACHE_CNTL1 0x00003c02 - -#define REG_A6XX_GBIF_QSB_SIDE0 0x00003c03 - -#define REG_A6XX_GBIF_QSB_SIDE1 0x00003c04 - -#define REG_A6XX_GBIF_QSB_SIDE2 0x00003c05 - -#define REG_A6XX_GBIF_QSB_SIDE3 0x00003c06 - -#define REG_A6XX_GBIF_HALT 0x00003c45 - -#define REG_A6XX_GBIF_HALT_ACK 0x00003c46 - -#define REG_A6XX_GBIF_PERF_PWR_CNT_EN 0x00003cc0 - -#define REG_A6XX_GBIF_PERF_PWR_CNT_CLR 0x00003cc1 - -#define REG_A6XX_GBIF_PERF_CNT_SEL 0x00003cc2 - -#define REG_A6XX_GBIF_PERF_PWR_CNT_SEL 0x00003cc3 - -#define REG_A6XX_GBIF_PERF_CNT_LOW0 0x00003cc4 - -#define REG_A6XX_GBIF_PERF_CNT_LOW1 0x00003cc5 - -#define REG_A6XX_GBIF_PERF_CNT_LOW2 0x00003cc6 - -#define REG_A6XX_GBIF_PERF_CNT_LOW3 0x00003cc7 - -#define REG_A6XX_GBIF_PERF_CNT_HIGH0 0x00003cc8 - -#define REG_A6XX_GBIF_PERF_CNT_HIGH1 0x00003cc9 - -#define REG_A6XX_GBIF_PERF_CNT_HIGH2 0x00003cca - -#define REG_A6XX_GBIF_PERF_CNT_HIGH3 0x00003ccb - -#define REG_A6XX_GBIF_PWR_CNT_LOW0 0x00003ccc - -#define REG_A6XX_GBIF_PWR_CNT_LOW1 0x00003ccd - -#define REG_A6XX_GBIF_PWR_CNT_LOW2 0x00003cce - -#define REG_A6XX_GBIF_PWR_CNT_HIGH0 0x00003ccf - -#define REG_A6XX_GBIF_PWR_CNT_HIGH1 0x00003cd0 - -#define REG_A6XX_GBIF_PWR_CNT_HIGH2 0x00003cd1 - -#define REG_A6XX_VSC_DBG_ECO_CNTL 0x00000c00 - -#define REG_A6XX_VSC_BIN_SIZE 0x00000c02 -#define A6XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff -#define A6XX_VSC_BIN_SIZE_WIDTH__SHIFT 0 -static inline uint32_t A6XX_VSC_BIN_SIZE_WIDTH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A6XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A6XX_VSC_BIN_SIZE_WIDTH__MASK; -} -#define A6XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001ff00 -#define A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT 8 -static inline uint32_t A6XX_VSC_BIN_SIZE_HEIGHT(uint32_t val) -{ - assert(!(val & 0xf)); - return (((val >> 4)) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK; -} - -#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS 0x00000c03 - -#define REG_A6XX_VSC_BIN_COUNT 0x00000c06 -#define A6XX_VSC_BIN_COUNT_NX__MASK 0x000007fe -#define A6XX_VSC_BIN_COUNT_NX__SHIFT 1 -static inline uint32_t A6XX_VSC_BIN_COUNT_NX(uint32_t val) -{ - return ((val) << A6XX_VSC_BIN_COUNT_NX__SHIFT) & A6XX_VSC_BIN_COUNT_NX__MASK; -} -#define A6XX_VSC_BIN_COUNT_NY__MASK 0x001ff800 -#define A6XX_VSC_BIN_COUNT_NY__SHIFT 11 -static inline uint32_t A6XX_VSC_BIN_COUNT_NY(uint32_t val) -{ - return ((val) << A6XX_VSC_BIN_COUNT_NY__SHIFT) & A6XX_VSC_BIN_COUNT_NY__MASK; -} - -#define REG_A6XX_VSC_PIPE_CONFIG(i0) (0x00000c10 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; } -#define A6XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff -#define A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0 -static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_X(uint32_t val) -{ - return ((val) << A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_X__MASK; -} -#define A6XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00 -#define A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10 -static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val) -{ - return ((val) << A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_Y__MASK; -} 
-#define A6XX_VSC_PIPE_CONFIG_REG_W__MASK 0x03f00000 -#define A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20 -static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_W(uint32_t val) -{ - return ((val) << A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_W__MASK; -} -#define A6XX_VSC_PIPE_CONFIG_REG_H__MASK 0xfc000000 -#define A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT 26 -static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val) -{ - return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK; -} - -#define REG_A6XX_VSC_PRIM_STRM_ADDRESS 0x00000c30 - -#define REG_A6XX_VSC_PRIM_STRM_PITCH 0x00000c32 - -#define REG_A6XX_VSC_PRIM_STRM_LIMIT 0x00000c33 - -#define REG_A6XX_VSC_DRAW_STRM_ADDRESS 0x00000c34 - -#define REG_A6XX_VSC_DRAW_STRM_PITCH 0x00000c36 - -#define REG_A6XX_VSC_DRAW_STRM_LIMIT 0x00000c37 - -#define REG_A6XX_VSC_STATE(i0) (0x00000c38 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_VSC_STATE_REG(uint32_t i0) { return 0x00000c38 + 0x1*i0; } - -#define REG_A6XX_VSC_PRIM_STRM_SIZE(i0) (0x00000c58 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE_REG(uint32_t i0) { return 0x00000c58 + 0x1*i0; } - -#define REG_A6XX_VSC_DRAW_STRM_SIZE(i0) (0x00000c78 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; } - -#define REG_A7XX_UCHE_UNKNOWN_0E10 0x00000e10 - -#define REG_A7XX_UCHE_UNKNOWN_0E11 0x00000e11 - -#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12 - -#define REG_A6XX_GRAS_CL_CNTL 0x00008000 -#define A6XX_GRAS_CL_CNTL_CLIP_DISABLE 0x00000001 -#define A6XX_GRAS_CL_CNTL_ZNEAR_CLIP_DISABLE 0x00000002 -#define A6XX_GRAS_CL_CNTL_ZFAR_CLIP_DISABLE 0x00000004 -#define A6XX_GRAS_CL_CNTL_Z_CLAMP_ENABLE 0x00000020 -#define A6XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040 -#define A6XX_GRAS_CL_CNTL_VP_CLIP_CODE_IGNORE 0x00000080 -#define A6XX_GRAS_CL_CNTL_VP_XFORM_DISABLE 0x00000100 -#define A6XX_GRAS_CL_CNTL_PERSP_DIVISION_DISABLE 0x00000200 - -#define REG_A6XX_GRAS_VS_CL_CNTL 0x00008001 -#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK 0x000000ff -#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT 0 -static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CLIP_MASK(uint32_t val) -{ - return ((val) << A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK; -} -#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK 0x0000ff00 -#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT 8 -static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CULL_MASK(uint32_t val) -{ - return ((val) << A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK; -} - -#define REG_A6XX_GRAS_DS_CL_CNTL 0x00008002 -#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK 0x000000ff -#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT 0 -static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CLIP_MASK(uint32_t val) -{ - return ((val) << A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK; -} -#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK 0x0000ff00 -#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT 8 -static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CULL_MASK(uint32_t val) -{ - return ((val) << A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK; -} - -#define REG_A6XX_GRAS_GS_CL_CNTL 0x00008003 -#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK 0x000000ff -#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT 0 -static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CLIP_MASK(uint32_t val) -{ - return ((val) << A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK; -} -#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK 0x0000ff00 
-#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT 8 -static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CULL_MASK(uint32_t val) -{ - return ((val) << A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK; -} - -#define REG_A6XX_GRAS_MAX_LAYER_INDEX 0x00008004 - -#define REG_A6XX_GRAS_CNTL 0x00008005 -#define A6XX_GRAS_CNTL_IJ_PERSP_PIXEL 0x00000001 -#define A6XX_GRAS_CNTL_IJ_PERSP_CENTROID 0x00000002 -#define A6XX_GRAS_CNTL_IJ_PERSP_SAMPLE 0x00000004 -#define A6XX_GRAS_CNTL_IJ_LINEAR_PIXEL 0x00000008 -#define A6XX_GRAS_CNTL_IJ_LINEAR_CENTROID 0x00000010 -#define A6XX_GRAS_CNTL_IJ_LINEAR_SAMPLE 0x00000020 -#define A6XX_GRAS_CNTL_COORD_MASK__MASK 0x000003c0 -#define A6XX_GRAS_CNTL_COORD_MASK__SHIFT 6 -static inline uint32_t A6XX_GRAS_CNTL_COORD_MASK(uint32_t val) -{ - return ((val) << A6XX_GRAS_CNTL_COORD_MASK__SHIFT) & A6XX_GRAS_CNTL_COORD_MASK__MASK; -} -#define A6XX_GRAS_CNTL_UNK10 0x00000400 -#define A6XX_GRAS_CNTL_UNK11 0x00000800 - -#define REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x00008006 -#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000001ff -#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0 -static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val) -{ - return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK; -} -#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x0007fc00 -#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10 -static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val) -{ - return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK; -} - -#define REG_A7XX_GRAS_UNKNOWN_8007 0x00008007 - -#define REG_A7XX_GRAS_UNKNOWN_8008 0x00008008 - -#define REG_A7XX_GRAS_UNKNOWN_8009 0x00008009 - -#define REG_A7XX_GRAS_UNKNOWN_800A 0x0000800a - -#define REG_A7XX_GRAS_UNKNOWN_800B 0x0000800b - -#define REG_A7XX_GRAS_UNKNOWN_800C 0x0000800c - -#define REG_A6XX_GRAS_CL_VPORT(i0) (0x00008010 + 0x6*(i0)) - -static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XOFFSET(uint32_t i0) { return 0x00008010 + 0x6*i0; } -#define A6XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff -#define A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0 -static inline uint32_t A6XX_GRAS_CL_VPORT_XOFFSET(float val) -{ - return ((fui(val)) << A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_XOFFSET__MASK; -} - -static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XSCALE(uint32_t i0) { return 0x00008011 + 0x6*i0; } -#define A6XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff -#define A6XX_GRAS_CL_VPORT_XSCALE__SHIFT 0 -static inline uint32_t A6XX_GRAS_CL_VPORT_XSCALE(float val) -{ - return ((fui(val)) << A6XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_XSCALE__MASK; -} - -static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YOFFSET(uint32_t i0) { return 0x00008012 + 0x6*i0; } -#define A6XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff -#define A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0 -static inline uint32_t A6XX_GRAS_CL_VPORT_YOFFSET(float val) -{ - return ((fui(val)) << A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_YOFFSET__MASK; -} - -static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YSCALE(uint32_t i0) { return 0x00008013 + 0x6*i0; } -#define A6XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff -#define A6XX_GRAS_CL_VPORT_YSCALE__SHIFT 0 -static inline uint32_t A6XX_GRAS_CL_VPORT_YSCALE(float val) -{ - return ((fui(val)) << A6XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_YSCALE__MASK; -} - -static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZOFFSET(uint32_t i0) { return 0x00008014 + 0x6*i0; } -#define 
A6XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff -#define A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0 -static inline uint32_t A6XX_GRAS_CL_VPORT_ZOFFSET(float val) -{ - return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_ZOFFSET__MASK; -} - -static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZSCALE(uint32_t i0) { return 0x00008015 + 0x6*i0; } -#define A6XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff -#define A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0 -static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE(float val) -{ - return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE__MASK; -} - -#define REG_A6XX_GRAS_CL_Z_CLAMP(i0) (0x00008070 + 0x2*(i0)) - -static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MIN(uint32_t i0) { return 0x00008070 + 0x2*i0; } -#define A6XX_GRAS_CL_Z_CLAMP_MIN__MASK 0xffffffff -#define A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT 0 -static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MIN(float val) -{ - return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MIN__MASK; -} - -static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MAX(uint32_t i0) { return 0x00008071 + 0x2*i0; } -#define A6XX_GRAS_CL_Z_CLAMP_MAX__MASK 0xffffffff -#define A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT 0 -static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MAX(float val) -{ - return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MAX__MASK; -} - -#define REG_A6XX_GRAS_SU_CNTL 0x00008090 -#define A6XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001 -#define A6XX_GRAS_SU_CNTL_CULL_BACK 0x00000002 -#define A6XX_GRAS_SU_CNTL_FRONT_CW 0x00000004 -#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8 -#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3 -static inline uint32_t A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val) -{ - return ((((int32_t)(val * 4.0))) << A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK; -} -#define A6XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800 -#define A6XX_GRAS_SU_CNTL_UNK12 0x00001000 -#define A6XX_GRAS_SU_CNTL_LINE_MODE__MASK 0x00002000 -#define A6XX_GRAS_SU_CNTL_LINE_MODE__SHIFT 13 -static inline uint32_t A6XX_GRAS_SU_CNTL_LINE_MODE(enum a5xx_line_mode val) -{ - return ((val) << A6XX_GRAS_SU_CNTL_LINE_MODE__SHIFT) & A6XX_GRAS_SU_CNTL_LINE_MODE__MASK; -} -#define A6XX_GRAS_SU_CNTL_UNK15__MASK 0x00018000 -#define A6XX_GRAS_SU_CNTL_UNK15__SHIFT 15 -static inline uint32_t A6XX_GRAS_SU_CNTL_UNK15(uint32_t val) -{ - return ((val) << A6XX_GRAS_SU_CNTL_UNK15__SHIFT) & A6XX_GRAS_SU_CNTL_UNK15__MASK; -} -#define A6XX_GRAS_SU_CNTL_MULTIVIEW_ENABLE 0x00020000 -#define A6XX_GRAS_SU_CNTL_RENDERTARGETINDEXINCR 0x00040000 -#define A6XX_GRAS_SU_CNTL_VIEWPORTINDEXINCR 0x00080000 -#define A6XX_GRAS_SU_CNTL_UNK20__MASK 0x00700000 -#define A6XX_GRAS_SU_CNTL_UNK20__SHIFT 20 -static inline uint32_t A6XX_GRAS_SU_CNTL_UNK20(uint32_t val) -{ - return ((val) << A6XX_GRAS_SU_CNTL_UNK20__SHIFT) & A6XX_GRAS_SU_CNTL_UNK20__MASK; -} - -#define REG_A6XX_GRAS_SU_POINT_MINMAX 0x00008091 -#define A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff -#define A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0 -static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MIN(float val) -{ - return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK; -} -#define A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000 -#define A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16 -static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MAX(float val) -{ - return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK; -} - -#define 
REG_A6XX_GRAS_SU_POINT_SIZE 0x00008092 -#define A6XX_GRAS_SU_POINT_SIZE__MASK 0x0000ffff -#define A6XX_GRAS_SU_POINT_SIZE__SHIFT 0 -static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK; -} - -#define REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL 0x00008094 -#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK 0x00000003 -#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT 0 -static inline uint32_t A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val) -{ - return ((val) << A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK; -} - -#define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095 -#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff -#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0 -static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_SCALE(float val) -{ - return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK; -} - -#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00008096 -#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff -#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0 -static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET(float val) -{ - return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK; -} - -#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x00008097 -#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff -#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0 -static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val) -{ - return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK; -} - -#define REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO 0x00008098 -#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007 -#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0 -static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val) -{ - return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK; -} -#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3 0x00000008 - -#define REG_A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x00008099 -#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN 0x00000001 -#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__MASK 0x00000006 -#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__SHIFT 1 -static inline uint32_t A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT(uint32_t val) -{ - return ((val) << A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__SHIFT) & A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__MASK; -} -#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_INNERCONSERVATIVERASEN 0x00000008 -#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__MASK 0x00000030 -#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__SHIFT 4 -static inline uint32_t A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4(uint32_t val) -{ - return ((val) << A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__SHIFT) & A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__MASK; -} - -#define REG_A6XX_GRAS_SU_PATH_RENDERING_CNTL 0x0000809a -#define A6XX_GRAS_SU_PATH_RENDERING_CNTL_UNK0 0x00000001 -#define A6XX_GRAS_SU_PATH_RENDERING_CNTL_LINELENGTHEN 0x00000002 - -#define REG_A6XX_GRAS_VS_LAYER_CNTL 0x0000809b -#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_LAYER 0x00000001 -#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_VIEW 0x00000002 - -#define REG_A6XX_GRAS_GS_LAYER_CNTL 0x0000809c -#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_LAYER 
0x00000001 -#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_VIEW 0x00000002 - -#define REG_A6XX_GRAS_DS_LAYER_CNTL 0x0000809d -#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_LAYER 0x00000001 -#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_VIEW 0x00000002 - -#define REG_A6XX_GRAS_SC_CNTL 0x000080a0 -#define A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__MASK 0x00000007 -#define A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__SHIFT 0 -static inline uint32_t A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__SHIFT) & A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__MASK; -} -#define A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__MASK 0x00000018 -#define A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__SHIFT 3 -static inline uint32_t A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE(enum a6xx_single_prim_mode val) -{ - return ((val) << A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__SHIFT) & A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__MASK; -} -#define A6XX_GRAS_SC_CNTL_RASTER_MODE__MASK 0x00000020 -#define A6XX_GRAS_SC_CNTL_RASTER_MODE__SHIFT 5 -static inline uint32_t A6XX_GRAS_SC_CNTL_RASTER_MODE(enum a6xx_raster_mode val) -{ - return ((val) << A6XX_GRAS_SC_CNTL_RASTER_MODE__SHIFT) & A6XX_GRAS_SC_CNTL_RASTER_MODE__MASK; -} -#define A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__MASK 0x000000c0 -#define A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__SHIFT 6 -static inline uint32_t A6XX_GRAS_SC_CNTL_RASTER_DIRECTION(enum a6xx_raster_direction val) -{ - return ((val) << A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__SHIFT) & A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__MASK; -} -#define A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__MASK 0x00000100 -#define A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__SHIFT 8 -static inline uint32_t A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION(enum a6xx_sequenced_thread_dist val) -{ - return ((val) << A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__SHIFT) & A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__MASK; -} -#define A6XX_GRAS_SC_CNTL_UNK9 0x00000200 -#define A6XX_GRAS_SC_CNTL_ROTATION__MASK 0x00000c00 -#define A6XX_GRAS_SC_CNTL_ROTATION__SHIFT 10 -static inline uint32_t A6XX_GRAS_SC_CNTL_ROTATION(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_CNTL_ROTATION__SHIFT) & A6XX_GRAS_SC_CNTL_ROTATION__MASK; -} -#define A6XX_GRAS_SC_CNTL_EARLYVIZOUTEN 0x00001000 - -#define REG_A6XX_GRAS_BIN_CONTROL 0x000080a1 -#define A6XX_GRAS_BIN_CONTROL_BINW__MASK 0x0000003f -#define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT 0 -static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK; -} -#define A6XX_GRAS_BIN_CONTROL_BINH__MASK 0x00007f00 -#define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT 8 -static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val) -{ - assert(!(val & 0xf)); - return (((val >> 4)) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK; -} -#define A6XX_GRAS_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000 -#define A6XX_GRAS_BIN_CONTROL_RENDER_MODE__SHIFT 18 -static inline uint32_t A6XX_GRAS_BIN_CONTROL_RENDER_MODE(enum a6xx_render_mode val) -{ - return ((val) << A6XX_GRAS_BIN_CONTROL_RENDER_MODE__SHIFT) & A6XX_GRAS_BIN_CONTROL_RENDER_MODE__MASK; -} -#define A6XX_GRAS_BIN_CONTROL_FORCE_LRZ_WRITE_DIS 0x00200000 -#define A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__MASK 0x00c00000 -#define A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__SHIFT 22 -static inline uint32_t A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION(enum a6xx_buffers_location val) -{ - return ((val) << A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__SHIFT) & 
A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__MASK; -} -#define A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK 0x07000000 -#define A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT 24 -static inline uint32_t A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val) -{ - return ((val) << A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK; -} -#define A6XX_GRAS_BIN_CONTROL_UNK27 0x08000000 - -#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2 -#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 -#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 -static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK; -} -#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2 0x00000004 -#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3 0x00000008 - -#define REG_A6XX_GRAS_DEST_MSAA_CNTL 0x000080a3 -#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003 -#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT 0 -static inline uint32_t A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK; -} -#define A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004 - -#define REG_A6XX_GRAS_SAMPLE_CONFIG 0x000080a4 -#define A6XX_GRAS_SAMPLE_CONFIG_UNK0 0x00000001 -#define A6XX_GRAS_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002 - -#define REG_A6XX_GRAS_SAMPLE_LOCATION_0 0x000080a5 -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK; -} -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0 -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK; -} -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00 -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK; -} -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000 -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK; -} -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000 -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK; -} -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000 -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK; -} -#define 
A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000 -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK; -} -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000 -#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK; -} - -#define REG_A6XX_GRAS_SAMPLE_LOCATION_1 0x000080a6 -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK; -} -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0 -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK; -} -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00 -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK; -} -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000 -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK; -} -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000 -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK; -} -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000 -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK; -} -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000 -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK; -} -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000 -#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28 -static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK; -} - -#define REG_A7XX_GRAS_UNKNOWN_80A7 0x000080a7 - -#define REG_A6XX_GRAS_UNKNOWN_80AF 0x000080af - -#define 
REG_A6XX_GRAS_SC_SCREEN_SCISSOR(i0) (0x000080b0 + 0x2*(i0)) - -static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL(uint32_t i0) { return 0x000080b0 + 0x2*i0; } -#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x0000ffff -#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0 -static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK; -} -#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0xffff0000 -#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16 -static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK; -} - -static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_BR(uint32_t i0) { return 0x000080b1 + 0x2*i0; } -#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x0000ffff -#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0 -static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK; -} -#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0xffff0000 -#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16 -static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK; -} - -#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR(i0) (0x000080d0 + 0x2*(i0)) - -static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL(uint32_t i0) { return 0x000080d0 + 0x2*i0; } -#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK 0x0000ffff -#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT 0 -static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK; -} -#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK 0xffff0000 -#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT 16 -static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK; -} - -static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR(uint32_t i0) { return 0x000080d1 + 0x2*i0; } -#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK 0x0000ffff -#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT 0 -static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK; -} -#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK 0xffff0000 -#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT 16 -static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK; -} - -#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL 0x000080f0 -#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00003fff -#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0 -static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK; -} -#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x3fff0000 -#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16 -static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK; -} - 
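As a usage illustration of the scissor helpers above, a top-left coordinate packs X into bits [13:0] and Y into bits [29:16] of a single word. A minimal sketch under those field definitions (pack_window_scissor_tl() is a hypothetical name, not part of the header):

	/*
	 * Sketch: pack a window-scissor top-left coordinate the same way
	 * A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X/_Y do (X bits [13:0], Y bits [29:16]).
	 */
	#include <stdint.h>

	static inline uint32_t pack_window_scissor_tl(uint32_t x, uint32_t y)
	{
		return ((x <<  0) & 0x00003fffu) |
		       ((y << 16) & 0x3fff0000u);
	}
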
-#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_BR 0x000080f1 -#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00003fff -#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0 -static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK; -} -#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x3fff0000 -#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16 -static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val) -{ - return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK; -} - -#define REG_A7XX_GRAS_UNKNOWN_80F4 0x000080f4 - -#define REG_A7XX_GRAS_UNKNOWN_80F5 0x000080f5 - -#define REG_A7XX_GRAS_UNKNOWN_80F6 0x000080f6 - -#define REG_A7XX_GRAS_UNKNOWN_80F8 0x000080f8 - -#define REG_A7XX_GRAS_UNKNOWN_80F9 0x000080f9 - -#define REG_A7XX_GRAS_UNKNOWN_80FA 0x000080fa - -#define REG_A6XX_GRAS_LRZ_CNTL 0x00008100 -#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001 -#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002 -#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004 -#define A6XX_GRAS_LRZ_CNTL_FC_ENABLE 0x00000008 -#define A6XX_GRAS_LRZ_CNTL_Z_TEST_ENABLE 0x00000010 -#define A6XX_GRAS_LRZ_CNTL_Z_BOUNDS_ENABLE 0x00000020 -#define A6XX_GRAS_LRZ_CNTL_DIR__MASK 0x000000c0 -#define A6XX_GRAS_LRZ_CNTL_DIR__SHIFT 6 -static inline uint32_t A6XX_GRAS_LRZ_CNTL_DIR(enum a6xx_lrz_dir_status val) -{ - return ((val) << A6XX_GRAS_LRZ_CNTL_DIR__SHIFT) & A6XX_GRAS_LRZ_CNTL_DIR__MASK; -} -#define A6XX_GRAS_LRZ_CNTL_DIR_WRITE 0x00000100 -#define A6XX_GRAS_LRZ_CNTL_DISABLE_ON_WRONG_DIR 0x00000200 -#define A6XX_GRAS_LRZ_CNTL_Z_FUNC__MASK 0x00003800 -#define A6XX_GRAS_LRZ_CNTL_Z_FUNC__SHIFT 11 -static inline uint32_t A6XX_GRAS_LRZ_CNTL_Z_FUNC(enum adreno_compare_func val) -{ - return ((val) << A6XX_GRAS_LRZ_CNTL_Z_FUNC__SHIFT) & A6XX_GRAS_LRZ_CNTL_Z_FUNC__MASK; -} - -#define REG_A6XX_GRAS_LRZ_PS_INPUT_CNTL 0x00008101 -#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_SAMPLEID 0x00000001 -#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__MASK 0x00000006 -#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__SHIFT 1 -static inline uint32_t A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE(enum a6xx_fragcoord_sample_mode val) -{ - return ((val) << A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__SHIFT) & A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__MASK; -} - -#define REG_A6XX_GRAS_LRZ_MRT_BUF_INFO_0 0x00008102 -#define A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__MASK 0x000000ff -#define A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT(enum a6xx_format val) -{ - return ((val) << A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__SHIFT) & A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__MASK; -} - -#define REG_A6XX_GRAS_LRZ_BUFFER_BASE 0x00008103 - -#define REG_A6XX_GRAS_LRZ_BUFFER_PITCH 0x00008105 -#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK 0x000000ff -#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT 0 -static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK; -} -#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffffc00 -#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT 10 -static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0xf)); - return (((val >> 4)) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & 
A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK; -} - -#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE 0x00008106 - -#define REG_A6XX_GRAS_SAMPLE_CNTL 0x00008109 -#define A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001 - -#define REG_A6XX_GRAS_LRZ_DEPTH_VIEW 0x0000810a -#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__MASK 0x000007ff -#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__SHIFT 0 -static inline uint32_t A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER(uint32_t val) -{ - return ((val) << A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__SHIFT) & A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__MASK; -} -#define A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__MASK 0x07ff0000 -#define A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__SHIFT 16 -static inline uint32_t A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT(uint32_t val) -{ - return ((val) << A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__SHIFT) & A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__MASK; -} -#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__MASK 0xf0000000 -#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__SHIFT 28 -static inline uint32_t A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL(uint32_t val) -{ - return ((val) << A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__SHIFT) & A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__MASK; -} - -#define REG_A7XX_GRAS_UNKNOWN_810B 0x0000810b - -#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110 - -#define REG_A7XX_GRAS_LRZ_CLEAR_DEPTH_F32 0x00008111 -#define A7XX_GRAS_LRZ_CLEAR_DEPTH_F32__MASK 0xffffffff -#define A7XX_GRAS_LRZ_CLEAR_DEPTH_F32__SHIFT 0 -static inline uint32_t A7XX_GRAS_LRZ_CLEAR_DEPTH_F32(float val) -{ - return ((fui(val)) << A7XX_GRAS_LRZ_CLEAR_DEPTH_F32__SHIFT) & A7XX_GRAS_LRZ_CLEAR_DEPTH_F32__MASK; -} - -#define REG_A7XX_GRAS_UNKNOWN_8113 0x00008113 - -#define REG_A7XX_GRAS_UNKNOWN_8120 0x00008120 - -#define REG_A7XX_GRAS_UNKNOWN_8121 0x00008121 - -#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400 -#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK 0x00000007 -#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT 0 -static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val) -{ - return ((val) << A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK; -} -#define A6XX_GRAS_2D_BLIT_CNTL_OVERWRITEEN 0x00000008 -#define A6XX_GRAS_2D_BLIT_CNTL_UNK4__MASK 0x00000070 -#define A6XX_GRAS_2D_BLIT_CNTL_UNK4__SHIFT 4 -static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK4(uint32_t val) -{ - return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK4__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK4__MASK; -} -#define A6XX_GRAS_2D_BLIT_CNTL_SOLID_COLOR 0x00000080 -#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00 -#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8 -static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val) -{ - return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK; -} -#define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR 0x00010000 -#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK 0x00060000 -#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT 17 -static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK17(uint32_t val) -{ - return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK; -} -#define A6XX_GRAS_2D_BLIT_CNTL_D24S8 0x00080000 -#define A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK 0x00f00000 -#define A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT 20 -static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_MASK(uint32_t val) -{ - return ((val) << A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK; -} -#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK 0x1f000000 -#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT 24 -static inline 
uint32_t A6XX_GRAS_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val) -{ - return ((val) << A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK; -} -#define A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__MASK 0x20000000 -#define A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__SHIFT 29 -static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE(enum a6xx_raster_mode val) -{ - return ((val) << A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__MASK; -} -#define A6XX_GRAS_2D_BLIT_CNTL_UNK30 0x40000000 - -#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401 -#define A6XX_GRAS_2D_SRC_TL_X__MASK 0x01ffff00 -#define A6XX_GRAS_2D_SRC_TL_X__SHIFT 8 -static inline uint32_t A6XX_GRAS_2D_SRC_TL_X(int32_t val) -{ - return ((val) << A6XX_GRAS_2D_SRC_TL_X__SHIFT) & A6XX_GRAS_2D_SRC_TL_X__MASK; -} - -#define REG_A6XX_GRAS_2D_SRC_BR_X 0x00008402 -#define A6XX_GRAS_2D_SRC_BR_X__MASK 0x01ffff00 -#define A6XX_GRAS_2D_SRC_BR_X__SHIFT 8 -static inline uint32_t A6XX_GRAS_2D_SRC_BR_X(int32_t val) -{ - return ((val) << A6XX_GRAS_2D_SRC_BR_X__SHIFT) & A6XX_GRAS_2D_SRC_BR_X__MASK; -} - -#define REG_A6XX_GRAS_2D_SRC_TL_Y 0x00008403 -#define A6XX_GRAS_2D_SRC_TL_Y__MASK 0x01ffff00 -#define A6XX_GRAS_2D_SRC_TL_Y__SHIFT 8 -static inline uint32_t A6XX_GRAS_2D_SRC_TL_Y(int32_t val) -{ - return ((val) << A6XX_GRAS_2D_SRC_TL_Y__SHIFT) & A6XX_GRAS_2D_SRC_TL_Y__MASK; -} - -#define REG_A6XX_GRAS_2D_SRC_BR_Y 0x00008404 -#define A6XX_GRAS_2D_SRC_BR_Y__MASK 0x01ffff00 -#define A6XX_GRAS_2D_SRC_BR_Y__SHIFT 8 -static inline uint32_t A6XX_GRAS_2D_SRC_BR_Y(int32_t val) -{ - return ((val) << A6XX_GRAS_2D_SRC_BR_Y__SHIFT) & A6XX_GRAS_2D_SRC_BR_Y__MASK; -} - -#define REG_A6XX_GRAS_2D_DST_TL 0x00008405 -#define A6XX_GRAS_2D_DST_TL_X__MASK 0x00003fff -#define A6XX_GRAS_2D_DST_TL_X__SHIFT 0 -static inline uint32_t A6XX_GRAS_2D_DST_TL_X(uint32_t val) -{ - return ((val) << A6XX_GRAS_2D_DST_TL_X__SHIFT) & A6XX_GRAS_2D_DST_TL_X__MASK; -} -#define A6XX_GRAS_2D_DST_TL_Y__MASK 0x3fff0000 -#define A6XX_GRAS_2D_DST_TL_Y__SHIFT 16 -static inline uint32_t A6XX_GRAS_2D_DST_TL_Y(uint32_t val) -{ - return ((val) << A6XX_GRAS_2D_DST_TL_Y__SHIFT) & A6XX_GRAS_2D_DST_TL_Y__MASK; -} - -#define REG_A6XX_GRAS_2D_DST_BR 0x00008406 -#define A6XX_GRAS_2D_DST_BR_X__MASK 0x00003fff -#define A6XX_GRAS_2D_DST_BR_X__SHIFT 0 -static inline uint32_t A6XX_GRAS_2D_DST_BR_X(uint32_t val) -{ - return ((val) << A6XX_GRAS_2D_DST_BR_X__SHIFT) & A6XX_GRAS_2D_DST_BR_X__MASK; -} -#define A6XX_GRAS_2D_DST_BR_Y__MASK 0x3fff0000 -#define A6XX_GRAS_2D_DST_BR_Y__SHIFT 16 -static inline uint32_t A6XX_GRAS_2D_DST_BR_Y(uint32_t val) -{ - return ((val) << A6XX_GRAS_2D_DST_BR_Y__SHIFT) & A6XX_GRAS_2D_DST_BR_Y__MASK; -} - -#define REG_A6XX_GRAS_2D_UNKNOWN_8407 0x00008407 - -#define REG_A6XX_GRAS_2D_UNKNOWN_8408 0x00008408 - -#define REG_A6XX_GRAS_2D_UNKNOWN_8409 0x00008409 - -#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_1 0x0000840a -#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK 0x00003fff -#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT 0 -static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_X(uint32_t val) -{ - return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK; -} -#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK 0x3fff0000 -#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT 16 -static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_Y(uint32_t val) -{ - return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK; -} - -#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_2 0x0000840b -#define A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK 0x00003fff -#define 
A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT 0 -static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_X(uint32_t val) -{ - return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK; -} -#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK 0x3fff0000 -#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT 16 -static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_Y(uint32_t val) -{ - return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK; -} - -#define REG_A6XX_GRAS_DBG_ECO_CNTL 0x00008600 -#define A6XX_GRAS_DBG_ECO_CNTL_UNK7 0x00000080 -#define A6XX_GRAS_DBG_ECO_CNTL_LRZCACHELOCKDIS 0x00000800 - -#define REG_A6XX_GRAS_ADDR_MODE_CNTL 0x00008601 - -#define REG_A7XX_GRAS_NC_MODE_CNTL 0x00008602 - -#define REG_A6XX_GRAS_PERFCTR_TSE_SEL(i0) (0x00008610 + 0x1*(i0)) - -#define REG_A6XX_GRAS_PERFCTR_RAS_SEL(i0) (0x00008614 + 0x1*(i0)) - -#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL(i0) (0x00008618 + 0x1*(i0)) - -#define REG_A6XX_RB_BIN_CONTROL 0x00008800 -#define A6XX_RB_BIN_CONTROL_BINW__MASK 0x0000003f -#define A6XX_RB_BIN_CONTROL_BINW__SHIFT 0 -static inline uint32_t A6XX_RB_BIN_CONTROL_BINW(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A6XX_RB_BIN_CONTROL_BINW__SHIFT) & A6XX_RB_BIN_CONTROL_BINW__MASK; -} -#define A6XX_RB_BIN_CONTROL_BINH__MASK 0x00007f00 -#define A6XX_RB_BIN_CONTROL_BINH__SHIFT 8 -static inline uint32_t A6XX_RB_BIN_CONTROL_BINH(uint32_t val) -{ - assert(!(val & 0xf)); - return (((val >> 4)) << A6XX_RB_BIN_CONTROL_BINH__SHIFT) & A6XX_RB_BIN_CONTROL_BINH__MASK; -} -#define A6XX_RB_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000 -#define A6XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT 18 -static inline uint32_t A6XX_RB_BIN_CONTROL_RENDER_MODE(enum a6xx_render_mode val) -{ - return ((val) << A6XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT) & A6XX_RB_BIN_CONTROL_RENDER_MODE__MASK; -} -#define A6XX_RB_BIN_CONTROL_FORCE_LRZ_WRITE_DIS 0x00200000 -#define A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__MASK 0x00c00000 -#define A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__SHIFT 22 -static inline uint32_t A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION(enum a6xx_buffers_location val) -{ - return ((val) << A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__SHIFT) & A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__MASK; -} -#define A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK 0x07000000 -#define A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT 24 -static inline uint32_t A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val) -{ - return ((val) << A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK; -} - -#define REG_A7XX_RB_BIN_CONTROL 0x00008800 -#define A7XX_RB_BIN_CONTROL_BINW__MASK 0x0000003f -#define A7XX_RB_BIN_CONTROL_BINW__SHIFT 0 -static inline uint32_t A7XX_RB_BIN_CONTROL_BINW(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A7XX_RB_BIN_CONTROL_BINW__SHIFT) & A7XX_RB_BIN_CONTROL_BINW__MASK; -} -#define A7XX_RB_BIN_CONTROL_BINH__MASK 0x00007f00 -#define A7XX_RB_BIN_CONTROL_BINH__SHIFT 8 -static inline uint32_t A7XX_RB_BIN_CONTROL_BINH(uint32_t val) -{ - assert(!(val & 0xf)); - return (((val >> 4)) << A7XX_RB_BIN_CONTROL_BINH__SHIFT) & A7XX_RB_BIN_CONTROL_BINH__MASK; -} -#define A7XX_RB_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000 -#define A7XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT 18 -static inline uint32_t A7XX_RB_BIN_CONTROL_RENDER_MODE(enum a6xx_render_mode val) -{ - return ((val) << A7XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT) & A7XX_RB_BIN_CONTROL_RENDER_MODE__MASK; -} -#define A7XX_RB_BIN_CONTROL_FORCE_LRZ_WRITE_DIS 0x00200000 
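The BINW/BINH helpers above encode the bin size in hardware units rather than pixels: width in 32-pixel units in bits [5:0], height in 16-pixel units in bits [14:8], with the asserts enforcing the required alignment. A minimal sketch of the same encoding (pack_bin_control() is a hypothetical name, not part of the header):

	/*
	 * Sketch: the A6XX/A7XX_RB_BIN_CONTROL BINW/BINH encoding.
	 * Width is stored in 32-pixel units, height in 16-pixel units.
	 */
	#include <assert.h>
	#include <stdint.h>

	static inline uint32_t pack_bin_control(uint32_t binw_px, uint32_t binh_px)
	{
		assert(!(binw_px & 0x1f));   /* width must be a multiple of 32 */
		assert(!(binh_px & 0xf));    /* height must be a multiple of 16 */

		return (((binw_px >> 5) << 0) & 0x0000003fu) |
		       (((binh_px >> 4) << 8) & 0x00007f00u);
	}
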
-#define A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK 0x07000000 -#define A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT 24 -static inline uint32_t A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val) -{ - return ((val) << A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A7XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK; -} - -#define REG_A6XX_RB_RENDER_CNTL 0x00008801 -#define A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__MASK 0x00000038 -#define A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__SHIFT 3 -static inline uint32_t A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE(uint32_t val) -{ - return ((val) << A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__SHIFT) & A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__MASK; -} -#define A6XX_RB_RENDER_CNTL_EARLYVIZOUTEN 0x00000040 -#define A6XX_RB_RENDER_CNTL_BINNING 0x00000080 -#define A6XX_RB_RENDER_CNTL_UNK8__MASK 0x00000700 -#define A6XX_RB_RENDER_CNTL_UNK8__SHIFT 8 -static inline uint32_t A6XX_RB_RENDER_CNTL_UNK8(uint32_t val) -{ - return ((val) << A6XX_RB_RENDER_CNTL_UNK8__SHIFT) & A6XX_RB_RENDER_CNTL_UNK8__MASK; -} -#define A6XX_RB_RENDER_CNTL_RASTER_MODE__MASK 0x00000100 -#define A6XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT 8 -static inline uint32_t A6XX_RB_RENDER_CNTL_RASTER_MODE(enum a6xx_raster_mode val) -{ - return ((val) << A6XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT) & A6XX_RB_RENDER_CNTL_RASTER_MODE__MASK; -} -#define A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK 0x00000600 -#define A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT 9 -static inline uint32_t A6XX_RB_RENDER_CNTL_RASTER_DIRECTION(enum a6xx_raster_direction val) -{ - return ((val) << A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT) & A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK; -} -#define A6XX_RB_RENDER_CNTL_CONSERVATIVERASEN 0x00000800 -#define A6XX_RB_RENDER_CNTL_INNERCONSERVATIVERASEN 0x00001000 -#define A6XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000 -#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000 -#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16 -static inline uint32_t A6XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val) -{ - return ((val) << A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK; -} - -#define REG_A7XX_RB_RENDER_CNTL 0x00008801 -#define A7XX_RB_RENDER_CNTL_EARLYVIZOUTEN 0x00000040 -#define A7XX_RB_RENDER_CNTL_BINNING 0x00000080 -#define A7XX_RB_RENDER_CNTL_RASTER_MODE__MASK 0x00000100 -#define A7XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT 8 -static inline uint32_t A7XX_RB_RENDER_CNTL_RASTER_MODE(enum a6xx_raster_mode val) -{ - return ((val) << A7XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT) & A7XX_RB_RENDER_CNTL_RASTER_MODE__MASK; -} -#define A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK 0x00000600 -#define A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT 9 -static inline uint32_t A7XX_RB_RENDER_CNTL_RASTER_DIRECTION(enum a6xx_raster_direction val) -{ - return ((val) << A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT) & A7XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK; -} -#define A7XX_RB_RENDER_CNTL_CONSERVATIVERASEN 0x00000800 -#define A7XX_RB_RENDER_CNTL_INNERCONSERVATIVERASEN 0x00001000 - -#define REG_A7XX_GRAS_SU_RENDER_CNTL 0x00008116 -#define A7XX_GRAS_SU_RENDER_CNTL_BINNING 0x00000080 - -#define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802 -#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 -#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 -static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK; -} -#define A6XX_RB_RAS_MSAA_CNTL_UNK2 0x00000004 -#define 
A6XX_RB_RAS_MSAA_CNTL_UNK3 0x00000008 - -#define REG_A6XX_RB_DEST_MSAA_CNTL 0x00008803 -#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003 -#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0 -static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK; -} -#define A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004 - -#define REG_A6XX_RB_SAMPLE_CONFIG 0x00008804 -#define A6XX_RB_SAMPLE_CONFIG_UNK0 0x00000001 -#define A6XX_RB_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002 - -#define REG_A6XX_RB_SAMPLE_LOCATION_0 0x00008805 -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0 -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00 -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000 -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000 -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000 -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000 -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000 -#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK; -} - -#define REG_A6XX_RB_SAMPLE_LOCATION_1 0x00008806 -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X(float val) -{ - return ((((int32_t)(val * 16.0))) << 
A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0 -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00 -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000 -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000 -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000 -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000 -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK; -} -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000 -#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28 -static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK; -} - -#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809 -#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL 0x00000001 -#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID 0x00000002 -#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE 0x00000004 -#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_PIXEL 0x00000008 -#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_CENTROID 0x00000010 -#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_SAMPLE 0x00000020 -#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK 0x000003c0 -#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT 6 -static inline uint32_t A6XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val) -{ - return ((val) << A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK; -} -#define A6XX_RB_RENDER_CONTROL0_UNK10 0x00000400 - -#define REG_A6XX_RB_RENDER_CONTROL1 0x0000880a -#define A6XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001 -#define A6XX_RB_RENDER_CONTROL1_POSTDEPTHCOVERAGE 0x00000002 -#define A6XX_RB_RENDER_CONTROL1_FACENESS 0x00000004 -#define A6XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000008 -#define A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__MASK 0x00000030 -#define A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__SHIFT 4 
-static inline uint32_t A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE(enum a6xx_fragcoord_sample_mode val) -{ - return ((val) << A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__SHIFT) & A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__MASK; -} -#define A6XX_RB_RENDER_CONTROL1_CENTERRHW 0x00000040 -#define A6XX_RB_RENDER_CONTROL1_LINELENGTHEN 0x00000080 -#define A6XX_RB_RENDER_CONTROL1_FOVEATION 0x00000100 - -#define REG_A6XX_RB_FS_OUTPUT_CNTL0 0x0000880b -#define A6XX_RB_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE 0x00000001 -#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z 0x00000002 -#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK 0x00000004 -#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_STENCILREF 0x00000008 - -#define REG_A6XX_RB_FS_OUTPUT_CNTL1 0x0000880c -#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f -#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT 0 -static inline uint32_t A6XX_RB_FS_OUTPUT_CNTL1_MRT(uint32_t val) -{ - return ((val) << A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK; -} - -#define REG_A6XX_RB_RENDER_COMPONENTS 0x0000880d -#define A6XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f -#define A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0 -static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT0(uint32_t val) -{ - return ((val) << A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT0__MASK; -} -#define A6XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0 -#define A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4 -static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT1(uint32_t val) -{ - return ((val) << A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT1__MASK; -} -#define A6XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00 -#define A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8 -static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT2(uint32_t val) -{ - return ((val) << A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT2__MASK; -} -#define A6XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000 -#define A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12 -static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT3(uint32_t val) -{ - return ((val) << A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT3__MASK; -} -#define A6XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000 -#define A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16 -static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT4(uint32_t val) -{ - return ((val) << A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT4__MASK; -} -#define A6XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000 -#define A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20 -static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT5(uint32_t val) -{ - return ((val) << A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT5__MASK; -} -#define A6XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000 -#define A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24 -static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT6(uint32_t val) -{ - return ((val) << A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT6__MASK; -} -#define A6XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000 -#define A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28 -static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT7(uint32_t val) -{ - return ((val) << A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT7__MASK; -} - -#define REG_A6XX_RB_DITHER_CNTL 0x0000880e -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK 0x00000003 -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT 0 -static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0(enum adreno_rb_dither_mode val) -{ - return ((val) << 
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK; -} -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK 0x0000000c -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT 2 -static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1(enum adreno_rb_dither_mode val) -{ - return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK; -} -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK 0x00000030 -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT 4 -static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2(enum adreno_rb_dither_mode val) -{ - return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK; -} -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK 0x000000c0 -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT 6 -static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3(enum adreno_rb_dither_mode val) -{ - return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK; -} -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK 0x00000300 -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT 8 -static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4(enum adreno_rb_dither_mode val) -{ - return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK; -} -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK 0x00000c00 -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT 10 -static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5(enum adreno_rb_dither_mode val) -{ - return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK; -} -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK 0x00003000 -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT 12 -static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6(enum adreno_rb_dither_mode val) -{ - return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK; -} -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK 0x0000c000 -#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT 14 -static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dither_mode val) -{ - return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK; -} - -#define REG_A6XX_RB_SRGB_CNTL 0x0000880f -#define A6XX_RB_SRGB_CNTL_SRGB_MRT0 0x00000001 -#define A6XX_RB_SRGB_CNTL_SRGB_MRT1 0x00000002 -#define A6XX_RB_SRGB_CNTL_SRGB_MRT2 0x00000004 -#define A6XX_RB_SRGB_CNTL_SRGB_MRT3 0x00000008 -#define A6XX_RB_SRGB_CNTL_SRGB_MRT4 0x00000010 -#define A6XX_RB_SRGB_CNTL_SRGB_MRT5 0x00000020 -#define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040 -#define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080 - -#define REG_A6XX_RB_SAMPLE_CNTL 0x00008810 -#define A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001 - -#define REG_A6XX_RB_UNKNOWN_8811 0x00008811 - -#define REG_A7XX_RB_UNKNOWN_8812 0x00008812 - -#define REG_A6XX_RB_UNKNOWN_8818 0x00008818 - -#define REG_A6XX_RB_UNKNOWN_8819 0x00008819 - -#define REG_A6XX_RB_UNKNOWN_881A 0x0000881a - -#define REG_A6XX_RB_UNKNOWN_881B 0x0000881b - -#define REG_A6XX_RB_UNKNOWN_881C 0x0000881c - -#define REG_A6XX_RB_UNKNOWN_881D 0x0000881d - -#define REG_A6XX_RB_UNKNOWN_881E 0x0000881e - -#define REG_A6XX_RB_MRT(i0) (0x00008820 + 0x8*(i0)) - -static inline uint32_t REG_A6XX_RB_MRT_CONTROL(uint32_t i0) { return 0x00008820 + 0x8*i0; } -#define A6XX_RB_MRT_CONTROL_BLEND 0x00000001 -#define 
A6XX_RB_MRT_CONTROL_BLEND2 0x00000002 -#define A6XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004 -#define A6XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078 -#define A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3 -static inline uint32_t A6XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val) -{ - return ((val) << A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A6XX_RB_MRT_CONTROL_ROP_CODE__MASK; -} -#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780 -#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7 -static inline uint32_t A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val) -{ - return ((val) << A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK; -} - -static inline uint32_t REG_A6XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x00008821 + 0x8*i0; } -#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f -#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0 -static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK; -} -#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0 -#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5 -static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) -{ - return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK; -} -#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00 -#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8 -static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK; -} -#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000 -#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16 -static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK; -} -#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000 -#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21 -static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) -{ - return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK; -} -#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000 -#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24 -static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val) -{ - return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK; -} - -static inline uint32_t REG_A6XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; } -#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff -#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_format val) -{ - return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK; -} -#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300 -#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8 -static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode val) -{ - return ((val) << 
A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK; -} -#define A6XX_RB_MRT_BUF_INFO_UNK10 0x00000400 -#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000 -#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13 -static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; -} - -static inline uint32_t REG_A7XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; } -#define A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff -#define A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_format val) -{ - return ((val) << A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A7XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK; -} -#define A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300 -#define A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8 -static inline uint32_t A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode val) -{ - return ((val) << A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A7XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK; -} -#define A7XX_RB_MRT_BUF_INFO_UNK10 0x00000400 -#define A7XX_RB_MRT_BUF_INFO_LOSSLESSCOMPEN 0x00000800 -#define A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000 -#define A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13 -static inline uint32_t A7XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A7XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; -} - -static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; } -#define A6XX_RB_MRT_PITCH__MASK 0xffffffff -#define A6XX_RB_MRT_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_MRT_PITCH__SHIFT) & A6XX_RB_MRT_PITCH__MASK; -} - -static inline uint32_t REG_A6XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x00008824 + 0x8*i0; } -#define A6XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff -#define A6XX_RB_MRT_ARRAY_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_MRT_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_MRT_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_ARRAY_PITCH__MASK; -} - -static inline uint32_t REG_A6XX_RB_MRT_BASE(uint32_t i0) { return 0x00008825 + 0x8*i0; } - -static inline uint32_t REG_A6XX_RB_MRT_BASE_GMEM(uint32_t i0) { return 0x00008827 + 0x8*i0; } - -#define REG_A6XX_RB_BLEND_RED_F32 0x00008860 -#define A6XX_RB_BLEND_RED_F32__MASK 0xffffffff -#define A6XX_RB_BLEND_RED_F32__SHIFT 0 -static inline uint32_t A6XX_RB_BLEND_RED_F32(float val) -{ - return ((fui(val)) << A6XX_RB_BLEND_RED_F32__SHIFT) & A6XX_RB_BLEND_RED_F32__MASK; -} - -#define REG_A6XX_RB_BLEND_GREEN_F32 0x00008861 -#define A6XX_RB_BLEND_GREEN_F32__MASK 0xffffffff -#define A6XX_RB_BLEND_GREEN_F32__SHIFT 0 -static inline uint32_t A6XX_RB_BLEND_GREEN_F32(float val) -{ - return ((fui(val)) << A6XX_RB_BLEND_GREEN_F32__SHIFT) & A6XX_RB_BLEND_GREEN_F32__MASK; -} - -#define REG_A6XX_RB_BLEND_BLUE_F32 0x00008862 -#define A6XX_RB_BLEND_BLUE_F32__MASK 0xffffffff -#define A6XX_RB_BLEND_BLUE_F32__SHIFT 0 -static inline uint32_t A6XX_RB_BLEND_BLUE_F32(float val) -{ - return ((fui(val)) << A6XX_RB_BLEND_BLUE_F32__SHIFT) & A6XX_RB_BLEND_BLUE_F32__MASK; -} - -#define REG_A6XX_RB_BLEND_ALPHA_F32 0x00008863 -#define A6XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff -#define A6XX_RB_BLEND_ALPHA_F32__SHIFT 0 -static inline uint32_t A6XX_RB_BLEND_ALPHA_F32(float val) -{ - return ((fui(val)) 
<< A6XX_RB_BLEND_ALPHA_F32__SHIFT) & A6XX_RB_BLEND_ALPHA_F32__MASK; -} - -#define REG_A6XX_RB_ALPHA_CONTROL 0x00008864 -#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff -#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0 -static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val) -{ - return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK; -} -#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100 -#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00 -#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9 -static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val) -{ - return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK; -} - -#define REG_A6XX_RB_BLEND_CNTL 0x00008865 -#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff -#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0 -static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val) -{ - return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK; -} -#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100 -#define A6XX_RB_BLEND_CNTL_DUAL_COLOR_IN_ENABLE 0x00000200 -#define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400 -#define A6XX_RB_BLEND_CNTL_ALPHA_TO_ONE 0x00000800 -#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000 -#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16 -static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val) -{ - return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK; -} - -#define REG_A6XX_RB_DEPTH_PLANE_CNTL 0x00008870 -#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK 0x00000003 -#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT 0 -static inline uint32_t A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val) -{ - return ((val) << A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK; -} - -#define REG_A6XX_RB_DEPTH_CNTL 0x00008871 -#define A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000001 -#define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002 -#define A6XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c -#define A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2 -static inline uint32_t A6XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val) -{ - return ((val) << A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A6XX_RB_DEPTH_CNTL_ZFUNC__MASK; -} -#define A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE 0x00000020 -#define A6XX_RB_DEPTH_CNTL_Z_READ_ENABLE 0x00000040 -#define A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE 0x00000080 - -#define REG_A6XX_GRAS_SU_DEPTH_CNTL 0x00008114 -#define A6XX_GRAS_SU_DEPTH_CNTL_Z_TEST_ENABLE 0x00000001 - -#define REG_A6XX_RB_DEPTH_BUFFER_INFO 0x00008872 -#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007 -#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0 -static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val) -{ - return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK; -} -#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000018 -#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT 3 -static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_UNK3(uint32_t val) -{ - return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK; -} - -#define REG_A7XX_RB_DEPTH_BUFFER_INFO 0x00008872 -#define A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007 -#define A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0 -static inline uint32_t 
A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val) -{ - return ((val) << A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A7XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK; -} -#define A7XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000018 -#define A7XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT 3 -static inline uint32_t A7XX_RB_DEPTH_BUFFER_INFO_UNK3(uint32_t val) -{ - return ((val) << A7XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A7XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK; -} -#define A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__MASK 0x00000060 -#define A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__SHIFT 5 -static inline uint32_t A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE(enum a6xx_tile_mode val) -{ - return ((val) << A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__SHIFT) & A7XX_RB_DEPTH_BUFFER_INFO_TILEMODE__MASK; -} -#define A7XX_RB_DEPTH_BUFFER_INFO_LOSSLESSCOMPEN 0x00000080 - -#define REG_A6XX_RB_DEPTH_BUFFER_PITCH 0x00008873 -#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK 0x00003fff -#define A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_PITCH__MASK; -} - -#define REG_A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x00008874 -#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0x0fffffff -#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK; -} - -#define REG_A6XX_RB_DEPTH_BUFFER_BASE 0x00008875 - -#define REG_A6XX_RB_DEPTH_BUFFER_BASE_GMEM 0x00008877 - -#define REG_A6XX_RB_Z_BOUNDS_MIN 0x00008878 -#define A6XX_RB_Z_BOUNDS_MIN__MASK 0xffffffff -#define A6XX_RB_Z_BOUNDS_MIN__SHIFT 0 -static inline uint32_t A6XX_RB_Z_BOUNDS_MIN(float val) -{ - return ((fui(val)) << A6XX_RB_Z_BOUNDS_MIN__SHIFT) & A6XX_RB_Z_BOUNDS_MIN__MASK; -} - -#define REG_A6XX_RB_Z_BOUNDS_MAX 0x00008879 -#define A6XX_RB_Z_BOUNDS_MAX__MASK 0xffffffff -#define A6XX_RB_Z_BOUNDS_MAX__SHIFT 0 -static inline uint32_t A6XX_RB_Z_BOUNDS_MAX(float val) -{ - return ((fui(val)) << A6XX_RB_Z_BOUNDS_MAX__SHIFT) & A6XX_RB_Z_BOUNDS_MAX__MASK; -} - -#define REG_A6XX_RB_STENCIL_CONTROL 0x00008880 -#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001 -#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002 -#define A6XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004 -#define A6XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700 -#define A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8 -static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val) -{ - return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC__MASK; -} -#define A6XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800 -#define A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11 -static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val) -{ - return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL__MASK; -} -#define A6XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000 -#define A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14 -static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val) -{ - return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS__MASK; -} -#define A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000 -#define A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17 -static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val) -{ - return ((val) << 
A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK; -} -#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000 -#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20 -static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val) -{ - return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK; -} -#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000 -#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23 -static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val) -{ - return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK; -} -#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000 -#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26 -static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val) -{ - return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK; -} -#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000 -#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29 -static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val) -{ - return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK; -} - -#define REG_A6XX_GRAS_SU_STENCIL_CNTL 0x00008115 -#define A6XX_GRAS_SU_STENCIL_CNTL_STENCIL_ENABLE 0x00000001 - -#define REG_A6XX_RB_STENCIL_INFO 0x00008881 -#define A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001 -#define A6XX_RB_STENCIL_INFO_UNK1 0x00000002 - -#define REG_A7XX_RB_STENCIL_INFO 0x00008881 -#define A7XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001 -#define A7XX_RB_STENCIL_INFO_UNK1 0x00000002 -#define A7XX_RB_STENCIL_INFO_TILEMODE__MASK 0x0000000c -#define A7XX_RB_STENCIL_INFO_TILEMODE__SHIFT 2 -static inline uint32_t A7XX_RB_STENCIL_INFO_TILEMODE(enum a6xx_tile_mode val) -{ - return ((val) << A7XX_RB_STENCIL_INFO_TILEMODE__SHIFT) & A7XX_RB_STENCIL_INFO_TILEMODE__MASK; -} - -#define REG_A6XX_RB_STENCIL_BUFFER_PITCH 0x00008882 -#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK 0x00000fff -#define A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_PITCH__MASK; -} - -#define REG_A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH 0x00008883 -#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK 0x00ffffff -#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK; -} - -#define REG_A6XX_RB_STENCIL_BUFFER_BASE 0x00008884 - -#define REG_A6XX_RB_STENCIL_BUFFER_BASE_GMEM 0x00008886 - -#define REG_A6XX_RB_STENCILREF 0x00008887 -#define A6XX_RB_STENCILREF_REF__MASK 0x000000ff -#define A6XX_RB_STENCILREF_REF__SHIFT 0 -static inline uint32_t A6XX_RB_STENCILREF_REF(uint32_t val) -{ - return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK; -} -#define A6XX_RB_STENCILREF_BFREF__MASK 0x0000ff00 -#define A6XX_RB_STENCILREF_BFREF__SHIFT 8 -static inline uint32_t A6XX_RB_STENCILREF_BFREF(uint32_t val) -{ - return ((val) << A6XX_RB_STENCILREF_BFREF__SHIFT) & A6XX_RB_STENCILREF_BFREF__MASK; -} - -#define REG_A6XX_RB_STENCILMASK 0x00008888 -#define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff -#define A6XX_RB_STENCILMASK_MASK__SHIFT 0 -static inline uint32_t 
A6XX_RB_STENCILMASK_MASK(uint32_t val) -{ - return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK; -} -#define A6XX_RB_STENCILMASK_BFMASK__MASK 0x0000ff00 -#define A6XX_RB_STENCILMASK_BFMASK__SHIFT 8 -static inline uint32_t A6XX_RB_STENCILMASK_BFMASK(uint32_t val) -{ - return ((val) << A6XX_RB_STENCILMASK_BFMASK__SHIFT) & A6XX_RB_STENCILMASK_BFMASK__MASK; -} - -#define REG_A6XX_RB_STENCILWRMASK 0x00008889 -#define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff -#define A6XX_RB_STENCILWRMASK_WRMASK__SHIFT 0 -static inline uint32_t A6XX_RB_STENCILWRMASK_WRMASK(uint32_t val) -{ - return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK; -} -#define A6XX_RB_STENCILWRMASK_BFWRMASK__MASK 0x0000ff00 -#define A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT 8 -static inline uint32_t A6XX_RB_STENCILWRMASK_BFWRMASK(uint32_t val) -{ - return ((val) << A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_BFWRMASK__MASK; -} - -#define REG_A6XX_RB_WINDOW_OFFSET 0x00008890 -#define A6XX_RB_WINDOW_OFFSET_X__MASK 0x00003fff -#define A6XX_RB_WINDOW_OFFSET_X__SHIFT 0 -static inline uint32_t A6XX_RB_WINDOW_OFFSET_X(uint32_t val) -{ - return ((val) << A6XX_RB_WINDOW_OFFSET_X__SHIFT) & A6XX_RB_WINDOW_OFFSET_X__MASK; -} -#define A6XX_RB_WINDOW_OFFSET_Y__MASK 0x3fff0000 -#define A6XX_RB_WINDOW_OFFSET_Y__SHIFT 16 -static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val) -{ - return ((val) << A6XX_RB_WINDOW_OFFSET_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET_Y__MASK; -} - -#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891 -#define A6XX_RB_SAMPLE_COUNT_CONTROL_DISABLE 0x00000001 -#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002 - -#define REG_A6XX_RB_LRZ_CNTL 0x00008898 -#define A6XX_RB_LRZ_CNTL_ENABLE 0x00000001 - -#define REG_A7XX_RB_UNKNOWN_8899 0x00008899 - -#define REG_A6XX_RB_Z_CLAMP_MIN 0x000088c0 -#define A6XX_RB_Z_CLAMP_MIN__MASK 0xffffffff -#define A6XX_RB_Z_CLAMP_MIN__SHIFT 0 -static inline uint32_t A6XX_RB_Z_CLAMP_MIN(float val) -{ - return ((fui(val)) << A6XX_RB_Z_CLAMP_MIN__SHIFT) & A6XX_RB_Z_CLAMP_MIN__MASK; -} - -#define REG_A6XX_RB_Z_CLAMP_MAX 0x000088c1 -#define A6XX_RB_Z_CLAMP_MAX__MASK 0xffffffff -#define A6XX_RB_Z_CLAMP_MAX__SHIFT 0 -static inline uint32_t A6XX_RB_Z_CLAMP_MAX(float val) -{ - return ((fui(val)) << A6XX_RB_Z_CLAMP_MAX__SHIFT) & A6XX_RB_Z_CLAMP_MAX__MASK; -} - -#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0 -#define A6XX_RB_UNKNOWN_88D0_UNK0__MASK 0x00001fff -#define A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT 0 -static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK0(uint32_t val) -{ - return ((val) << A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK0__MASK; -} -#define A6XX_RB_UNKNOWN_88D0_UNK16__MASK 0x07ff0000 -#define A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT 16 -static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK16(uint32_t val) -{ - return ((val) << A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK16__MASK; -} - -#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1 -#define A6XX_RB_BLIT_SCISSOR_TL_X__MASK 0x00003fff -#define A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT 0 -static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_X(uint32_t val) -{ - return ((val) << A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_X__MASK; -} -#define A6XX_RB_BLIT_SCISSOR_TL_Y__MASK 0x3fff0000 -#define A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT 16 -static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_Y(uint32_t val) -{ - return ((val) << A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_Y__MASK; -} - -#define REG_A6XX_RB_BLIT_SCISSOR_BR 0x000088d2 -#define 
A6XX_RB_BLIT_SCISSOR_BR_X__MASK 0x00003fff -#define A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT 0 -static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_X(uint32_t val) -{ - return ((val) << A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_X__MASK; -} -#define A6XX_RB_BLIT_SCISSOR_BR_Y__MASK 0x3fff0000 -#define A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT 16 -static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val) -{ - return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK; -} - -#define REG_A6XX_RB_BIN_CONTROL2 0x000088d3 -#define A6XX_RB_BIN_CONTROL2_BINW__MASK 0x0000003f -#define A6XX_RB_BIN_CONTROL2_BINW__SHIFT 0 -static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK; -} -#define A6XX_RB_BIN_CONTROL2_BINH__MASK 0x00007f00 -#define A6XX_RB_BIN_CONTROL2_BINH__SHIFT 8 -static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val) -{ - assert(!(val & 0xf)); - return (((val >> 4)) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK; -} - -#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4 -#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00003fff -#define A6XX_RB_WINDOW_OFFSET2_X__SHIFT 0 -static inline uint32_t A6XX_RB_WINDOW_OFFSET2_X(uint32_t val) -{ - return ((val) << A6XX_RB_WINDOW_OFFSET2_X__SHIFT) & A6XX_RB_WINDOW_OFFSET2_X__MASK; -} -#define A6XX_RB_WINDOW_OFFSET2_Y__MASK 0x3fff0000 -#define A6XX_RB_WINDOW_OFFSET2_Y__SHIFT 16 -static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val) -{ - return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK; -} - -#define REG_A6XX_RB_BLIT_GMEM_MSAA_CNTL 0x000088d5 -#define A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__MASK 0x00000018 -#define A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__SHIFT 3 -static inline uint32_t A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__MASK; -} - -#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6 - -#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7 -#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK 0x00000003 -#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT 0 -static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val) -{ - return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK; -} -#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004 -#define A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK 0x00000018 -#define A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT 3 -static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK; -} -#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK 0x00000060 -#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT 5 -static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK; -} -#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80 -#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7 -static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_format val) -{ - return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK; -} -#define A6XX_RB_BLIT_DST_INFO_UNK15 0x00008000 - -#define REG_A6XX_RB_BLIT_DST 0x000088d8 - -#define REG_A6XX_RB_BLIT_DST_PITCH 0x000088da -#define A6XX_RB_BLIT_DST_PITCH__MASK 
0x0000ffff -#define A6XX_RB_BLIT_DST_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_BLIT_DST_PITCH__SHIFT) & A6XX_RB_BLIT_DST_PITCH__MASK; -} - -#define REG_A6XX_RB_BLIT_DST_ARRAY_PITCH 0x000088db -#define A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0x1fffffff -#define A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK; -} - -#define REG_A6XX_RB_BLIT_FLAG_DST 0x000088dc - -#define REG_A6XX_RB_BLIT_FLAG_DST_PITCH 0x000088de -#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK 0x000007ff -#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK; -} -#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK 0x0ffff800 -#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT 11 -static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x7f)); - return (((val >> 7)) << A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK; -} - -#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0 0x000088df - -#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW1 0x000088e0 - -#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW2 0x000088e1 - -#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW3 0x000088e2 - -#define REG_A6XX_RB_BLIT_INFO 0x000088e3 -#define A6XX_RB_BLIT_INFO_UNK0 0x00000001 -#define A6XX_RB_BLIT_INFO_GMEM 0x00000002 -#define A6XX_RB_BLIT_INFO_SAMPLE_0 0x00000004 -#define A6XX_RB_BLIT_INFO_DEPTH 0x00000008 -#define A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK 0x000000f0 -#define A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT 4 -static inline uint32_t A6XX_RB_BLIT_INFO_CLEAR_MASK(uint32_t val) -{ - return ((val) << A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT) & A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK; -} -#define A6XX_RB_BLIT_INFO_LAST__MASK 0x00000300 -#define A6XX_RB_BLIT_INFO_LAST__SHIFT 8 -static inline uint32_t A6XX_RB_BLIT_INFO_LAST(uint32_t val) -{ - return ((val) << A6XX_RB_BLIT_INFO_LAST__SHIFT) & A6XX_RB_BLIT_INFO_LAST__MASK; -} -#define A6XX_RB_BLIT_INFO_BUFFER_ID__MASK 0x0000f000 -#define A6XX_RB_BLIT_INFO_BUFFER_ID__SHIFT 12 -static inline uint32_t A6XX_RB_BLIT_INFO_BUFFER_ID(uint32_t val) -{ - return ((val) << A6XX_RB_BLIT_INFO_BUFFER_ID__SHIFT) & A6XX_RB_BLIT_INFO_BUFFER_ID__MASK; -} - -#define REG_A7XX_RB_UNKNOWN_88E4 0x000088e4 -#define A7XX_RB_UNKNOWN_88E4_UNK0 0x00000001 - -#define REG_A7XX_RB_CCU_CNTL2 0x000088e5 -#define A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI__MASK 0x00000001 -#define A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI__SHIFT 0 -static inline uint32_t A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI(uint32_t val) -{ - return ((val) << A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI__SHIFT) & A7XX_RB_CCU_CNTL2_DEPTH_OFFSET_HI__MASK; -} -#define A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI__MASK 0x00000004 -#define A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI__SHIFT 2 -static inline uint32_t A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI(uint32_t val) -{ - return ((val) << A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI__SHIFT) & A7XX_RB_CCU_CNTL2_COLOR_OFFSET_HI__MASK; -} -#define A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE__MASK 0x00000c00 -#define A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE__SHIFT 10 -static inline uint32_t A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE(enum a6xx_ccu_cache_size val) -{ - return ((val) << 
A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE__SHIFT) & A7XX_RB_CCU_CNTL2_DEPTH_CACHE_SIZE__MASK; -} -#define A7XX_RB_CCU_CNTL2_DEPTH_OFFSET__MASK 0x001ff000 -#define A7XX_RB_CCU_CNTL2_DEPTH_OFFSET__SHIFT 12 -static inline uint32_t A7XX_RB_CCU_CNTL2_DEPTH_OFFSET(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A7XX_RB_CCU_CNTL2_DEPTH_OFFSET__SHIFT) & A7XX_RB_CCU_CNTL2_DEPTH_OFFSET__MASK; -} -#define A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE__MASK 0x00600000 -#define A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE__SHIFT 21 -static inline uint32_t A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE(enum a6xx_ccu_cache_size val) -{ - return ((val) << A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE__SHIFT) & A7XX_RB_CCU_CNTL2_COLOR_CACHE_SIZE__MASK; -} -#define A7XX_RB_CCU_CNTL2_COLOR_OFFSET__MASK 0xff800000 -#define A7XX_RB_CCU_CNTL2_COLOR_OFFSET__SHIFT 23 -static inline uint32_t A7XX_RB_CCU_CNTL2_COLOR_OFFSET(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A7XX_RB_CCU_CNTL2_COLOR_OFFSET__SHIFT) & A7XX_RB_CCU_CNTL2_COLOR_OFFSET__MASK; -} - -#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0 - -#define REG_A6XX_RB_UNK_FLAG_BUFFER_BASE 0x000088f1 - -#define REG_A6XX_RB_UNK_FLAG_BUFFER_PITCH 0x000088f3 -#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff -#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK; -} -#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x00fff800 -#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11 -static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x7f)); - return (((val >> 7)) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK; -} - -#define REG_A6XX_RB_UNKNOWN_88F4 0x000088f4 - -#define REG_A7XX_RB_UNKNOWN_88F5 0x000088f5 - -#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE 0x00008900 - -#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x00008902 -#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK 0x0000007f -#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK; -} -#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK 0x00000700 -#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT 8 -static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8(uint32_t val) -{ - return ((val) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK; -} -#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x0ffff800 -#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11 -static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x7f)); - return (((val >> 7)) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK; -} - -#define REG_A6XX_RB_MRT_FLAG_BUFFER(i0) (0x00008903 + 0x3*(i0)) - -static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t i0) { return 0x00008903 + 0x3*i0; } - -static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x00008905 + 0x3*i0; } -#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff -#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT 0 -static inline uint32_t 
A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK; -} -#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffff800 -#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11 -static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0x7f)); - return (((val >> 7)) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK; -} - -#define REG_A6XX_RB_SAMPLE_COUNT_ADDR 0x00008927 - -#define REG_A6XX_RB_UNKNOWN_8A00 0x00008a00 - -#define REG_A6XX_RB_UNKNOWN_8A10 0x00008a10 - -#define REG_A6XX_RB_UNKNOWN_8A20 0x00008a20 - -#define REG_A6XX_RB_UNKNOWN_8A30 0x00008a30 - -#define REG_A6XX_RB_2D_BLIT_CNTL 0x00008c00 -#define A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK 0x00000007 -#define A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT 0 -static inline uint32_t A6XX_RB_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val) -{ - return ((val) << A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK; -} -#define A6XX_RB_2D_BLIT_CNTL_OVERWRITEEN 0x00000008 -#define A6XX_RB_2D_BLIT_CNTL_UNK4__MASK 0x00000070 -#define A6XX_RB_2D_BLIT_CNTL_UNK4__SHIFT 4 -static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK4(uint32_t val) -{ - return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK4__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK4__MASK; -} -#define A6XX_RB_2D_BLIT_CNTL_SOLID_COLOR 0x00000080 -#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00 -#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8 -static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val) -{ - return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK; -} -#define A6XX_RB_2D_BLIT_CNTL_SCISSOR 0x00010000 -#define A6XX_RB_2D_BLIT_CNTL_UNK17__MASK 0x00060000 -#define A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT 17 -static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK17(uint32_t val) -{ - return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK17__MASK; -} -#define A6XX_RB_2D_BLIT_CNTL_D24S8 0x00080000 -#define A6XX_RB_2D_BLIT_CNTL_MASK__MASK 0x00f00000 -#define A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT 20 -static inline uint32_t A6XX_RB_2D_BLIT_CNTL_MASK(uint32_t val) -{ - return ((val) << A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_RB_2D_BLIT_CNTL_MASK__MASK; -} -#define A6XX_RB_2D_BLIT_CNTL_IFMT__MASK 0x1f000000 -#define A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT 24 -static inline uint32_t A6XX_RB_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val) -{ - return ((val) << A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_IFMT__MASK; -} -#define A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__MASK 0x20000000 -#define A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__SHIFT 29 -static inline uint32_t A6XX_RB_2D_BLIT_CNTL_RASTER_MODE(enum a6xx_raster_mode val) -{ - return ((val) << A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__SHIFT) & A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__MASK; -} -#define A6XX_RB_2D_BLIT_CNTL_UNK30 0x40000000 - -#define REG_A6XX_RB_2D_UNKNOWN_8C01 0x00008c01 - -#define REG_A6XX_RB_2D_DST_INFO 0x00008c17 -#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff -#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a6xx_format val) -{ - return ((val) << A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK; -} -#define A6XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300 -#define A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8 -static inline uint32_t 
A6XX_RB_2D_DST_INFO_TILE_MODE(enum a6xx_tile_mode val) -{ - return ((val) << A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_2D_DST_INFO_TILE_MODE__MASK; -} -#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00 -#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10 -static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK; -} -#define A6XX_RB_2D_DST_INFO_FLAGS 0x00001000 -#define A6XX_RB_2D_DST_INFO_SRGB 0x00002000 -#define A6XX_RB_2D_DST_INFO_SAMPLES__MASK 0x0000c000 -#define A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT 14 -static inline uint32_t A6XX_RB_2D_DST_INFO_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_2D_DST_INFO_SAMPLES__MASK; -} -#define A6XX_RB_2D_DST_INFO_FILTER 0x00010000 -#define A6XX_RB_2D_DST_INFO_UNK17 0x00020000 -#define A6XX_RB_2D_DST_INFO_SAMPLES_AVERAGE 0x00040000 -#define A6XX_RB_2D_DST_INFO_UNK19 0x00080000 -#define A6XX_RB_2D_DST_INFO_UNK20 0x00100000 -#define A6XX_RB_2D_DST_INFO_UNK21 0x00200000 -#define A6XX_RB_2D_DST_INFO_UNK22 0x00400000 -#define A6XX_RB_2D_DST_INFO_UNK23__MASK 0x07800000 -#define A6XX_RB_2D_DST_INFO_UNK23__SHIFT 23 -static inline uint32_t A6XX_RB_2D_DST_INFO_UNK23(uint32_t val) -{ - return ((val) << A6XX_RB_2D_DST_INFO_UNK23__SHIFT) & A6XX_RB_2D_DST_INFO_UNK23__MASK; -} -#define A6XX_RB_2D_DST_INFO_UNK28 0x10000000 - -#define REG_A6XX_RB_2D_DST 0x00008c18 - -#define REG_A6XX_RB_2D_DST_PITCH 0x00008c1a -#define A6XX_RB_2D_DST_PITCH__MASK 0x0000ffff -#define A6XX_RB_2D_DST_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_2D_DST_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_2D_DST_PITCH__SHIFT) & A6XX_RB_2D_DST_PITCH__MASK; -} - -#define REG_A6XX_RB_2D_DST_PLANE1 0x00008c1b - -#define REG_A6XX_RB_2D_DST_PLANE_PITCH 0x00008c1d -#define A6XX_RB_2D_DST_PLANE_PITCH__MASK 0x0000ffff -#define A6XX_RB_2D_DST_PLANE_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_2D_DST_PLANE_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_2D_DST_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_PLANE_PITCH__MASK; -} - -#define REG_A6XX_RB_2D_DST_PLANE2 0x00008c1e - -#define REG_A6XX_RB_2D_DST_FLAGS 0x00008c20 - -#define REG_A6XX_RB_2D_DST_FLAGS_PITCH 0x00008c22 -#define A6XX_RB_2D_DST_FLAGS_PITCH__MASK 0x000000ff -#define A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_2D_DST_FLAGS_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PITCH__MASK; -} - -#define REG_A6XX_RB_2D_DST_FLAGS_PLANE 0x00008c23 - -#define REG_A6XX_RB_2D_DST_FLAGS_PLANE_PITCH 0x00008c25 -#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK 0x000000ff -#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT 0 -static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK; -} - -#define REG_A6XX_RB_2D_SRC_SOLID_C0 0x00008c2c - -#define REG_A6XX_RB_2D_SRC_SOLID_C1 0x00008c2d - -#define REG_A6XX_RB_2D_SRC_SOLID_C2 0x00008c2e - -#define REG_A6XX_RB_2D_SRC_SOLID_C3 0x00008c2f - -#define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01 - -#define REG_A6XX_RB_DBG_ECO_CNTL 0x00008e04 - -#define REG_A6XX_RB_ADDR_MODE_CNTL 0x00008e05 - -#define REG_A7XX_RB_UNKNOWN_8E06 0x00008e06 - -#define REG_A6XX_RB_CCU_CNTL 0x00008e07 -#define A6XX_RB_CCU_CNTL_GMEM_FAST_CLEAR_DISABLE 
0x00000001 -#define A6XX_RB_CCU_CNTL_CONCURRENT_RESOLVE 0x00000004 -#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__MASK 0x00000080 -#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__SHIFT 7 -static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI(uint32_t val) -{ - return ((val) << A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__MASK; -} -#define A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__MASK 0x00000200 -#define A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__SHIFT 9 -static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI(uint32_t val) -{ - return ((val) << A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__MASK; -} -#define A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__MASK 0x00000c00 -#define A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__SHIFT 10 -static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE(enum a6xx_ccu_cache_size val) -{ - return ((val) << A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_CACHE_SIZE__MASK; -} -#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK 0x001ff000 -#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT 12 -static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_OFFSET(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK; -} -#define A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__MASK 0x00600000 -#define A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__SHIFT 21 -static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE(enum a6xx_ccu_cache_size val) -{ - return ((val) << A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_CACHE_SIZE__MASK; -} -#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK 0xff800000 -#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT 23 -static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_OFFSET(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK; -} - -#define REG_A7XX_RB_CCU_CNTL 0x00008e07 -#define A7XX_RB_CCU_CNTL_GMEM_FAST_CLEAR_DISABLE 0x00000001 -#define A7XX_RB_CCU_CNTL_CONCURRENT_RESOLVE 0x00000004 - -#define REG_A6XX_RB_NC_MODE_CNTL 0x00008e08 -#define A6XX_RB_NC_MODE_CNTL_MODE 0x00000001 -#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK 0x00000006 -#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT 1 -static inline uint32_t A6XX_RB_NC_MODE_CNTL_LOWER_BIT(uint32_t val) -{ - return ((val) << A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT) & A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK; -} -#define A6XX_RB_NC_MODE_CNTL_MIN_ACCESS_LENGTH 0x00000008 -#define A6XX_RB_NC_MODE_CNTL_AMSBC 0x00000010 -#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK 0x00000400 -#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT 10 -static inline uint32_t A6XX_RB_NC_MODE_CNTL_UPPER_BIT(uint32_t val) -{ - return ((val) << A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT) & A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK; -} -#define A6XX_RB_NC_MODE_CNTL_RGB565_PREDICATOR 0x00000800 -#define A6XX_RB_NC_MODE_CNTL_UNK12__MASK 0x00003000 -#define A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT 12 -static inline uint32_t A6XX_RB_NC_MODE_CNTL_UNK12(uint32_t val) -{ - return ((val) << A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT) & A6XX_RB_NC_MODE_CNTL_UNK12__MASK; -} - -#define REG_A7XX_RB_UNKNOWN_8E09 0x00008e09 - -#define REG_A6XX_RB_PERFCTR_RB_SEL(i0) (0x00008e10 + 0x1*(i0)) - -#define REG_A6XX_RB_PERFCTR_CCU_SEL(i0) (0x00008e18 + 0x1*(i0)) - -#define REG_A6XX_RB_UNKNOWN_8E28 0x00008e28 - -#define REG_A6XX_RB_PERFCTR_CMP_SEL(i0) (0x00008e2c + 0x1*(i0)) - -#define REG_A7XX_RB_PERFCTR_UFC_SEL(i0) (0x00008e30 + 0x1*(i0)) - -#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST 0x00008e3b - -#define 
REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD 0x00008e3d - -#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x00008e50 - -#define REG_A6XX_RB_UNKNOWN_8E51 0x00008e51 - -#define REG_A7XX_RB_UNKNOWN_8E79 0x00008e79 - -#define REG_A6XX_VPC_GS_PARAM 0x00009100 -#define A6XX_VPC_GS_PARAM_LINELENGTHLOC__MASK 0x000000ff -#define A6XX_VPC_GS_PARAM_LINELENGTHLOC__SHIFT 0 -static inline uint32_t A6XX_VPC_GS_PARAM_LINELENGTHLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_PARAM_LINELENGTHLOC__SHIFT) & A6XX_VPC_GS_PARAM_LINELENGTHLOC__MASK; -} - -#define REG_A6XX_VPC_VS_CLIP_CNTL 0x00009101 -#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff -#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT 0 -static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK; -} -#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00 -#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8 -static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK; -} -#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000 -#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16 -static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK; -} - -#define REG_A6XX_VPC_GS_CLIP_CNTL 0x00009102 -#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff -#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT 0 -static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK; -} -#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00 -#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8 -static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK; -} -#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000 -#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16 -static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK; -} - -#define REG_A6XX_VPC_DS_CLIP_CNTL 0x00009103 -#define A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff -#define A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT 0 -static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK; -} -#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00 -#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8 -static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK; -} -#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000 -#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16 -static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK; -} - -#define REG_A6XX_VPC_VS_CLIP_CNTL_V2 0x00009311 -#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK__MASK 
0x000000ff -#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK__SHIFT 0 -static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_MASK__MASK; -} -#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK 0x0000ff00 -#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT 8 -static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK; -} -#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK 0x00ff0000 -#define A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT 16 -static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK; -} - -#define REG_A6XX_VPC_GS_CLIP_CNTL_V2 0x00009312 -#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK__MASK 0x000000ff -#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK__SHIFT 0 -static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_MASK__MASK; -} -#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK 0x0000ff00 -#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT 8 -static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK; -} -#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK 0x00ff0000 -#define A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT 16 -static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK; -} - -#define REG_A6XX_VPC_DS_CLIP_CNTL_V2 0x00009313 -#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK__MASK 0x000000ff -#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK__SHIFT 0 -static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_MASK__MASK; -} -#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK 0x0000ff00 -#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT 8 -static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_03_LOC__MASK; -} -#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK 0x00ff0000 -#define A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT 16 -static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_V2_CLIP_DIST_47_LOC__MASK; -} - -#define REG_A6XX_VPC_VS_LAYER_CNTL 0x00009104 -#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff -#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT 0 -static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_LAYERLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK; -} -#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00 -#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT 8 -static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_VIEWLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT) & 
A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK; -} -#define A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC__MASK 0x00ff0000 -#define A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC__SHIFT 16 -static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_SHADINGRATELOC__MASK; -} - -#define REG_A6XX_VPC_GS_LAYER_CNTL 0x00009105 -#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff -#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT 0 -static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_LAYERLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK; -} -#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00 -#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT 8 -static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_VIEWLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK; -} -#define A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC__MASK 0x00ff0000 -#define A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC__SHIFT 16 -static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_SHADINGRATELOC__MASK; -} - -#define REG_A6XX_VPC_DS_LAYER_CNTL 0x00009106 -#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff -#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT 0 -static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_LAYERLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK; -} -#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00 -#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT 8 -static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_VIEWLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK; -} -#define A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC__MASK 0x00ff0000 -#define A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC__SHIFT 16 -static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_SHADINGRATELOC__MASK; -} - -#define REG_A6XX_VPC_VS_LAYER_CNTL_V2 0x00009314 -#define A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC__MASK 0x000000ff -#define A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC__SHIFT 0 -static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_V2_LAYERLOC__MASK; -} -#define A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC__MASK 0x0000ff00 -#define A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC__SHIFT 8 -static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_V2_VIEWLOC__MASK; -} -#define A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC__MASK 0x00ff0000 -#define A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT 16 -static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_V2_SHADINGRATELOC__MASK; -} - -#define REG_A6XX_VPC_GS_LAYER_CNTL_V2 0x00009315 -#define A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC__MASK 0x000000ff -#define A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC__SHIFT 0 -static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_V2_LAYERLOC__MASK; -} -#define 
A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC__MASK 0x0000ff00 -#define A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC__SHIFT 8 -static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_V2_VIEWLOC__MASK; -} -#define A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC__MASK 0x00ff0000 -#define A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT 16 -static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_V2_SHADINGRATELOC__MASK; -} - -#define REG_A6XX_VPC_DS_LAYER_CNTL_V2 0x00009316 -#define A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC__MASK 0x000000ff -#define A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC__SHIFT 0 -static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_V2_LAYERLOC__MASK; -} -#define A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC__MASK 0x0000ff00 -#define A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC__SHIFT 8 -static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_V2_VIEWLOC__MASK; -} -#define A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC__MASK 0x00ff0000 -#define A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT 16 -static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_V2_SHADINGRATELOC__MASK; -} - -#define REG_A6XX_VPC_UNKNOWN_9107 0x00009107 -#define A6XX_VPC_UNKNOWN_9107_RASTER_DISCARD 0x00000001 -#define A6XX_VPC_UNKNOWN_9107_UNK2 0x00000004 - -#define REG_A6XX_VPC_POLYGON_MODE 0x00009108 -#define A6XX_VPC_POLYGON_MODE_MODE__MASK 0x00000003 -#define A6XX_VPC_POLYGON_MODE_MODE__SHIFT 0 -static inline uint32_t A6XX_VPC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val) -{ - return ((val) << A6XX_VPC_POLYGON_MODE_MODE__SHIFT) & A6XX_VPC_POLYGON_MODE_MODE__MASK; -} - -#define REG_A7XX_VPC_PRIMITIVE_CNTL_0 0x00009109 -#define A7XX_VPC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001 -#define A7XX_VPC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002 -#define A7XX_VPC_PRIMITIVE_CNTL_0_D3D_VERTEX_ORDERING 0x00000004 -#define A7XX_VPC_PRIMITIVE_CNTL_0_UNK3 0x00000008 - -#define REG_A7XX_VPC_PRIMITIVE_CNTL_5 0x0000910a -#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK 0x000000ff -#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT 0 -static inline uint32_t A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(uint32_t val) -{ - return ((val) << A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT) & A7XX_VPC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK; -} -#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK 0x00007c00 -#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT 10 -static inline uint32_t A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(uint32_t val) -{ - return ((val) << A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT) & A7XX_VPC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK; -} -#define A7XX_VPC_PRIMITIVE_CNTL_5_LINELENGTHEN 0x00008000 -#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK 0x00030000 -#define A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT 16 -static inline uint32_t A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT(enum a6xx_tess_output val) -{ - return ((val) << A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT) & A7XX_VPC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK; -} -#define A7XX_VPC_PRIMITIVE_CNTL_5_UNK18 0x00040000 - -#define REG_A7XX_VPC_MULTIVIEW_MASK 
0x0000910b - -#define REG_A7XX_VPC_MULTIVIEW_CNTL 0x0000910c -#define A7XX_VPC_MULTIVIEW_CNTL_ENABLE 0x00000001 -#define A7XX_VPC_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002 -#define A7XX_VPC_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c -#define A7XX_VPC_MULTIVIEW_CNTL_VIEWS__SHIFT 2 -static inline uint32_t A7XX_VPC_MULTIVIEW_CNTL_VIEWS(uint32_t val) -{ - return ((val) << A7XX_VPC_MULTIVIEW_CNTL_VIEWS__SHIFT) & A7XX_VPC_MULTIVIEW_CNTL_VIEWS__MASK; -} - -#define REG_A6XX_VPC_VARYING_INTERP(i0) (0x00009200 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00009200 + 0x1*i0; } - -#define REG_A6XX_VPC_VARYING_PS_REPL(i0) (0x00009208 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00009208 + 0x1*i0; } - -#define REG_A6XX_VPC_UNKNOWN_9210 0x00009210 - -#define REG_A6XX_VPC_UNKNOWN_9211 0x00009211 - -#define REG_A6XX_VPC_VAR(i0) (0x00009212 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x00009212 + 0x1*i0; } - -#define REG_A6XX_VPC_SO_CNTL 0x00009216 -#define A6XX_VPC_SO_CNTL_ADDR__MASK 0x000000ff -#define A6XX_VPC_SO_CNTL_ADDR__SHIFT 0 -static inline uint32_t A6XX_VPC_SO_CNTL_ADDR(uint32_t val) -{ - return ((val) << A6XX_VPC_SO_CNTL_ADDR__SHIFT) & A6XX_VPC_SO_CNTL_ADDR__MASK; -} -#define A6XX_VPC_SO_CNTL_RESET 0x00010000 - -#define REG_A6XX_VPC_SO_PROG 0x00009217 -#define A6XX_VPC_SO_PROG_A_BUF__MASK 0x00000003 -#define A6XX_VPC_SO_PROG_A_BUF__SHIFT 0 -static inline uint32_t A6XX_VPC_SO_PROG_A_BUF(uint32_t val) -{ - return ((val) << A6XX_VPC_SO_PROG_A_BUF__SHIFT) & A6XX_VPC_SO_PROG_A_BUF__MASK; -} -#define A6XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc -#define A6XX_VPC_SO_PROG_A_OFF__SHIFT 2 -static inline uint32_t A6XX_VPC_SO_PROG_A_OFF(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_VPC_SO_PROG_A_OFF__SHIFT) & A6XX_VPC_SO_PROG_A_OFF__MASK; -} -#define A6XX_VPC_SO_PROG_A_EN 0x00000800 -#define A6XX_VPC_SO_PROG_B_BUF__MASK 0x00003000 -#define A6XX_VPC_SO_PROG_B_BUF__SHIFT 12 -static inline uint32_t A6XX_VPC_SO_PROG_B_BUF(uint32_t val) -{ - return ((val) << A6XX_VPC_SO_PROG_B_BUF__SHIFT) & A6XX_VPC_SO_PROG_B_BUF__MASK; -} -#define A6XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000 -#define A6XX_VPC_SO_PROG_B_OFF__SHIFT 14 -static inline uint32_t A6XX_VPC_SO_PROG_B_OFF(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_VPC_SO_PROG_B_OFF__SHIFT) & A6XX_VPC_SO_PROG_B_OFF__MASK; -} -#define A6XX_VPC_SO_PROG_B_EN 0x00800000 - -#define REG_A6XX_VPC_SO_STREAM_COUNTS 0x00009218 - -#define REG_A6XX_VPC_SO(i0) (0x0000921a + 0x7*(i0)) - -static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE(uint32_t i0) { return 0x0000921a + 0x7*i0; } - -static inline uint32_t REG_A6XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000921c + 0x7*i0; } - -static inline uint32_t REG_A6XX_VPC_SO_BUFFER_STRIDE(uint32_t i0) { return 0x0000921d + 0x7*i0; } - -static inline uint32_t REG_A6XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000921e + 0x7*i0; } - -static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE(uint32_t i0) { return 0x0000921f + 0x7*i0; } - -#define REG_A6XX_VPC_POINT_COORD_INVERT 0x00009236 -#define A6XX_VPC_POINT_COORD_INVERT_INVERT 0x00000001 - -#define REG_A6XX_VPC_UNKNOWN_9300 0x00009300 - -#define REG_A6XX_VPC_VS_PACK 0x00009301 -#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK 0x000000ff -#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT 0 -static inline uint32_t A6XX_VPC_VS_PACK_STRIDE_IN_VPC(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT) & 
A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK; -} -#define A6XX_VPC_VS_PACK_POSITIONLOC__MASK 0x0000ff00 -#define A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT 8 -static inline uint32_t A6XX_VPC_VS_PACK_POSITIONLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_VS_PACK_POSITIONLOC__MASK; -} -#define A6XX_VPC_VS_PACK_PSIZELOC__MASK 0x00ff0000 -#define A6XX_VPC_VS_PACK_PSIZELOC__SHIFT 16 -static inline uint32_t A6XX_VPC_VS_PACK_PSIZELOC(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_VS_PACK_PSIZELOC__MASK; -} -#define A6XX_VPC_VS_PACK_EXTRAPOS__MASK 0x0f000000 -#define A6XX_VPC_VS_PACK_EXTRAPOS__SHIFT 24 -static inline uint32_t A6XX_VPC_VS_PACK_EXTRAPOS(uint32_t val) -{ - return ((val) << A6XX_VPC_VS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_VS_PACK_EXTRAPOS__MASK; -} - -#define REG_A6XX_VPC_GS_PACK 0x00009302 -#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK 0x000000ff -#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT 0 -static inline uint32_t A6XX_VPC_GS_PACK_STRIDE_IN_VPC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK; -} -#define A6XX_VPC_GS_PACK_POSITIONLOC__MASK 0x0000ff00 -#define A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT 8 -static inline uint32_t A6XX_VPC_GS_PACK_POSITIONLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_GS_PACK_POSITIONLOC__MASK; -} -#define A6XX_VPC_GS_PACK_PSIZELOC__MASK 0x00ff0000 -#define A6XX_VPC_GS_PACK_PSIZELOC__SHIFT 16 -static inline uint32_t A6XX_VPC_GS_PACK_PSIZELOC(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_GS_PACK_PSIZELOC__MASK; -} -#define A6XX_VPC_GS_PACK_EXTRAPOS__MASK 0x0f000000 -#define A6XX_VPC_GS_PACK_EXTRAPOS__SHIFT 24 -static inline uint32_t A6XX_VPC_GS_PACK_EXTRAPOS(uint32_t val) -{ - return ((val) << A6XX_VPC_GS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_GS_PACK_EXTRAPOS__MASK; -} - -#define REG_A6XX_VPC_DS_PACK 0x00009303 -#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK 0x000000ff -#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT 0 -static inline uint32_t A6XX_VPC_DS_PACK_STRIDE_IN_VPC(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK; -} -#define A6XX_VPC_DS_PACK_POSITIONLOC__MASK 0x0000ff00 -#define A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT 8 -static inline uint32_t A6XX_VPC_DS_PACK_POSITIONLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_DS_PACK_POSITIONLOC__MASK; -} -#define A6XX_VPC_DS_PACK_PSIZELOC__MASK 0x00ff0000 -#define A6XX_VPC_DS_PACK_PSIZELOC__SHIFT 16 -static inline uint32_t A6XX_VPC_DS_PACK_PSIZELOC(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_DS_PACK_PSIZELOC__MASK; -} -#define A6XX_VPC_DS_PACK_EXTRAPOS__MASK 0x0f000000 -#define A6XX_VPC_DS_PACK_EXTRAPOS__SHIFT 24 -static inline uint32_t A6XX_VPC_DS_PACK_EXTRAPOS(uint32_t val) -{ - return ((val) << A6XX_VPC_DS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_DS_PACK_EXTRAPOS__MASK; -} - -#define REG_A6XX_VPC_CNTL_0 0x00009304 -#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK 0x000000ff -#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT 0 -static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val) -{ - return ((val) << A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT) & A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK; -} -#define A6XX_VPC_CNTL_0_PRIMIDLOC__MASK 0x0000ff00 -#define A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT 8 -static inline uint32_t A6XX_VPC_CNTL_0_PRIMIDLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT) & 
A6XX_VPC_CNTL_0_PRIMIDLOC__MASK; -} -#define A6XX_VPC_CNTL_0_VARYING 0x00010000 -#define A6XX_VPC_CNTL_0_VIEWIDLOC__MASK 0xff000000 -#define A6XX_VPC_CNTL_0_VIEWIDLOC__SHIFT 24 -static inline uint32_t A6XX_VPC_CNTL_0_VIEWIDLOC(uint32_t val) -{ - return ((val) << A6XX_VPC_CNTL_0_VIEWIDLOC__SHIFT) & A6XX_VPC_CNTL_0_VIEWIDLOC__MASK; -} - -#define REG_A6XX_VPC_SO_STREAM_CNTL 0x00009305 -#define A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__MASK 0x00000007 -#define A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__SHIFT 0 -static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM(uint32_t val) -{ - return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__MASK; -} -#define A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__MASK 0x00000038 -#define A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__SHIFT 3 -static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM(uint32_t val) -{ - return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__MASK; -} -#define A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__MASK 0x000001c0 -#define A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__SHIFT 6 -static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM(uint32_t val) -{ - return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__MASK; -} -#define A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__MASK 0x00000e00 -#define A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__SHIFT 9 -static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM(uint32_t val) -{ - return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__MASK; -} -#define A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__MASK 0x00078000 -#define A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT 15 -static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val) -{ - return ((val) << A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__MASK; -} - -#define REG_A6XX_VPC_SO_DISABLE 0x00009306 -#define A6XX_VPC_SO_DISABLE_DISABLE 0x00000001 - -#define REG_A7XX_VPC_POLYGON_MODE2 0x00009307 -#define A7XX_VPC_POLYGON_MODE2_MODE__MASK 0x00000003 -#define A7XX_VPC_POLYGON_MODE2_MODE__SHIFT 0 -static inline uint32_t A7XX_VPC_POLYGON_MODE2_MODE(enum a6xx_polygon_mode val) -{ - return ((val) << A7XX_VPC_POLYGON_MODE2_MODE__SHIFT) & A7XX_VPC_POLYGON_MODE2_MODE__MASK; -} - -#define REG_A7XX_VPC_ATTR_BUF_SIZE_GMEM 0x00009308 -#define A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__MASK 0xffffffff -#define A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__SHIFT 0 -static inline uint32_t A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM(uint32_t val) -{ - return ((val) << A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__SHIFT) & A7XX_VPC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__MASK; -} - -#define REG_A7XX_VPC_ATTR_BUF_BASE_GMEM 0x00009309 -#define A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM__MASK 0xffffffff -#define A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM__SHIFT 0 -static inline uint32_t A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM(uint32_t val) -{ - return ((val) << A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM__SHIFT) & A7XX_VPC_ATTR_BUF_BASE_GMEM_BASE_GMEM__MASK; -} - -#define REG_A7XX_PC_ATTR_BUF_SIZE_GMEM 0x00009b09 -#define A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__MASK 0xffffffff -#define A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__SHIFT 0 -static inline uint32_t A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM(uint32_t val) -{ - return ((val) << A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__SHIFT) & A7XX_PC_ATTR_BUF_SIZE_GMEM_SIZE_GMEM__MASK; -} - -#define REG_A6XX_VPC_DBG_ECO_CNTL 0x00009600 - -#define REG_A6XX_VPC_ADDR_MODE_CNTL 0x00009601 - -#define 
REG_A6XX_VPC_UNKNOWN_9602 0x00009602 - -#define REG_A6XX_VPC_UNKNOWN_9603 0x00009603 - -#define REG_A6XX_VPC_PERFCTR_VPC_SEL(i0) (0x00009604 + 0x1*(i0)) - -#define REG_A7XX_VPC_PERFCTR_VPC_SEL(i0) (0x0000960b + 0x1*(i0)) - -#define REG_A6XX_PC_TESS_NUM_VERTEX 0x00009800 - -#define REG_A6XX_PC_HS_INPUT_SIZE 0x00009801 -#define A6XX_PC_HS_INPUT_SIZE_SIZE__MASK 0x000007ff -#define A6XX_PC_HS_INPUT_SIZE_SIZE__SHIFT 0 -static inline uint32_t A6XX_PC_HS_INPUT_SIZE_SIZE(uint32_t val) -{ - return ((val) << A6XX_PC_HS_INPUT_SIZE_SIZE__SHIFT) & A6XX_PC_HS_INPUT_SIZE_SIZE__MASK; -} -#define A6XX_PC_HS_INPUT_SIZE_UNK13 0x00002000 - -#define REG_A6XX_PC_TESS_CNTL 0x00009802 -#define A6XX_PC_TESS_CNTL_SPACING__MASK 0x00000003 -#define A6XX_PC_TESS_CNTL_SPACING__SHIFT 0 -static inline uint32_t A6XX_PC_TESS_CNTL_SPACING(enum a6xx_tess_spacing val) -{ - return ((val) << A6XX_PC_TESS_CNTL_SPACING__SHIFT) & A6XX_PC_TESS_CNTL_SPACING__MASK; -} -#define A6XX_PC_TESS_CNTL_OUTPUT__MASK 0x0000000c -#define A6XX_PC_TESS_CNTL_OUTPUT__SHIFT 2 -static inline uint32_t A6XX_PC_TESS_CNTL_OUTPUT(enum a6xx_tess_output val) -{ - return ((val) << A6XX_PC_TESS_CNTL_OUTPUT__SHIFT) & A6XX_PC_TESS_CNTL_OUTPUT__MASK; -} - -#define REG_A6XX_PC_RESTART_INDEX 0x00009803 - -#define REG_A6XX_PC_MODE_CNTL 0x00009804 - -#define REG_A6XX_PC_POWER_CNTL 0x00009805 - -#define REG_A6XX_PC_PS_CNTL 0x00009806 -#define A6XX_PC_PS_CNTL_PRIMITIVEIDEN 0x00000001 - -#define REG_A6XX_PC_SO_STREAM_CNTL 0x00009808 -#define A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__MASK 0x00078000 -#define A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT 15 -static inline uint32_t A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val) -{ - return ((val) << A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT) & A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__MASK; -} - -#define REG_A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL 0x0000980a -#define A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN 0x00000001 - -#define REG_A6XX_PC_DRAW_CMD 0x00009840 -#define A6XX_PC_DRAW_CMD_STATE_ID__MASK 0x000000ff -#define A6XX_PC_DRAW_CMD_STATE_ID__SHIFT 0 -static inline uint32_t A6XX_PC_DRAW_CMD_STATE_ID(uint32_t val) -{ - return ((val) << A6XX_PC_DRAW_CMD_STATE_ID__SHIFT) & A6XX_PC_DRAW_CMD_STATE_ID__MASK; -} - -#define REG_A6XX_PC_DISPATCH_CMD 0x00009841 -#define A6XX_PC_DISPATCH_CMD_STATE_ID__MASK 0x000000ff -#define A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT 0 -static inline uint32_t A6XX_PC_DISPATCH_CMD_STATE_ID(uint32_t val) -{ - return ((val) << A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_PC_DISPATCH_CMD_STATE_ID__MASK; -} - -#define REG_A6XX_PC_EVENT_CMD 0x00009842 -#define A6XX_PC_EVENT_CMD_STATE_ID__MASK 0x00ff0000 -#define A6XX_PC_EVENT_CMD_STATE_ID__SHIFT 16 -static inline uint32_t A6XX_PC_EVENT_CMD_STATE_ID(uint32_t val) -{ - return ((val) << A6XX_PC_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_EVENT_CMD_STATE_ID__MASK; -} -#define A6XX_PC_EVENT_CMD_EVENT__MASK 0x0000007f -#define A6XX_PC_EVENT_CMD_EVENT__SHIFT 0 -static inline uint32_t A6XX_PC_EVENT_CMD_EVENT(enum vgt_event_type val) -{ - return ((val) << A6XX_PC_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_EVENT_CMD_EVENT__MASK; -} - -#define REG_A6XX_PC_MARKER 0x00009880 - -#define REG_A6XX_PC_POLYGON_MODE 0x00009981 -#define A6XX_PC_POLYGON_MODE_MODE__MASK 0x00000003 -#define A6XX_PC_POLYGON_MODE_MODE__SHIFT 0 -static inline uint32_t A6XX_PC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val) -{ - return ((val) << A6XX_PC_POLYGON_MODE_MODE__SHIFT) & A6XX_PC_POLYGON_MODE_MODE__MASK; -} - -#define REG_A7XX_PC_POLYGON_MODE 0x00009809 -#define A7XX_PC_POLYGON_MODE_MODE__MASK 
0x00000003 -#define A7XX_PC_POLYGON_MODE_MODE__SHIFT 0 -static inline uint32_t A7XX_PC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val) -{ - return ((val) << A7XX_PC_POLYGON_MODE_MODE__SHIFT) & A7XX_PC_POLYGON_MODE_MODE__MASK; -} - -#define REG_A6XX_PC_RASTER_CNTL 0x00009980 -#define A6XX_PC_RASTER_CNTL_STREAM__MASK 0x00000003 -#define A6XX_PC_RASTER_CNTL_STREAM__SHIFT 0 -static inline uint32_t A6XX_PC_RASTER_CNTL_STREAM(uint32_t val) -{ - return ((val) << A6XX_PC_RASTER_CNTL_STREAM__SHIFT) & A6XX_PC_RASTER_CNTL_STREAM__MASK; -} -#define A6XX_PC_RASTER_CNTL_DISCARD 0x00000004 - -#define REG_A7XX_PC_RASTER_CNTL 0x00009107 -#define A7XX_PC_RASTER_CNTL_STREAM__MASK 0x00000003 -#define A7XX_PC_RASTER_CNTL_STREAM__SHIFT 0 -static inline uint32_t A7XX_PC_RASTER_CNTL_STREAM(uint32_t val) -{ - return ((val) << A7XX_PC_RASTER_CNTL_STREAM__SHIFT) & A7XX_PC_RASTER_CNTL_STREAM__MASK; -} -#define A7XX_PC_RASTER_CNTL_DISCARD 0x00000004 - -#define REG_A7XX_PC_RASTER_CNTL_V2 0x00009317 -#define A7XX_PC_RASTER_CNTL_V2_STREAM__MASK 0x00000003 -#define A7XX_PC_RASTER_CNTL_V2_STREAM__SHIFT 0 -static inline uint32_t A7XX_PC_RASTER_CNTL_V2_STREAM(uint32_t val) -{ - return ((val) << A7XX_PC_RASTER_CNTL_V2_STREAM__SHIFT) & A7XX_PC_RASTER_CNTL_V2_STREAM__MASK; -} -#define A7XX_PC_RASTER_CNTL_V2_DISCARD 0x00000004 - -#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00 -#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001 -#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002 -#define A6XX_PC_PRIMITIVE_CNTL_0_D3D_VERTEX_ORDERING 0x00000004 -#define A6XX_PC_PRIMITIVE_CNTL_0_UNK3 0x00000008 - -#define REG_A6XX_PC_VS_OUT_CNTL 0x00009b01 -#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff -#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0 -static inline uint32_t A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val) -{ - return ((val) << A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK; -} -#define A6XX_PC_VS_OUT_CNTL_PSIZE 0x00000100 -#define A6XX_PC_VS_OUT_CNTL_LAYER 0x00000200 -#define A6XX_PC_VS_OUT_CNTL_VIEW 0x00000400 -#define A6XX_PC_VS_OUT_CNTL_PRIMITIVE_ID 0x00000800 -#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000 -#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT 16 -static inline uint32_t A6XX_PC_VS_OUT_CNTL_CLIP_MASK(uint32_t val) -{ - return ((val) << A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK; -} -#define A6XX_PC_VS_OUT_CNTL_SHADINGRATE 0x01000000 - -#define REG_A6XX_PC_GS_OUT_CNTL 0x00009b02 -#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff -#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0 -static inline uint32_t A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val) -{ - return ((val) << A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK; -} -#define A6XX_PC_GS_OUT_CNTL_PSIZE 0x00000100 -#define A6XX_PC_GS_OUT_CNTL_LAYER 0x00000200 -#define A6XX_PC_GS_OUT_CNTL_VIEW 0x00000400 -#define A6XX_PC_GS_OUT_CNTL_PRIMITIVE_ID 0x00000800 -#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000 -#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT 16 -static inline uint32_t A6XX_PC_GS_OUT_CNTL_CLIP_MASK(uint32_t val) -{ - return ((val) << A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK; -} -#define A6XX_PC_GS_OUT_CNTL_SHADINGRATE 0x01000000 - -#define REG_A6XX_PC_HS_OUT_CNTL 0x00009b03 -#define A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff -#define A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0 -static inline uint32_t A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val) -{ - 
return ((val) << A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__MASK; -} -#define A6XX_PC_HS_OUT_CNTL_PSIZE 0x00000100 -#define A6XX_PC_HS_OUT_CNTL_LAYER 0x00000200 -#define A6XX_PC_HS_OUT_CNTL_VIEW 0x00000400 -#define A6XX_PC_HS_OUT_CNTL_PRIMITIVE_ID 0x00000800 -#define A6XX_PC_HS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000 -#define A6XX_PC_HS_OUT_CNTL_CLIP_MASK__SHIFT 16 -static inline uint32_t A6XX_PC_HS_OUT_CNTL_CLIP_MASK(uint32_t val) -{ - return ((val) << A6XX_PC_HS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_HS_OUT_CNTL_CLIP_MASK__MASK; -} -#define A6XX_PC_HS_OUT_CNTL_SHADINGRATE 0x01000000 - -#define REG_A6XX_PC_DS_OUT_CNTL 0x00009b04 -#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff -#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0 -static inline uint32_t A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val) -{ - return ((val) << A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK; -} -#define A6XX_PC_DS_OUT_CNTL_PSIZE 0x00000100 -#define A6XX_PC_DS_OUT_CNTL_LAYER 0x00000200 -#define A6XX_PC_DS_OUT_CNTL_VIEW 0x00000400 -#define A6XX_PC_DS_OUT_CNTL_PRIMITIVE_ID 0x00000800 -#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000 -#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT 16 -static inline uint32_t A6XX_PC_DS_OUT_CNTL_CLIP_MASK(uint32_t val) -{ - return ((val) << A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK; -} -#define A6XX_PC_DS_OUT_CNTL_SHADINGRATE 0x01000000 - -#define REG_A6XX_PC_PRIMITIVE_CNTL_5 0x00009b05 -#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK 0x000000ff -#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT 0 -static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(uint32_t val) -{ - return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK; -} -#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK 0x00007c00 -#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT 10 -static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(uint32_t val) -{ - return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK; -} -#define A6XX_PC_PRIMITIVE_CNTL_5_LINELENGTHEN 0x00008000 -#define A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK 0x00030000 -#define A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT 16 -static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(enum a6xx_tess_output val) -{ - return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK; -} -#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18 0x00040000 - -#define REG_A6XX_PC_PRIMITIVE_CNTL_6 0x00009b06 -#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK 0x000007ff -#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT 0 -static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(uint32_t val) -{ - return ((val) << A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK; -} - -#define REG_A6XX_PC_MULTIVIEW_CNTL 0x00009b07 -#define A6XX_PC_MULTIVIEW_CNTL_ENABLE 0x00000001 -#define A6XX_PC_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002 -#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c -#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT 2 -static inline uint32_t A6XX_PC_MULTIVIEW_CNTL_VIEWS(uint32_t val) -{ - return ((val) << A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT) & A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK; -} - -#define REG_A6XX_PC_MULTIVIEW_MASK 0x00009b08 - -#define REG_A6XX_PC_2D_EVENT_CMD 0x00009c00 -#define A6XX_PC_2D_EVENT_CMD_EVENT__MASK 0x0000007f 
-#define A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT 0 -static inline uint32_t A6XX_PC_2D_EVENT_CMD_EVENT(enum vgt_event_type val) -{ - return ((val) << A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_2D_EVENT_CMD_EVENT__MASK; -} -#define A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK 0x0000ff00 -#define A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT 8 -static inline uint32_t A6XX_PC_2D_EVENT_CMD_STATE_ID(uint32_t val) -{ - return ((val) << A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK; -} - -#define REG_A6XX_PC_DBG_ECO_CNTL 0x00009e00 - -#define REG_A6XX_PC_ADDR_MODE_CNTL 0x00009e01 - -#define REG_A6XX_PC_DRAW_INDX_BASE 0x00009e04 - -#define REG_A6XX_PC_DRAW_FIRST_INDX 0x00009e06 - -#define REG_A6XX_PC_DRAW_MAX_INDICES 0x00009e07 - -#define REG_A6XX_PC_TESSFACTOR_ADDR 0x00009e08 - -#define REG_A7XX_PC_TESSFACTOR_ADDR 0x00009810 - -#define REG_A6XX_PC_DRAW_INITIATOR 0x00009e0b -#define A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f -#define A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0 -static inline uint32_t A6XX_PC_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val) -{ - return ((val) << A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__MASK; -} -#define A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0 -#define A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6 -static inline uint32_t A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val) -{ - return ((val) << A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__MASK; -} -#define A6XX_PC_DRAW_INITIATOR_VIS_CULL__MASK 0x00000300 -#define A6XX_PC_DRAW_INITIATOR_VIS_CULL__SHIFT 8 -static inline uint32_t A6XX_PC_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val) -{ - return ((val) << A6XX_PC_DRAW_INITIATOR_VIS_CULL__SHIFT) & A6XX_PC_DRAW_INITIATOR_VIS_CULL__MASK; -} -#define A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000c00 -#define A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__SHIFT 10 -static inline uint32_t A6XX_PC_DRAW_INITIATOR_INDEX_SIZE(enum a4xx_index_size val) -{ - return ((val) << A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__MASK; -} -#define A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__MASK 0x00003000 -#define A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__SHIFT 12 -static inline uint32_t A6XX_PC_DRAW_INITIATOR_PATCH_TYPE(enum a6xx_patch_type val) -{ - return ((val) << A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__SHIFT) & A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__MASK; -} -#define A6XX_PC_DRAW_INITIATOR_GS_ENABLE 0x00010000 -#define A6XX_PC_DRAW_INITIATOR_TESS_ENABLE 0x00020000 - -#define REG_A6XX_PC_DRAW_NUM_INSTANCES 0x00009e0c - -#define REG_A6XX_PC_DRAW_NUM_INDICES 0x00009e0d - -#define REG_A6XX_PC_VSTREAM_CONTROL 0x00009e11 -#define A6XX_PC_VSTREAM_CONTROL_UNK0__MASK 0x0000ffff -#define A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT 0 -static inline uint32_t A6XX_PC_VSTREAM_CONTROL_UNK0(uint32_t val) -{ - return ((val) << A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT) & A6XX_PC_VSTREAM_CONTROL_UNK0__MASK; -} -#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK 0x003f0000 -#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT 16 -static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_SIZE(uint32_t val) -{ - return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK; -} -#define A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK 0x07c00000 -#define A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT 22 -static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_N(uint32_t val) -{ - return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK; -} - -#define REG_A6XX_PC_BIN_PRIM_STRM 
0x00009e12 - -#define REG_A6XX_PC_BIN_DRAW_STRM 0x00009e14 - -#define REG_A6XX_PC_VISIBILITY_OVERRIDE 0x00009e1c -#define A6XX_PC_VISIBILITY_OVERRIDE_OVERRIDE 0x00000001 - -#define REG_A7XX_PC_UNKNOWN_9E24 0x00009e24 - -#define REG_A6XX_PC_PERFCTR_PC_SEL(i0) (0x00009e34 + 0x1*(i0)) - -#define REG_A7XX_PC_PERFCTR_PC_SEL(i0) (0x00009e42 + 0x1*(i0)) - -#define REG_A6XX_PC_UNKNOWN_9E72 0x00009e72 - -#define REG_A6XX_VFD_CONTROL_0 0x0000a000 -#define A6XX_VFD_CONTROL_0_FETCH_CNT__MASK 0x0000003f -#define A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT 0 -static inline uint32_t A6XX_VFD_CONTROL_0_FETCH_CNT(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT) & A6XX_VFD_CONTROL_0_FETCH_CNT__MASK; -} -#define A6XX_VFD_CONTROL_0_DECODE_CNT__MASK 0x00003f00 -#define A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT 8 -static inline uint32_t A6XX_VFD_CONTROL_0_DECODE_CNT(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT) & A6XX_VFD_CONTROL_0_DECODE_CNT__MASK; -} - -#define REG_A6XX_VFD_CONTROL_1 0x0000a001 -#define A6XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff -#define A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0 -static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VTX(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VTX__MASK; -} -#define A6XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00 -#define A6XX_VFD_CONTROL_1_REGID4INST__SHIFT 8 -static inline uint32_t A6XX_VFD_CONTROL_1_REGID4INST(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A6XX_VFD_CONTROL_1_REGID4INST__MASK; -} -#define A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000 -#define A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16 -static inline uint32_t A6XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK; -} -#define A6XX_VFD_CONTROL_1_REGID4VIEWID__MASK 0xff000000 -#define A6XX_VFD_CONTROL_1_REGID4VIEWID__SHIFT 24 -static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VIEWID(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_1_REGID4VIEWID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VIEWID__MASK; -} - -#define REG_A6XX_VFD_CONTROL_2 0x0000a002 -#define A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__MASK 0x000000ff -#define A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__SHIFT 0 -static inline uint32_t A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__MASK; -} -#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK 0x0000ff00 -#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT 8 -static inline uint32_t A6XX_VFD_CONTROL_2_REGID_INVOCATIONID(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK; -} - -#define REG_A6XX_VFD_CONTROL_3 0x0000a003 -#define A6XX_VFD_CONTROL_3_REGID_DSPRIMID__MASK 0x000000ff -#define A6XX_VFD_CONTROL_3_REGID_DSPRIMID__SHIFT 0 -static inline uint32_t A6XX_VFD_CONTROL_3_REGID_DSPRIMID(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_3_REGID_DSPRIMID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_DSPRIMID__MASK; -} -#define A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__MASK 0x0000ff00 -#define A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__SHIFT 8 -static inline uint32_t A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__MASK; -} -#define A6XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000 -#define 
A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16 -static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSX__MASK; -} -#define A6XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000 -#define A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24 -static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSY__MASK; -} - -#define REG_A6XX_VFD_CONTROL_4 0x0000a004 -#define A6XX_VFD_CONTROL_4_UNK0__MASK 0x000000ff -#define A6XX_VFD_CONTROL_4_UNK0__SHIFT 0 -static inline uint32_t A6XX_VFD_CONTROL_4_UNK0(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_4_UNK0__SHIFT) & A6XX_VFD_CONTROL_4_UNK0__MASK; -} - -#define REG_A6XX_VFD_CONTROL_5 0x0000a005 -#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK 0x000000ff -#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT 0 -static inline uint32_t A6XX_VFD_CONTROL_5_REGID_GSHEADER(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT) & A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK; -} -#define A6XX_VFD_CONTROL_5_UNK8__MASK 0x0000ff00 -#define A6XX_VFD_CONTROL_5_UNK8__SHIFT 8 -static inline uint32_t A6XX_VFD_CONTROL_5_UNK8(uint32_t val) -{ - return ((val) << A6XX_VFD_CONTROL_5_UNK8__SHIFT) & A6XX_VFD_CONTROL_5_UNK8__MASK; -} - -#define REG_A6XX_VFD_CONTROL_6 0x0000a006 -#define A6XX_VFD_CONTROL_6_PRIMID4PSEN 0x00000001 - -#define REG_A6XX_VFD_MODE_CNTL 0x0000a007 -#define A6XX_VFD_MODE_CNTL_RENDER_MODE__MASK 0x00000007 -#define A6XX_VFD_MODE_CNTL_RENDER_MODE__SHIFT 0 -static inline uint32_t A6XX_VFD_MODE_CNTL_RENDER_MODE(enum a6xx_render_mode val) -{ - return ((val) << A6XX_VFD_MODE_CNTL_RENDER_MODE__SHIFT) & A6XX_VFD_MODE_CNTL_RENDER_MODE__MASK; -} - -#define REG_A6XX_VFD_MULTIVIEW_CNTL 0x0000a008 -#define A6XX_VFD_MULTIVIEW_CNTL_ENABLE 0x00000001 -#define A6XX_VFD_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002 -#define A6XX_VFD_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c -#define A6XX_VFD_MULTIVIEW_CNTL_VIEWS__SHIFT 2 -static inline uint32_t A6XX_VFD_MULTIVIEW_CNTL_VIEWS(uint32_t val) -{ - return ((val) << A6XX_VFD_MULTIVIEW_CNTL_VIEWS__SHIFT) & A6XX_VFD_MULTIVIEW_CNTL_VIEWS__MASK; -} - -#define REG_A6XX_VFD_ADD_OFFSET 0x0000a009 -#define A6XX_VFD_ADD_OFFSET_VERTEX 0x00000001 -#define A6XX_VFD_ADD_OFFSET_INSTANCE 0x00000002 - -#define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e - -#define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f - -#define REG_A6XX_VFD_FETCH(i0) (0x0000a010 + 0x4*(i0)) - -static inline uint32_t REG_A6XX_VFD_FETCH_BASE(uint32_t i0) { return 0x0000a010 + 0x4*i0; } - -static inline uint32_t REG_A6XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000a012 + 0x4*i0; } - -static inline uint32_t REG_A6XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000a013 + 0x4*i0; } - -#define REG_A6XX_VFD_DECODE(i0) (0x0000a090 + 0x2*(i0)) - -static inline uint32_t REG_A6XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000a090 + 0x2*i0; } -#define A6XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f -#define A6XX_VFD_DECODE_INSTR_IDX__SHIFT 0 -static inline uint32_t A6XX_VFD_DECODE_INSTR_IDX(uint32_t val) -{ - return ((val) << A6XX_VFD_DECODE_INSTR_IDX__SHIFT) & A6XX_VFD_DECODE_INSTR_IDX__MASK; -} -#define A6XX_VFD_DECODE_INSTR_OFFSET__MASK 0x0001ffe0 -#define A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT 5 -static inline uint32_t A6XX_VFD_DECODE_INSTR_OFFSET(uint32_t val) -{ - return ((val) << A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT) & A6XX_VFD_DECODE_INSTR_OFFSET__MASK; -} -#define A6XX_VFD_DECODE_INSTR_INSTANCED 
0x00020000 -#define A6XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000 -#define A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20 -static inline uint32_t A6XX_VFD_DECODE_INSTR_FORMAT(enum a6xx_format val) -{ - return ((val) << A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A6XX_VFD_DECODE_INSTR_FORMAT__MASK; -} -#define A6XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000 -#define A6XX_VFD_DECODE_INSTR_SWAP__SHIFT 28 -static inline uint32_t A6XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A6XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A6XX_VFD_DECODE_INSTR_SWAP__MASK; -} -#define A6XX_VFD_DECODE_INSTR_UNK30 0x40000000 -#define A6XX_VFD_DECODE_INSTR_FLOAT 0x80000000 - -static inline uint32_t REG_A6XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000a091 + 0x2*i0; } - -#define REG_A6XX_VFD_DEST_CNTL(i0) (0x0000a0d0 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; } -#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f -#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0 -static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val) -{ - return ((val) << A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK; -} -#define A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0 -#define A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4 -static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val) -{ - return ((val) << A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK; -} - -#define REG_A6XX_VFD_POWER_CNTL 0x0000a0f8 - -#define REG_A7XX_VFD_UNKNOWN_A600 0x0000a600 - -#define REG_A6XX_VFD_ADDR_MODE_CNTL 0x0000a601 - -#define REG_A6XX_VFD_PERFCTR_VFD_SEL(i0) (0x0000a610 + 0x1*(i0)) - -#define REG_A7XX_VFD_PERFCTR_VFD_SEL(i0) (0x0000a610 + 0x1*(i0)) - -#define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800 -#define A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001 -#define A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0 -static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) -{ - return ((val) << A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK; -} -#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e -#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1 -static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80 -#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7 -static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A6XX_SP_VS_CTRL_REG0_UNK13 0x00002000 -#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000 -#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 14 -static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val) -{ - return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK; -} -#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x00100000 -#define A6XX_SP_VS_CTRL_REG0_EARLYPREAMBLE 0x00200000 - -#define REG_A6XX_SP_VS_BRANCH_COND 0x0000a801 - -#define REG_A6XX_SP_VS_PRIMITIVE_CNTL 0x0000a802 -#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f -#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT 0 -static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_OUT(uint32_t val) -{ - return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT) & 
A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK; -} -#define A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0 -#define A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6 -static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val) -{ - return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__MASK; -} - -#define REG_A6XX_SP_VS_OUT(i0) (0x0000a803 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000a803 + 0x1*i0; } -#define A6XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff -#define A6XX_SP_VS_OUT_REG_A_REGID__SHIFT 0 -static inline uint32_t A6XX_SP_VS_OUT_REG_A_REGID(uint32_t val) -{ - return ((val) << A6XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_A_REGID__MASK; -} -#define A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00 -#define A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8 -static inline uint32_t A6XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val) -{ - return ((val) << A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK; -} -#define A6XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000 -#define A6XX_SP_VS_OUT_REG_B_REGID__SHIFT 16 -static inline uint32_t A6XX_SP_VS_OUT_REG_B_REGID(uint32_t val) -{ - return ((val) << A6XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_B_REGID__MASK; -} -#define A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000 -#define A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24 -static inline uint32_t A6XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val) -{ - return ((val) << A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK; -} - -#define REG_A6XX_SP_VS_VPC_DST(i0) (0x0000a813 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000a813 + 0x1*i0; } -#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff -#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0 -static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val) -{ - return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK; -} -#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 -#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8 -static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val) -{ - return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK; -} -#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 -#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16 -static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val) -{ - return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK; -} -#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 -#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24 -static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val) -{ - return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK; -} - -#define REG_A6XX_SP_VS_OBJ_FIRST_EXEC_OFFSET 0x0000a81b - -#define REG_A6XX_SP_VS_OBJ_START 0x0000a81c - -#define REG_A6XX_SP_VS_PVT_MEM_PARAM 0x0000a81e -#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff -#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0 -static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val) -{ - assert(!(val & 0x1ff)); - return (((val >> 9)) << A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK; -} -#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t 
A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A6XX_SP_VS_PVT_MEM_ADDR 0x0000a81f - -#define REG_A6XX_SP_VS_PVT_MEM_SIZE 0x0000a821 -#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff -#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0 -static inline uint32_t A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK; -} -#define A6XX_SP_VS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000 - -#define REG_A6XX_SP_VS_TEX_COUNT 0x0000a822 - -#define REG_A6XX_SP_VS_CONFIG 0x0000a823 -#define A6XX_SP_VS_CONFIG_BINDLESS_TEX 0x00000001 -#define A6XX_SP_VS_CONFIG_BINDLESS_SAMP 0x00000002 -#define A6XX_SP_VS_CONFIG_BINDLESS_IBO 0x00000004 -#define A6XX_SP_VS_CONFIG_BINDLESS_UBO 0x00000008 -#define A6XX_SP_VS_CONFIG_ENABLED 0x00000100 -#define A6XX_SP_VS_CONFIG_NTEX__MASK 0x0001fe00 -#define A6XX_SP_VS_CONFIG_NTEX__SHIFT 9 -static inline uint32_t A6XX_SP_VS_CONFIG_NTEX(uint32_t val) -{ - return ((val) << A6XX_SP_VS_CONFIG_NTEX__SHIFT) & A6XX_SP_VS_CONFIG_NTEX__MASK; -} -#define A6XX_SP_VS_CONFIG_NSAMP__MASK 0x003e0000 -#define A6XX_SP_VS_CONFIG_NSAMP__SHIFT 17 -static inline uint32_t A6XX_SP_VS_CONFIG_NSAMP(uint32_t val) -{ - return ((val) << A6XX_SP_VS_CONFIG_NSAMP__SHIFT) & A6XX_SP_VS_CONFIG_NSAMP__MASK; -} -#define A6XX_SP_VS_CONFIG_NIBO__MASK 0x1fc00000 -#define A6XX_SP_VS_CONFIG_NIBO__SHIFT 22 -static inline uint32_t A6XX_SP_VS_CONFIG_NIBO(uint32_t val) -{ - return ((val) << A6XX_SP_VS_CONFIG_NIBO__SHIFT) & A6XX_SP_VS_CONFIG_NIBO__MASK; -} - -#define REG_A6XX_SP_VS_INSTRLEN 0x0000a824 - -#define REG_A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET 0x0000a825 -#define A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff -#define A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0 -static inline uint32_t A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val) -{ - assert(!(val & 0x7ff)); - return (((val >> 11)) << A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK; -} - -#define REG_A7XX_SP_VS_VGPR_CONFIG 0x0000a82d - -#define REG_A6XX_SP_HS_CTRL_REG0 0x0000a830 -#define A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK 0x00000001 -#define A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT 0 -static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) -{ - return ((val) << A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK; -} -#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e -#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1 -static inline uint32_t A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80 -#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7 -static inline uint32_t A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A6XX_SP_HS_CTRL_REG0_UNK13 0x00002000 -#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000 -#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 14 -static inline uint32_t A6XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val) -{ - return ((val) << 
A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK; -} -#define A6XX_SP_HS_CTRL_REG0_EARLYPREAMBLE 0x00100000 - -#define REG_A6XX_SP_HS_WAVE_INPUT_SIZE 0x0000a831 - -#define REG_A6XX_SP_HS_BRANCH_COND 0x0000a832 - -#define REG_A6XX_SP_HS_OBJ_FIRST_EXEC_OFFSET 0x0000a833 - -#define REG_A6XX_SP_HS_OBJ_START 0x0000a834 - -#define REG_A6XX_SP_HS_PVT_MEM_PARAM 0x0000a836 -#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff -#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0 -static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val) -{ - assert(!(val & 0x1ff)); - return (((val >> 9)) << A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK; -} -#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A6XX_SP_HS_PVT_MEM_ADDR 0x0000a837 - -#define REG_A6XX_SP_HS_PVT_MEM_SIZE 0x0000a839 -#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff -#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0 -static inline uint32_t A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK; -} -#define A6XX_SP_HS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000 - -#define REG_A6XX_SP_HS_TEX_COUNT 0x0000a83a - -#define REG_A6XX_SP_HS_CONFIG 0x0000a83b -#define A6XX_SP_HS_CONFIG_BINDLESS_TEX 0x00000001 -#define A6XX_SP_HS_CONFIG_BINDLESS_SAMP 0x00000002 -#define A6XX_SP_HS_CONFIG_BINDLESS_IBO 0x00000004 -#define A6XX_SP_HS_CONFIG_BINDLESS_UBO 0x00000008 -#define A6XX_SP_HS_CONFIG_ENABLED 0x00000100 -#define A6XX_SP_HS_CONFIG_NTEX__MASK 0x0001fe00 -#define A6XX_SP_HS_CONFIG_NTEX__SHIFT 9 -static inline uint32_t A6XX_SP_HS_CONFIG_NTEX(uint32_t val) -{ - return ((val) << A6XX_SP_HS_CONFIG_NTEX__SHIFT) & A6XX_SP_HS_CONFIG_NTEX__MASK; -} -#define A6XX_SP_HS_CONFIG_NSAMP__MASK 0x003e0000 -#define A6XX_SP_HS_CONFIG_NSAMP__SHIFT 17 -static inline uint32_t A6XX_SP_HS_CONFIG_NSAMP(uint32_t val) -{ - return ((val) << A6XX_SP_HS_CONFIG_NSAMP__SHIFT) & A6XX_SP_HS_CONFIG_NSAMP__MASK; -} -#define A6XX_SP_HS_CONFIG_NIBO__MASK 0x1fc00000 -#define A6XX_SP_HS_CONFIG_NIBO__SHIFT 22 -static inline uint32_t A6XX_SP_HS_CONFIG_NIBO(uint32_t val) -{ - return ((val) << A6XX_SP_HS_CONFIG_NIBO__SHIFT) & A6XX_SP_HS_CONFIG_NIBO__MASK; -} - -#define REG_A6XX_SP_HS_INSTRLEN 0x0000a83c - -#define REG_A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET 0x0000a83d -#define A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff -#define A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0 -static inline uint32_t A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val) -{ - assert(!(val & 0x7ff)); - return (((val >> 11)) << A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK; -} - -#define REG_A7XX_SP_HS_VGPR_CONFIG 0x0000a82f - -#define REG_A6XX_SP_DS_CTRL_REG0 0x0000a840 -#define A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK 0x00000001 -#define A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT 0 -static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) -{ - return ((val) << A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT) & 
A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK; -} -#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e -#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1 -static inline uint32_t A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80 -#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7 -static inline uint32_t A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A6XX_SP_DS_CTRL_REG0_UNK13 0x00002000 -#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000 -#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 14 -static inline uint32_t A6XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val) -{ - return ((val) << A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK; -} -#define A6XX_SP_DS_CTRL_REG0_EARLYPREAMBLE 0x00100000 - -#define REG_A6XX_SP_DS_BRANCH_COND 0x0000a841 - -#define REG_A6XX_SP_DS_PRIMITIVE_CNTL 0x0000a842 -#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f -#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT 0 -static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_OUT(uint32_t val) -{ - return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK; -} -#define A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0 -#define A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6 -static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val) -{ - return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__MASK; -} - -#define REG_A6XX_SP_DS_OUT(i0) (0x0000a843 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000a843 + 0x1*i0; } -#define A6XX_SP_DS_OUT_REG_A_REGID__MASK 0x000000ff -#define A6XX_SP_DS_OUT_REG_A_REGID__SHIFT 0 -static inline uint32_t A6XX_SP_DS_OUT_REG_A_REGID(uint32_t val) -{ - return ((val) << A6XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_A_REGID__MASK; -} -#define A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00000f00 -#define A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 8 -static inline uint32_t A6XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val) -{ - return ((val) << A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK; -} -#define A6XX_SP_DS_OUT_REG_B_REGID__MASK 0x00ff0000 -#define A6XX_SP_DS_OUT_REG_B_REGID__SHIFT 16 -static inline uint32_t A6XX_SP_DS_OUT_REG_B_REGID(uint32_t val) -{ - return ((val) << A6XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_B_REGID__MASK; -} -#define A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x0f000000 -#define A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 24 -static inline uint32_t A6XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val) -{ - return ((val) << A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK; -} - -#define REG_A6XX_SP_DS_VPC_DST(i0) (0x0000a853 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000a853 + 0x1*i0; } -#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff -#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0 -static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val) -{ - return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK; -} -#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 -#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8 -static inline uint32_t 
A6XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val) -{ - return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK; -} -#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 -#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16 -static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val) -{ - return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK; -} -#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 -#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24 -static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val) -{ - return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK; -} - -#define REG_A6XX_SP_DS_OBJ_FIRST_EXEC_OFFSET 0x0000a85b - -#define REG_A6XX_SP_DS_OBJ_START 0x0000a85c - -#define REG_A6XX_SP_DS_PVT_MEM_PARAM 0x0000a85e -#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff -#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0 -static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val) -{ - assert(!(val & 0x1ff)); - return (((val >> 9)) << A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK; -} -#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A6XX_SP_DS_PVT_MEM_ADDR 0x0000a85f - -#define REG_A6XX_SP_DS_PVT_MEM_SIZE 0x0000a861 -#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff -#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0 -static inline uint32_t A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK; -} -#define A6XX_SP_DS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000 - -#define REG_A6XX_SP_DS_TEX_COUNT 0x0000a862 - -#define REG_A6XX_SP_DS_CONFIG 0x0000a863 -#define A6XX_SP_DS_CONFIG_BINDLESS_TEX 0x00000001 -#define A6XX_SP_DS_CONFIG_BINDLESS_SAMP 0x00000002 -#define A6XX_SP_DS_CONFIG_BINDLESS_IBO 0x00000004 -#define A6XX_SP_DS_CONFIG_BINDLESS_UBO 0x00000008 -#define A6XX_SP_DS_CONFIG_ENABLED 0x00000100 -#define A6XX_SP_DS_CONFIG_NTEX__MASK 0x0001fe00 -#define A6XX_SP_DS_CONFIG_NTEX__SHIFT 9 -static inline uint32_t A6XX_SP_DS_CONFIG_NTEX(uint32_t val) -{ - return ((val) << A6XX_SP_DS_CONFIG_NTEX__SHIFT) & A6XX_SP_DS_CONFIG_NTEX__MASK; -} -#define A6XX_SP_DS_CONFIG_NSAMP__MASK 0x003e0000 -#define A6XX_SP_DS_CONFIG_NSAMP__SHIFT 17 -static inline uint32_t A6XX_SP_DS_CONFIG_NSAMP(uint32_t val) -{ - return ((val) << A6XX_SP_DS_CONFIG_NSAMP__SHIFT) & A6XX_SP_DS_CONFIG_NSAMP__MASK; -} -#define A6XX_SP_DS_CONFIG_NIBO__MASK 0x1fc00000 -#define A6XX_SP_DS_CONFIG_NIBO__SHIFT 22 -static inline uint32_t A6XX_SP_DS_CONFIG_NIBO(uint32_t val) -{ - return ((val) << A6XX_SP_DS_CONFIG_NIBO__SHIFT) & A6XX_SP_DS_CONFIG_NIBO__MASK; -} - -#define REG_A6XX_SP_DS_INSTRLEN 0x0000a864 - -#define REG_A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET 0x0000a865 -#define A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff -#define A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0 -static inline uint32_t A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val) -{ - assert(!(val & 0x7ff)); - return (((val >> 11)) << 
A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK; -} - -#define REG_A7XX_SP_DS_VGPR_CONFIG 0x0000a868 - -#define REG_A6XX_SP_GS_CTRL_REG0 0x0000a870 -#define A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK 0x00000001 -#define A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT 0 -static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) -{ - return ((val) << A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK; -} -#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e -#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1 -static inline uint32_t A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80 -#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7 -static inline uint32_t A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A6XX_SP_GS_CTRL_REG0_UNK13 0x00002000 -#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000 -#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 14 -static inline uint32_t A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val) -{ - return ((val) << A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK; -} -#define A6XX_SP_GS_CTRL_REG0_EARLYPREAMBLE 0x00100000 - -#define REG_A6XX_SP_GS_PRIM_SIZE 0x0000a871 - -#define REG_A6XX_SP_GS_BRANCH_COND 0x0000a872 - -#define REG_A6XX_SP_GS_PRIMITIVE_CNTL 0x0000a873 -#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f -#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT 0 -static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_OUT(uint32_t val) -{ - return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK; -} -#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0 -#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6 -static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val) -{ - return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK; -} - -#define REG_A6XX_SP_GS_OUT(i0) (0x0000a874 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_SP_GS_OUT_REG(uint32_t i0) { return 0x0000a874 + 0x1*i0; } -#define A6XX_SP_GS_OUT_REG_A_REGID__MASK 0x000000ff -#define A6XX_SP_GS_OUT_REG_A_REGID__SHIFT 0 -static inline uint32_t A6XX_SP_GS_OUT_REG_A_REGID(uint32_t val) -{ - return ((val) << A6XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_A_REGID__MASK; -} -#define A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00000f00 -#define A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 8 -static inline uint32_t A6XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val) -{ - return ((val) << A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK; -} -#define A6XX_SP_GS_OUT_REG_B_REGID__MASK 0x00ff0000 -#define A6XX_SP_GS_OUT_REG_B_REGID__SHIFT 16 -static inline uint32_t A6XX_SP_GS_OUT_REG_B_REGID(uint32_t val) -{ - return ((val) << A6XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_B_REGID__MASK; -} -#define A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x0f000000 -#define A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 24 -static inline uint32_t A6XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val) -{ - return ((val) << A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK; -} - -#define REG_A6XX_SP_GS_VPC_DST(i0) (0x0000a884 + 0x1*(i0)) - -static inline 
uint32_t REG_A6XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x0000a884 + 0x1*i0; } -#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff -#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0 -static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val) -{ - return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK; -} -#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 -#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8 -static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val) -{ - return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK; -} -#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 -#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16 -static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val) -{ - return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK; -} -#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 -#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24 -static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val) -{ - return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK; -} - -#define REG_A6XX_SP_GS_OBJ_FIRST_EXEC_OFFSET 0x0000a88c - -#define REG_A6XX_SP_GS_OBJ_START 0x0000a88d - -#define REG_A6XX_SP_GS_PVT_MEM_PARAM 0x0000a88f -#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff -#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0 -static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val) -{ - assert(!(val & 0x1ff)); - return (((val >> 9)) << A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK; -} -#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A6XX_SP_GS_PVT_MEM_ADDR 0x0000a890 - -#define REG_A6XX_SP_GS_PVT_MEM_SIZE 0x0000a892 -#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff -#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0 -static inline uint32_t A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK; -} -#define A6XX_SP_GS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000 - -#define REG_A6XX_SP_GS_TEX_COUNT 0x0000a893 - -#define REG_A6XX_SP_GS_CONFIG 0x0000a894 -#define A6XX_SP_GS_CONFIG_BINDLESS_TEX 0x00000001 -#define A6XX_SP_GS_CONFIG_BINDLESS_SAMP 0x00000002 -#define A6XX_SP_GS_CONFIG_BINDLESS_IBO 0x00000004 -#define A6XX_SP_GS_CONFIG_BINDLESS_UBO 0x00000008 -#define A6XX_SP_GS_CONFIG_ENABLED 0x00000100 -#define A6XX_SP_GS_CONFIG_NTEX__MASK 0x0001fe00 -#define A6XX_SP_GS_CONFIG_NTEX__SHIFT 9 -static inline uint32_t A6XX_SP_GS_CONFIG_NTEX(uint32_t val) -{ - return ((val) << A6XX_SP_GS_CONFIG_NTEX__SHIFT) & A6XX_SP_GS_CONFIG_NTEX__MASK; -} -#define A6XX_SP_GS_CONFIG_NSAMP__MASK 0x003e0000 -#define A6XX_SP_GS_CONFIG_NSAMP__SHIFT 17 -static inline uint32_t A6XX_SP_GS_CONFIG_NSAMP(uint32_t val) -{ - return ((val) << A6XX_SP_GS_CONFIG_NSAMP__SHIFT) & A6XX_SP_GS_CONFIG_NSAMP__MASK; -} -#define A6XX_SP_GS_CONFIG_NIBO__MASK 0x1fc00000 -#define A6XX_SP_GS_CONFIG_NIBO__SHIFT 22 -static inline uint32_t 
A6XX_SP_GS_CONFIG_NIBO(uint32_t val) -{ - return ((val) << A6XX_SP_GS_CONFIG_NIBO__SHIFT) & A6XX_SP_GS_CONFIG_NIBO__MASK; -} - -#define REG_A6XX_SP_GS_INSTRLEN 0x0000a895 - -#define REG_A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET 0x0000a896 -#define A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff -#define A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0 -static inline uint32_t A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val) -{ - assert(!(val & 0x7ff)); - return (((val >> 11)) << A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK; -} - -#define REG_A7XX_SP_GS_VGPR_CONFIG 0x0000a899 - -#define REG_A6XX_SP_VS_TEX_SAMP 0x0000a8a0 - -#define REG_A6XX_SP_HS_TEX_SAMP 0x0000a8a2 - -#define REG_A6XX_SP_DS_TEX_SAMP 0x0000a8a4 - -#define REG_A6XX_SP_GS_TEX_SAMP 0x0000a8a6 - -#define REG_A6XX_SP_VS_TEX_CONST 0x0000a8a8 - -#define REG_A6XX_SP_HS_TEX_CONST 0x0000a8aa - -#define REG_A6XX_SP_DS_TEX_CONST 0x0000a8ac - -#define REG_A6XX_SP_GS_TEX_CONST 0x0000a8ae - -#define REG_A6XX_SP_FS_CTRL_REG0 0x0000a980 -#define A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001 -#define A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0 -static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) -{ - return ((val) << A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK; -} -#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e -#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1 -static inline uint32_t A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80 -#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7 -static inline uint32_t A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A6XX_SP_FS_CTRL_REG0_UNK13 0x00002000 -#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000 -#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 14 -static inline uint32_t A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val) -{ - return ((val) << A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK; -} -#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000 -#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20 -static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val) -{ - return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK; -} -#define A6XX_SP_FS_CTRL_REG0_UNK21 0x00200000 -#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000 -#define A6XX_SP_FS_CTRL_REG0_LODPIXMASK 0x00800000 -#define A6XX_SP_FS_CTRL_REG0_UNK24 0x01000000 -#define A6XX_SP_FS_CTRL_REG0_UNK25 0x02000000 -#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000 -#define A6XX_SP_FS_CTRL_REG0_UNK27 0x08000000 -#define A6XX_SP_FS_CTRL_REG0_EARLYPREAMBLE 0x10000000 -#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000 - -#define REG_A6XX_SP_FS_BRANCH_COND 0x0000a981 - -#define REG_A6XX_SP_FS_OBJ_FIRST_EXEC_OFFSET 0x0000a982 - -#define REG_A6XX_SP_FS_OBJ_START 0x0000a983 - -#define REG_A6XX_SP_FS_PVT_MEM_PARAM 0x0000a985 -#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff -#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0 -static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val) -{ - assert(!(val & 0x1ff)); - return (((val >> 9)) << 
A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK; -} -#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A6XX_SP_FS_PVT_MEM_ADDR 0x0000a986 - -#define REG_A6XX_SP_FS_PVT_MEM_SIZE 0x0000a988 -#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff -#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0 -static inline uint32_t A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK; -} -#define A6XX_SP_FS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000 - -#define REG_A6XX_SP_BLEND_CNTL 0x0000a989 -#define A6XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff -#define A6XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT 0 -static inline uint32_t A6XX_SP_BLEND_CNTL_ENABLE_BLEND(uint32_t val) -{ - return ((val) << A6XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK; -} -#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100 -#define A6XX_SP_BLEND_CNTL_DUAL_COLOR_IN_ENABLE 0x00000200 -#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400 - -#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a -#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001 -#define A6XX_SP_SRGB_CNTL_SRGB_MRT1 0x00000002 -#define A6XX_SP_SRGB_CNTL_SRGB_MRT2 0x00000004 -#define A6XX_SP_SRGB_CNTL_SRGB_MRT3 0x00000008 -#define A6XX_SP_SRGB_CNTL_SRGB_MRT4 0x00000010 -#define A6XX_SP_SRGB_CNTL_SRGB_MRT5 0x00000020 -#define A6XX_SP_SRGB_CNTL_SRGB_MRT6 0x00000040 -#define A6XX_SP_SRGB_CNTL_SRGB_MRT7 0x00000080 - -#define REG_A6XX_SP_FS_RENDER_COMPONENTS 0x0000a98b -#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK 0x0000000f -#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT 0 -static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT0(uint32_t val) -{ - return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK; -} -#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK 0x000000f0 -#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT 4 -static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT1(uint32_t val) -{ - return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK; -} -#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK 0x00000f00 -#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT 8 -static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT2(uint32_t val) -{ - return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK; -} -#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK 0x0000f000 -#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT 12 -static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT3(uint32_t val) -{ - return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK; -} -#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK 0x000f0000 -#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT 16 -static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT4(uint32_t val) -{ - return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK; -} -#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK 0x00f00000 -#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT 20 -static inline uint32_t 
A6XX_SP_FS_RENDER_COMPONENTS_RT5(uint32_t val) -{ - return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK; -} -#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK 0x0f000000 -#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT 24 -static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT6(uint32_t val) -{ - return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK; -} -#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK 0xf0000000 -#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT 28 -static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT7(uint32_t val) -{ - return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK; -} - -#define REG_A6XX_SP_FS_OUTPUT_CNTL0 0x0000a98c -#define A6XX_SP_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE 0x00000001 -#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK 0x0000ff00 -#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT 8 -static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(uint32_t val) -{ - return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK; -} -#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK 0x00ff0000 -#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT 16 -static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(uint32_t val) -{ - return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK; -} -#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK 0xff000000 -#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT 24 -static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID(uint32_t val) -{ - return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK; -} - -#define REG_A6XX_SP_FS_OUTPUT_CNTL1 0x0000a98d -#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f -#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT 0 -static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL1_MRT(uint32_t val) -{ - return ((val) << A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK; -} - -#define REG_A6XX_SP_FS_OUTPUT(i0) (0x0000a98e + 0x1*(i0)) - -static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; } -#define A6XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff -#define A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0 -static inline uint32_t A6XX_SP_FS_OUTPUT_REG_REGID(uint32_t val) -{ - return ((val) << A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_REG_REGID__MASK; -} -#define A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100 - -#define REG_A6XX_SP_FS_MRT(i0) (0x0000a996 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000a996 + 0x1*i0; } -#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff -#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_format val) -{ - return ((val) << A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK; -} -#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100 -#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200 -#define A6XX_SP_FS_MRT_REG_UNK10 0x00000400 - -#define REG_A6XX_SP_FS_PREFETCH_CNTL 0x0000a99e -#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK 0x00000007 -#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT 0 -static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_COUNT(uint32_t val) -{ - return ((val) << A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK; -} -#define 
A6XX_SP_FS_PREFETCH_CNTL_IJ_WRITE_DISABLE 0x00000008 -#define A6XX_SP_FS_PREFETCH_CNTL_ENDOFQUAD 0x00000010 -#define A6XX_SP_FS_PREFETCH_CNTL_WRITE_COLOR_TO_OUTPUT 0x00000020 -#define A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID__MASK 0x00007fc0 -#define A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID__SHIFT 6 -static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID(uint32_t val) -{ - return ((val) << A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID__MASK; -} -#define A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD__MASK 0x01ff0000 -#define A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD__SHIFT 16 -static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD(uint32_t val) -{ - return ((val) << A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_CONSTSLOTID4COORD__MASK; -} - -#define REG_A6XX_SP_FS_PREFETCH(i0) (0x0000a99f + 0x1*(i0)) - -static inline uint32_t REG_A6XX_SP_FS_PREFETCH_CMD(uint32_t i0) { return 0x0000a99f + 0x1*i0; } -#define A6XX_SP_FS_PREFETCH_CMD_SRC__MASK 0x0000007f -#define A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT 0 -static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SRC(uint32_t val) -{ - return ((val) << A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SRC__MASK; -} -#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK 0x00000780 -#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT 7 -static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(uint32_t val) -{ - return ((val) << A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK; -} -#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK 0x0000f800 -#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT 11 -static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_TEX_ID(uint32_t val) -{ - return ((val) << A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK; -} -#define A6XX_SP_FS_PREFETCH_CMD_DST__MASK 0x003f0000 -#define A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT 16 -static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_DST(uint32_t val) -{ - return ((val) << A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_DST__MASK; -} -#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK 0x03c00000 -#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT 22 -static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_WRMASK(uint32_t val) -{ - return ((val) << A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK; -} -#define A6XX_SP_FS_PREFETCH_CMD_HALF 0x04000000 -#define A6XX_SP_FS_PREFETCH_CMD_UNK27 0x08000000 -#define A6XX_SP_FS_PREFETCH_CMD_BINDLESS 0x10000000 -#define A6XX_SP_FS_PREFETCH_CMD_CMD__MASK 0xe0000000 -#define A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT 29 -static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_CMD(enum a6xx_tex_prefetch_cmd val) -{ - return ((val) << A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_CMD__MASK; -} - -#define REG_A7XX_SP_FS_PREFETCH(i0) (0x0000a99f + 0x1*(i0)) - -static inline uint32_t REG_A7XX_SP_FS_PREFETCH_CMD(uint32_t i0) { return 0x0000a99f + 0x1*i0; } -#define A7XX_SP_FS_PREFETCH_CMD_SRC__MASK 0x0000007f -#define A7XX_SP_FS_PREFETCH_CMD_SRC__SHIFT 0 -static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_SRC(uint32_t val) -{ - return ((val) << A7XX_SP_FS_PREFETCH_CMD_SRC__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_SRC__MASK; -} -#define A7XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK 0x00000380 -#define A7XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT 7 -static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_SAMP_ID(uint32_t val) -{ - return ((val) << A7XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK; -} -#define A7XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK 
0x00001c00 -#define A7XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT 10 -static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_TEX_ID(uint32_t val) -{ - return ((val) << A7XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK; -} -#define A7XX_SP_FS_PREFETCH_CMD_DST__MASK 0x0007e000 -#define A7XX_SP_FS_PREFETCH_CMD_DST__SHIFT 13 -static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_DST(uint32_t val) -{ - return ((val) << A7XX_SP_FS_PREFETCH_CMD_DST__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_DST__MASK; -} -#define A7XX_SP_FS_PREFETCH_CMD_WRMASK__MASK 0x00780000 -#define A7XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT 19 -static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_WRMASK(uint32_t val) -{ - return ((val) << A7XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_WRMASK__MASK; -} -#define A7XX_SP_FS_PREFETCH_CMD_HALF 0x00800000 -#define A7XX_SP_FS_PREFETCH_CMD_BINDLESS 0x02000000 -#define A7XX_SP_FS_PREFETCH_CMD_CMD__MASK 0x3c000000 -#define A7XX_SP_FS_PREFETCH_CMD_CMD__SHIFT 26 -static inline uint32_t A7XX_SP_FS_PREFETCH_CMD_CMD(enum a6xx_tex_prefetch_cmd val) -{ - return ((val) << A7XX_SP_FS_PREFETCH_CMD_CMD__SHIFT) & A7XX_SP_FS_PREFETCH_CMD_CMD__MASK; -} - -#define REG_A6XX_SP_FS_BINDLESS_PREFETCH(i0) (0x0000a9a3 + 0x1*(i0)) - -static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; } -#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK 0x0000ffff -#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT 0 -static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(uint32_t val) -{ - return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK; -} -#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK 0xffff0000 -#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT 16 -static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(uint32_t val) -{ - return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK; -} - -#define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7 - -#define REG_A6XX_SP_UNKNOWN_A9A8 0x0000a9a8 - -#define REG_A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET 0x0000a9a9 -#define A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff -#define A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0 -static inline uint32_t A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val) -{ - assert(!(val & 0x7ff)); - return (((val >> 11)) << A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK; -} - -#define REG_A6XX_SP_CS_CTRL_REG0 0x0000a9b0 -#define A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK 0x00000001 -#define A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT 0 -static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) -{ - return ((val) << A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK; -} -#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e -#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1 -static inline uint32_t A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) -{ - return ((val) << A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK; -} -#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80 -#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7 -static inline uint32_t A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) -{ - return ((val) << A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK; -} -#define A6XX_SP_CS_CTRL_REG0_UNK13 0x00002000 -#define 
A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000 -#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 14 -static inline uint32_t A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val) -{ - return ((val) << A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK; -} -#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000 -#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20 -static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val) -{ - return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK; -} -#define A6XX_SP_CS_CTRL_REG0_UNK21 0x00200000 -#define A6XX_SP_CS_CTRL_REG0_UNK22 0x00400000 -#define A6XX_SP_CS_CTRL_REG0_EARLYPREAMBLE 0x00800000 -#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000 - -#define REG_A6XX_SP_CS_UNKNOWN_A9B1 0x0000a9b1 -#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__MASK 0x0000001f -#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__SHIFT 0 -static inline uint32_t A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE(uint32_t val) -{ - return ((val) << A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__SHIFT) & A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__MASK; -} -#define A6XX_SP_CS_UNKNOWN_A9B1_UNK5 0x00000020 -#define A6XX_SP_CS_UNKNOWN_A9B1_UNK6 0x00000040 - -#define REG_A6XX_SP_CS_BRANCH_COND 0x0000a9b2 - -#define REG_A6XX_SP_CS_OBJ_FIRST_EXEC_OFFSET 0x0000a9b3 - -#define REG_A6XX_SP_CS_OBJ_START 0x0000a9b4 - -#define REG_A6XX_SP_CS_PVT_MEM_PARAM 0x0000a9b6 -#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff -#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0 -static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val) -{ - assert(!(val & 0x1ff)); - return (((val >> 9)) << A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK; -} -#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000 -#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24 -static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val) -{ - return ((val) << A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK; -} - -#define REG_A6XX_SP_CS_PVT_MEM_ADDR 0x0000a9b7 - -#define REG_A6XX_SP_CS_PVT_MEM_SIZE 0x0000a9b9 -#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff -#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0 -static inline uint32_t A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK; -} -#define A6XX_SP_CS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000 - -#define REG_A6XX_SP_CS_TEX_COUNT 0x0000a9ba - -#define REG_A6XX_SP_CS_CONFIG 0x0000a9bb -#define A6XX_SP_CS_CONFIG_BINDLESS_TEX 0x00000001 -#define A6XX_SP_CS_CONFIG_BINDLESS_SAMP 0x00000002 -#define A6XX_SP_CS_CONFIG_BINDLESS_IBO 0x00000004 -#define A6XX_SP_CS_CONFIG_BINDLESS_UBO 0x00000008 -#define A6XX_SP_CS_CONFIG_ENABLED 0x00000100 -#define A6XX_SP_CS_CONFIG_NTEX__MASK 0x0001fe00 -#define A6XX_SP_CS_CONFIG_NTEX__SHIFT 9 -static inline uint32_t A6XX_SP_CS_CONFIG_NTEX(uint32_t val) -{ - return ((val) << A6XX_SP_CS_CONFIG_NTEX__SHIFT) & A6XX_SP_CS_CONFIG_NTEX__MASK; -} -#define A6XX_SP_CS_CONFIG_NSAMP__MASK 0x003e0000 -#define A6XX_SP_CS_CONFIG_NSAMP__SHIFT 17 -static inline uint32_t A6XX_SP_CS_CONFIG_NSAMP(uint32_t val) -{ - return ((val) << A6XX_SP_CS_CONFIG_NSAMP__SHIFT) & A6XX_SP_CS_CONFIG_NSAMP__MASK; -} -#define A6XX_SP_CS_CONFIG_NIBO__MASK 0x1fc00000 -#define 
A6XX_SP_CS_CONFIG_NIBO__SHIFT 22 -static inline uint32_t A6XX_SP_CS_CONFIG_NIBO(uint32_t val) -{ - return ((val) << A6XX_SP_CS_CONFIG_NIBO__SHIFT) & A6XX_SP_CS_CONFIG_NIBO__MASK; -} - -#define REG_A6XX_SP_CS_INSTRLEN 0x0000a9bc - -#define REG_A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET 0x0000a9bd -#define A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff -#define A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0 -static inline uint32_t A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val) -{ - assert(!(val & 0x7ff)); - return (((val >> 11)) << A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK; -} - -#define REG_A7XX_SP_CS_UNKNOWN_A9BE 0x0000a9be - -#define REG_A7XX_SP_CS_VGPR_CONFIG 0x0000a9c5 - -#define REG_A6XX_SP_CS_CNTL_0 0x0000a9c2 -#define A6XX_SP_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff -#define A6XX_SP_CS_CNTL_0_WGIDCONSTID__SHIFT 0 -static inline uint32_t A6XX_SP_CS_CNTL_0_WGIDCONSTID(uint32_t val) -{ - return ((val) << A6XX_SP_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGIDCONSTID__MASK; -} -#define A6XX_SP_CS_CNTL_0_WGSIZECONSTID__MASK 0x0000ff00 -#define A6XX_SP_CS_CNTL_0_WGSIZECONSTID__SHIFT 8 -static inline uint32_t A6XX_SP_CS_CNTL_0_WGSIZECONSTID(uint32_t val) -{ - return ((val) << A6XX_SP_CS_CNTL_0_WGSIZECONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGSIZECONSTID__MASK; -} -#define A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__MASK 0x00ff0000 -#define A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__SHIFT 16 -static inline uint32_t A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID(uint32_t val) -{ - return ((val) << A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__MASK; -} -#define A6XX_SP_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000 -#define A6XX_SP_CS_CNTL_0_LOCALIDREGID__SHIFT 24 -static inline uint32_t A6XX_SP_CS_CNTL_0_LOCALIDREGID(uint32_t val) -{ - return ((val) << A6XX_SP_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_SP_CS_CNTL_0_LOCALIDREGID__MASK; -} - -#define REG_A6XX_SP_CS_CNTL_1 0x0000a9c3 -#define A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff -#define A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0 -static inline uint32_t A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val) -{ - return ((val) << A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK; -} -#define A6XX_SP_CS_CNTL_1_SINGLE_SP_CORE 0x00000100 -#define A6XX_SP_CS_CNTL_1_THREADSIZE__MASK 0x00000200 -#define A6XX_SP_CS_CNTL_1_THREADSIZE__SHIFT 9 -static inline uint32_t A6XX_SP_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val) -{ - return ((val) << A6XX_SP_CS_CNTL_1_THREADSIZE__SHIFT) & A6XX_SP_CS_CNTL_1_THREADSIZE__MASK; -} -#define A6XX_SP_CS_CNTL_1_THREADSIZE_SCALAR 0x00000400 - -#define REG_A7XX_SP_CS_CNTL_1 0x0000a9c3 -#define A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff -#define A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0 -static inline uint32_t A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val) -{ - return ((val) << A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A7XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK; -} -#define A7XX_SP_CS_CNTL_1_THREADSIZE__MASK 0x00000100 -#define A7XX_SP_CS_CNTL_1_THREADSIZE__SHIFT 8 -static inline uint32_t A7XX_SP_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val) -{ - return ((val) << A7XX_SP_CS_CNTL_1_THREADSIZE__SHIFT) & A7XX_SP_CS_CNTL_1_THREADSIZE__MASK; -} -#define A7XX_SP_CS_CNTL_1_THREADSIZE_SCALAR 0x00000200 -#define A7XX_SP_CS_CNTL_1_UNK15 0x00008000 - -#define REG_A6XX_SP_FS_TEX_SAMP 0x0000a9e0 - -#define REG_A6XX_SP_CS_TEX_SAMP 0x0000a9e2 - -#define REG_A6XX_SP_FS_TEX_CONST 0x0000a9e4 - 
-#define REG_A6XX_SP_CS_TEX_CONST 0x0000a9e6 - -#define REG_A6XX_SP_CS_BINDLESS_BASE(i0) (0x0000a9e8 + 0x2*(i0)) - -static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; } -#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003 -#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0 -static inline uint32_t A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val) -{ - return ((val) << A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK; -} -#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc -#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2 -static inline uint32_t A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK; -} - -#define REG_A7XX_SP_CS_BINDLESS_BASE(i0) (0x0000a9e8 + 0x2*(i0)) - -static inline uint32_t REG_A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; } -#define A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003 -#define A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0 -static inline uint32_t A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val) -{ - return ((val) << A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK; -} -#define A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc -#define A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2 -static inline uint32_t A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A7XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK; -} - -#define REG_A6XX_SP_CS_IBO 0x0000a9f2 - -#define REG_A6XX_SP_CS_IBO_COUNT 0x0000aa00 - -#define REG_A7XX_SP_FS_VGPR_CONFIG 0x0000aa01 - -#define REG_A7XX_SP_PS_ALIASED_COMPONENTS_CONTROL 0x0000aa02 -#define A7XX_SP_PS_ALIASED_COMPONENTS_CONTROL_ENABLED 0x00000001 - -#define REG_A7XX_SP_PS_ALIASED_COMPONENTS 0x0000aa03 -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT0__MASK 0x0000000f -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT0__SHIFT 0 -static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT0(uint32_t val) -{ - return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT0__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT0__MASK; -} -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT1__MASK 0x000000f0 -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT1__SHIFT 4 -static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT1(uint32_t val) -{ - return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT1__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT1__MASK; -} -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT2__MASK 0x00000f00 -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT2__SHIFT 8 -static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT2(uint32_t val) -{ - return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT2__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT2__MASK; -} -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT3__MASK 0x0000f000 -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT3__SHIFT 12 -static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT3(uint32_t val) -{ - return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT3__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT3__MASK; -} -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT4__MASK 0x000f0000 -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT4__SHIFT 16 -static inline uint32_t 
A7XX_SP_PS_ALIASED_COMPONENTS_RT4(uint32_t val) -{ - return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT4__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT4__MASK; -} -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT5__MASK 0x00f00000 -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT5__SHIFT 20 -static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT5(uint32_t val) -{ - return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT5__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT5__MASK; -} -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT6__MASK 0x0f000000 -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT6__SHIFT 24 -static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT6(uint32_t val) -{ - return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT6__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT6__MASK; -} -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT7__MASK 0xf0000000 -#define A7XX_SP_PS_ALIASED_COMPONENTS_RT7__SHIFT 28 -static inline uint32_t A7XX_SP_PS_ALIASED_COMPONENTS_RT7(uint32_t val) -{ - return ((val) << A7XX_SP_PS_ALIASED_COMPONENTS_RT7__SHIFT) & A7XX_SP_PS_ALIASED_COMPONENTS_RT7__MASK; -} - -#define REG_A6XX_SP_UNKNOWN_AAF2 0x0000aaf2 - -#define REG_A6XX_SP_MODE_CONTROL 0x0000ab00 -#define A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE 0x00000001 -#define A6XX_SP_MODE_CONTROL_ISAMMODE__MASK 0x00000006 -#define A6XX_SP_MODE_CONTROL_ISAMMODE__SHIFT 1 -static inline uint32_t A6XX_SP_MODE_CONTROL_ISAMMODE(enum a6xx_isam_mode val) -{ - return ((val) << A6XX_SP_MODE_CONTROL_ISAMMODE__SHIFT) & A6XX_SP_MODE_CONTROL_ISAMMODE__MASK; -} -#define A6XX_SP_MODE_CONTROL_SHARED_CONSTS_ENABLE 0x00000008 - -#define REG_A7XX_SP_UNKNOWN_AB01 0x0000ab01 - -#define REG_A7XX_SP_UNKNOWN_AB02 0x0000ab02 - -#define REG_A6XX_SP_FS_CONFIG 0x0000ab04 -#define A6XX_SP_FS_CONFIG_BINDLESS_TEX 0x00000001 -#define A6XX_SP_FS_CONFIG_BINDLESS_SAMP 0x00000002 -#define A6XX_SP_FS_CONFIG_BINDLESS_IBO 0x00000004 -#define A6XX_SP_FS_CONFIG_BINDLESS_UBO 0x00000008 -#define A6XX_SP_FS_CONFIG_ENABLED 0x00000100 -#define A6XX_SP_FS_CONFIG_NTEX__MASK 0x0001fe00 -#define A6XX_SP_FS_CONFIG_NTEX__SHIFT 9 -static inline uint32_t A6XX_SP_FS_CONFIG_NTEX(uint32_t val) -{ - return ((val) << A6XX_SP_FS_CONFIG_NTEX__SHIFT) & A6XX_SP_FS_CONFIG_NTEX__MASK; -} -#define A6XX_SP_FS_CONFIG_NSAMP__MASK 0x003e0000 -#define A6XX_SP_FS_CONFIG_NSAMP__SHIFT 17 -static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val) -{ - return ((val) << A6XX_SP_FS_CONFIG_NSAMP__SHIFT) & A6XX_SP_FS_CONFIG_NSAMP__MASK; -} -#define A6XX_SP_FS_CONFIG_NIBO__MASK 0x1fc00000 -#define A6XX_SP_FS_CONFIG_NIBO__SHIFT 22 -static inline uint32_t A6XX_SP_FS_CONFIG_NIBO(uint32_t val) -{ - return ((val) << A6XX_SP_FS_CONFIG_NIBO__SHIFT) & A6XX_SP_FS_CONFIG_NIBO__MASK; -} - -#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05 - -#define REG_A6XX_SP_BINDLESS_BASE(i0) (0x0000ab10 + 0x2*(i0)) - -static inline uint32_t REG_A6XX_SP_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000ab10 + 0x2*i0; } -#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003 -#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0 -static inline uint32_t A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val) -{ - return ((val) << A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK; -} -#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc -#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2 -static inline uint32_t A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & 
A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK; -} - -#define REG_A7XX_SP_BINDLESS_BASE(i0) (0x0000ab0a + 0x2*(i0)) - -static inline uint32_t REG_A7XX_SP_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000ab0a + 0x2*i0; } -#define A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003 -#define A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0 -static inline uint32_t A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val) -{ - return ((val) << A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A7XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK; -} -#define A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc -#define A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2 -static inline uint32_t A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A7XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK; -} - -#define REG_A6XX_SP_IBO 0x0000ab1a - -#define REG_A6XX_SP_IBO_COUNT 0x0000ab20 - -#define REG_A7XX_SP_UNKNOWN_AB22 0x0000ab22 - -#define REG_A6XX_SP_2D_DST_FORMAT 0x0000acc0 -#define A6XX_SP_2D_DST_FORMAT_NORM 0x00000001 -#define A6XX_SP_2D_DST_FORMAT_SINT 0x00000002 -#define A6XX_SP_2D_DST_FORMAT_UINT 0x00000004 -#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK 0x000007f8 -#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT 3 -static inline uint32_t A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT(enum a6xx_format val) -{ - return ((val) << A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT) & A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK; -} -#define A6XX_SP_2D_DST_FORMAT_SRGB 0x00000800 -#define A6XX_SP_2D_DST_FORMAT_MASK__MASK 0x0000f000 -#define A6XX_SP_2D_DST_FORMAT_MASK__SHIFT 12 -static inline uint32_t A6XX_SP_2D_DST_FORMAT_MASK(uint32_t val) -{ - return ((val) << A6XX_SP_2D_DST_FORMAT_MASK__SHIFT) & A6XX_SP_2D_DST_FORMAT_MASK__MASK; -} - -#define REG_A7XX_SP_2D_DST_FORMAT 0x0000a9bf -#define A7XX_SP_2D_DST_FORMAT_NORM 0x00000001 -#define A7XX_SP_2D_DST_FORMAT_SINT 0x00000002 -#define A7XX_SP_2D_DST_FORMAT_UINT 0x00000004 -#define A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK 0x000007f8 -#define A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT 3 -static inline uint32_t A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT(enum a6xx_format val) -{ - return ((val) << A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT) & A7XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK; -} -#define A7XX_SP_2D_DST_FORMAT_SRGB 0x00000800 -#define A7XX_SP_2D_DST_FORMAT_MASK__MASK 0x0000f000 -#define A7XX_SP_2D_DST_FORMAT_MASK__SHIFT 12 -static inline uint32_t A7XX_SP_2D_DST_FORMAT_MASK(uint32_t val) -{ - return ((val) << A7XX_SP_2D_DST_FORMAT_MASK__SHIFT) & A7XX_SP_2D_DST_FORMAT_MASK__MASK; -} - -#define REG_A6XX_SP_DBG_ECO_CNTL 0x0000ae00 - -#define REG_A6XX_SP_ADDR_MODE_CNTL 0x0000ae01 - -#define REG_A6XX_SP_NC_MODE_CNTL 0x0000ae02 - -#define REG_A6XX_SP_CHICKEN_BITS 0x0000ae03 - -#define REG_A6XX_SP_FLOAT_CNTL 0x0000ae04 -#define A6XX_SP_FLOAT_CNTL_F16_NO_INF 0x00000008 - -#define REG_A7XX_SP_UNKNOWN_AE06 0x0000ae06 - -#define REG_A7XX_SP_UNKNOWN_AE08 0x0000ae08 - -#define REG_A7XX_SP_UNKNOWN_AE09 0x0000ae09 - -#define REG_A7XX_SP_UNKNOWN_AE0A 0x0000ae0a - -#define REG_A6XX_SP_PERFCTR_ENABLE 0x0000ae0f -#define A6XX_SP_PERFCTR_ENABLE_VS 0x00000001 -#define A6XX_SP_PERFCTR_ENABLE_HS 0x00000002 -#define A6XX_SP_PERFCTR_ENABLE_DS 0x00000004 -#define A6XX_SP_PERFCTR_ENABLE_GS 0x00000008 -#define A6XX_SP_PERFCTR_ENABLE_FS 0x00000010 -#define A6XX_SP_PERFCTR_ENABLE_CS 0x00000020 - -#define REG_A6XX_SP_PERFCTR_SP_SEL(i0) (0x0000ae10 + 0x1*(i0)) - -#define 
REG_A7XX_SP_PERFCTR_HLSQ_SEL(i0) (0x0000ae60 + 0x1*(i0)) - -#define REG_A7XX_SP_UNKNOWN_AE6A 0x0000ae6a - -#define REG_A7XX_SP_UNKNOWN_AE6B 0x0000ae6b - -#define REG_A7XX_SP_UNKNOWN_AE6C 0x0000ae6c - -#define REG_A7XX_SP_READ_SEL 0x0000ae6d -#define A7XX_SP_READ_SEL_LOCATION__MASK 0x000c0000 -#define A7XX_SP_READ_SEL_LOCATION__SHIFT 18 -static inline uint32_t A7XX_SP_READ_SEL_LOCATION(enum a7xx_state_location val) -{ - return ((val) << A7XX_SP_READ_SEL_LOCATION__SHIFT) & A7XX_SP_READ_SEL_LOCATION__MASK; -} -#define A7XX_SP_READ_SEL_PIPE__MASK 0x00030000 -#define A7XX_SP_READ_SEL_PIPE__SHIFT 16 -static inline uint32_t A7XX_SP_READ_SEL_PIPE(enum a7xx_pipe val) -{ - return ((val) << A7XX_SP_READ_SEL_PIPE__SHIFT) & A7XX_SP_READ_SEL_PIPE__MASK; -} -#define A7XX_SP_READ_SEL_STATETYPE__MASK 0x0000ff00 -#define A7XX_SP_READ_SEL_STATETYPE__SHIFT 8 -static inline uint32_t A7XX_SP_READ_SEL_STATETYPE(enum a7xx_statetype_id val) -{ - return ((val) << A7XX_SP_READ_SEL_STATETYPE__SHIFT) & A7XX_SP_READ_SEL_STATETYPE__MASK; -} -#define A7XX_SP_READ_SEL_USPTP__MASK 0x000000f0 -#define A7XX_SP_READ_SEL_USPTP__SHIFT 4 -static inline uint32_t A7XX_SP_READ_SEL_USPTP(uint32_t val) -{ - return ((val) << A7XX_SP_READ_SEL_USPTP__SHIFT) & A7XX_SP_READ_SEL_USPTP__MASK; -} -#define A7XX_SP_READ_SEL_SPTP__MASK 0x0000000f -#define A7XX_SP_READ_SEL_SPTP__SHIFT 0 -static inline uint32_t A7XX_SP_READ_SEL_SPTP(uint32_t val) -{ - return ((val) << A7XX_SP_READ_SEL_SPTP__SHIFT) & A7XX_SP_READ_SEL_SPTP__MASK; -} - -#define REG_A7XX_SP_DBG_CNTL 0x0000ae71 - -#define REG_A7XX_SP_UNKNOWN_AE73 0x0000ae73 - -#define REG_A7XX_SP_PERFCTR_SP_SEL(i0) (0x0000ae80 + 0x1*(i0)) - -#define REG_A6XX_SP_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22 - -#define REG_A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR 0x0000b180 - -#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182 - -#define REG_A6XX_SP_UNKNOWN_B183 0x0000b183 - -#define REG_A6XX_SP_UNKNOWN_B190 0x0000b190 - -#define REG_A6XX_SP_UNKNOWN_B191 0x0000b191 - -#define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300 -#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 -#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 -static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK; -} -#define A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__MASK 0x0000000c -#define A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__SHIFT 2 -static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_UNK2(uint32_t val) -{ - return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__MASK; -} - -#define REG_A6XX_SP_TP_DEST_MSAA_CNTL 0x0000b301 -#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003 -#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0 -static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK; -} -#define A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004 - -#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR 0x0000b302 - -#define REG_A6XX_SP_TP_SAMPLE_CONFIG 0x0000b304 -#define A6XX_SP_TP_SAMPLE_CONFIG_UNK0 0x00000001 -#define A6XX_SP_TP_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002 - -#define REG_A6XX_SP_TP_SAMPLE_LOCATION_0 0x0000b305 -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X(float val) -{ - return ((((int32_t)(val * 16.0))) << 
A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0 -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00 -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000 -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000 -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000 -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000 -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000 -#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK; -} - -#define REG_A6XX_SP_TP_SAMPLE_LOCATION_1 0x0000b306 -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0 -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00 -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & 
A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000 -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000 -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000 -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000 -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK; -} -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000 -#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28 -static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val) -{ - return ((((int32_t)(val * 16.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK; -} - -#define REG_A6XX_SP_TP_WINDOW_OFFSET 0x0000b307 -#define A6XX_SP_TP_WINDOW_OFFSET_X__MASK 0x00003fff -#define A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT 0 -static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_X(uint32_t val) -{ - return ((val) << A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_X__MASK; -} -#define A6XX_SP_TP_WINDOW_OFFSET_Y__MASK 0x3fff0000 -#define A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT 16 -static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_Y(uint32_t val) -{ - return ((val) << A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_Y__MASK; -} - -#define REG_A6XX_SP_TP_MODE_CNTL 0x0000b309 -#define A6XX_SP_TP_MODE_CNTL_ISAMMODE__MASK 0x00000003 -#define A6XX_SP_TP_MODE_CNTL_ISAMMODE__SHIFT 0 -static inline uint32_t A6XX_SP_TP_MODE_CNTL_ISAMMODE(enum a6xx_isam_mode val) -{ - return ((val) << A6XX_SP_TP_MODE_CNTL_ISAMMODE__SHIFT) & A6XX_SP_TP_MODE_CNTL_ISAMMODE__MASK; -} -#define A6XX_SP_TP_MODE_CNTL_UNK3__MASK 0x000000fc -#define A6XX_SP_TP_MODE_CNTL_UNK3__SHIFT 2 -static inline uint32_t A6XX_SP_TP_MODE_CNTL_UNK3(uint32_t val) -{ - return ((val) << A6XX_SP_TP_MODE_CNTL_UNK3__SHIFT) & A6XX_SP_TP_MODE_CNTL_UNK3__MASK; -} - -#define REG_A7XX_SP_UNKNOWN_B310 0x0000b310 - -#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0 -#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff -#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_format val) -{ - return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK; -} -#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300 -#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT 8 -static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_TILE_MODE(enum a6xx_tile_mode val) -{ - return ((val) << 
A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK; -} -#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00 -#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10 -static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK; -} -#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000 -#define A6XX_SP_PS_2D_SRC_INFO_SRGB 0x00002000 -#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK 0x0000c000 -#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT 14 -static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK; -} -#define A6XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000 -#define A6XX_SP_PS_2D_SRC_INFO_UNK17 0x00020000 -#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES_AVERAGE 0x00040000 -#define A6XX_SP_PS_2D_SRC_INFO_UNK19 0x00080000 -#define A6XX_SP_PS_2D_SRC_INFO_UNK20 0x00100000 -#define A6XX_SP_PS_2D_SRC_INFO_UNK21 0x00200000 -#define A6XX_SP_PS_2D_SRC_INFO_UNK22 0x00400000 -#define A6XX_SP_PS_2D_SRC_INFO_UNK23__MASK 0x07800000 -#define A6XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT 23 -static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_UNK23(uint32_t val) -{ - return ((val) << A6XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_UNK23__MASK; -} -#define A6XX_SP_PS_2D_SRC_INFO_UNK28 0x10000000 - -#define REG_A6XX_SP_PS_2D_SRC_SIZE 0x0000b4c1 -#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff -#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT 0 -static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val) -{ - return ((val) << A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK; -} -#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK 0x3fff8000 -#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT 15 -static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK; -} - -#define REG_A6XX_SP_PS_2D_SRC 0x0000b4c2 - -#define REG_A6XX_SP_PS_2D_SRC_PITCH 0x0000b4c4 -#define A6XX_SP_PS_2D_SRC_PITCH_UNK0__MASK 0x000001ff -#define A6XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT 0 -static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_UNK0(uint32_t val) -{ - return ((val) << A6XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_UNK0__MASK; -} -#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x00fffe00 -#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9 -static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK; -} - -#define REG_A7XX_SP_PS_2D_SRC_INFO 0x0000b2c0 -#define A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff -#define A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0 -static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_format val) -{ - return ((val) << A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK; -} -#define A7XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300 -#define A7XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT 8 -static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_TILE_MODE(enum a6xx_tile_mode val) -{ - return ((val) << A7XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK; -} -#define A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00 -#define A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10 -static inline uint32_t 
A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK; -} -#define A7XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000 -#define A7XX_SP_PS_2D_SRC_INFO_SRGB 0x00002000 -#define A7XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK 0x0000c000 -#define A7XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT 14 -static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A7XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK; -} -#define A7XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000 -#define A7XX_SP_PS_2D_SRC_INFO_UNK17 0x00020000 -#define A7XX_SP_PS_2D_SRC_INFO_SAMPLES_AVERAGE 0x00040000 -#define A7XX_SP_PS_2D_SRC_INFO_UNK19 0x00080000 -#define A7XX_SP_PS_2D_SRC_INFO_UNK20 0x00100000 -#define A7XX_SP_PS_2D_SRC_INFO_UNK21 0x00200000 -#define A7XX_SP_PS_2D_SRC_INFO_UNK22 0x00400000 -#define A7XX_SP_PS_2D_SRC_INFO_UNK23__MASK 0x07800000 -#define A7XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT 23 -static inline uint32_t A7XX_SP_PS_2D_SRC_INFO_UNK23(uint32_t val) -{ - return ((val) << A7XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT) & A7XX_SP_PS_2D_SRC_INFO_UNK23__MASK; -} -#define A7XX_SP_PS_2D_SRC_INFO_UNK28 0x10000000 - -#define REG_A7XX_SP_PS_2D_SRC_SIZE 0x0000b2c1 -#define A7XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff -#define A7XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT 0 -static inline uint32_t A7XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val) -{ - return ((val) << A7XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A7XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK; -} -#define A7XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK 0x3fff8000 -#define A7XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT 15 -static inline uint32_t A7XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << A7XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A7XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK; -} - -#define REG_A7XX_SP_PS_2D_SRC 0x0000b2c2 - -#define REG_A7XX_SP_PS_2D_SRC_PITCH 0x0000b2c4 -#define A7XX_SP_PS_2D_SRC_PITCH_UNK0__MASK 0x000001ff -#define A7XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT 0 -static inline uint32_t A7XX_SP_PS_2D_SRC_PITCH_UNK0(uint32_t val) -{ - return ((val) << A7XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT) & A7XX_SP_PS_2D_SRC_PITCH_UNK0__MASK; -} -#define A7XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x00fffe00 -#define A7XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9 -static inline uint32_t A7XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A7XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A7XX_SP_PS_2D_SRC_PITCH_PITCH__MASK; -} - -#define REG_A6XX_SP_PS_2D_SRC_PLANE1 0x0000b4c5 - -#define REG_A6XX_SP_PS_2D_SRC_PLANE_PITCH 0x0000b4c7 -#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK 0x00000fff -#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT 0 -static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK; -} - -#define REG_A6XX_SP_PS_2D_SRC_PLANE2 0x0000b4c8 - -#define REG_A7XX_SP_PS_2D_SRC_PLANE1 0x0000b2c5 - -#define REG_A7XX_SP_PS_2D_SRC_PLANE_PITCH 0x0000b2c7 -#define A7XX_SP_PS_2D_SRC_PLANE_PITCH__MASK 0x00000fff -#define A7XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT 0 -static inline uint32_t A7XX_SP_PS_2D_SRC_PLANE_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A7XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT) & A7XX_SP_PS_2D_SRC_PLANE_PITCH__MASK; -} - -#define REG_A7XX_SP_PS_2D_SRC_PLANE2 0x0000b2c8 - -#define REG_A6XX_SP_PS_2D_SRC_FLAGS 0x0000b4ca - -#define REG_A6XX_SP_PS_2D_SRC_FLAGS_PITCH 0x0000b4cc -#define 
A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK 0x000000ff -#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT 0 -static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK; -} - -#define REG_A7XX_SP_PS_2D_SRC_FLAGS 0x0000b2ca - -#define REG_A7XX_SP_PS_2D_SRC_FLAGS_PITCH 0x0000b2cc -#define A7XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK 0x000000ff -#define A7XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT 0 -static inline uint32_t A7XX_SP_PS_2D_SRC_FLAGS_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A7XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT) & A7XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK; -} - -#define REG_A6XX_SP_PS_UNKNOWN_B4CD 0x0000b4cd - -#define REG_A6XX_SP_PS_UNKNOWN_B4CE 0x0000b4ce - -#define REG_A6XX_SP_PS_UNKNOWN_B4CF 0x0000b4cf - -#define REG_A6XX_SP_PS_UNKNOWN_B4D0 0x0000b4d0 - -#define REG_A6XX_SP_WINDOW_OFFSET 0x0000b4d1 -#define A6XX_SP_WINDOW_OFFSET_X__MASK 0x00003fff -#define A6XX_SP_WINDOW_OFFSET_X__SHIFT 0 -static inline uint32_t A6XX_SP_WINDOW_OFFSET_X(uint32_t val) -{ - return ((val) << A6XX_SP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_WINDOW_OFFSET_X__MASK; -} -#define A6XX_SP_WINDOW_OFFSET_Y__MASK 0x3fff0000 -#define A6XX_SP_WINDOW_OFFSET_Y__SHIFT 16 -static inline uint32_t A6XX_SP_WINDOW_OFFSET_Y(uint32_t val) -{ - return ((val) << A6XX_SP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_WINDOW_OFFSET_Y__MASK; -} - -#define REG_A7XX_SP_PS_UNKNOWN_B4CD 0x0000b2cd - -#define REG_A7XX_SP_PS_UNKNOWN_B4CE 0x0000b2ce - -#define REG_A7XX_SP_PS_UNKNOWN_B4CF 0x0000b2cf - -#define REG_A7XX_SP_PS_UNKNOWN_B4D0 0x0000b2d0 - -#define REG_A7XX_SP_PS_2D_WINDOW_OFFSET 0x0000b2d1 -#define A7XX_SP_PS_2D_WINDOW_OFFSET_X__MASK 0x00003fff -#define A7XX_SP_PS_2D_WINDOW_OFFSET_X__SHIFT 0 -static inline uint32_t A7XX_SP_PS_2D_WINDOW_OFFSET_X(uint32_t val) -{ - return ((val) << A7XX_SP_PS_2D_WINDOW_OFFSET_X__SHIFT) & A7XX_SP_PS_2D_WINDOW_OFFSET_X__MASK; -} -#define A7XX_SP_PS_2D_WINDOW_OFFSET_Y__MASK 0x3fff0000 -#define A7XX_SP_PS_2D_WINDOW_OFFSET_Y__SHIFT 16 -static inline uint32_t A7XX_SP_PS_2D_WINDOW_OFFSET_Y(uint32_t val) -{ - return ((val) << A7XX_SP_PS_2D_WINDOW_OFFSET_Y__SHIFT) & A7XX_SP_PS_2D_WINDOW_OFFSET_Y__MASK; -} - -#define REG_A7XX_SP_PS_UNKNOWN_B2D2 0x0000b2d2 - -#define REG_A7XX_SP_WINDOW_OFFSET 0x0000ab21 -#define A7XX_SP_WINDOW_OFFSET_X__MASK 0x00003fff -#define A7XX_SP_WINDOW_OFFSET_X__SHIFT 0 -static inline uint32_t A7XX_SP_WINDOW_OFFSET_X(uint32_t val) -{ - return ((val) << A7XX_SP_WINDOW_OFFSET_X__SHIFT) & A7XX_SP_WINDOW_OFFSET_X__MASK; -} -#define A7XX_SP_WINDOW_OFFSET_Y__MASK 0x3fff0000 -#define A7XX_SP_WINDOW_OFFSET_Y__SHIFT 16 -static inline uint32_t A7XX_SP_WINDOW_OFFSET_Y(uint32_t val) -{ - return ((val) << A7XX_SP_WINDOW_OFFSET_Y__SHIFT) & A7XX_SP_WINDOW_OFFSET_Y__MASK; -} - -#define REG_A6XX_TPL1_DBG_ECO_CNTL 0x0000b600 - -#define REG_A6XX_TPL1_ADDR_MODE_CNTL 0x0000b601 - -#define REG_A6XX_TPL1_UNKNOWN_B602 0x0000b602 - -#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604 -#define A6XX_TPL1_NC_MODE_CNTL_MODE 0x00000001 -#define A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__MASK 0x00000006 -#define A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__SHIFT 1 -static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT(uint32_t val) -{ - return ((val) << A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__MASK; -} -#define A6XX_TPL1_NC_MODE_CNTL_MIN_ACCESS_LENGTH 0x00000008 -#define A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__MASK 0x00000010 -#define A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__SHIFT 4 -static 
inline uint32_t A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT(uint32_t val) -{ - return ((val) << A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__MASK; -} -#define A6XX_TPL1_NC_MODE_CNTL_UNK6__MASK 0x000000c0 -#define A6XX_TPL1_NC_MODE_CNTL_UNK6__SHIFT 6 -static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_UNK6(uint32_t val) -{ - return ((val) << A6XX_TPL1_NC_MODE_CNTL_UNK6__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_UNK6__MASK; -} - -#define REG_A6XX_TPL1_UNKNOWN_B605 0x0000b605 - -#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608 - -#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609 - -#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a - -#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b - -#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c - -#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608 - -#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609 - -#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a - -#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b - -#define REG_A7XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c - -#define REG_A6XX_TPL1_PERFCTR_TP_SEL(i0) (0x0000b610 + 0x1*(i0)) - -#define REG_A6XX_HLSQ_VS_CNTL 0x0000b800 -#define A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff -#define A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT 0 -static inline uint32_t A6XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK; -} -#define A6XX_HLSQ_VS_CNTL_ENABLED 0x00000100 -#define A6XX_HLSQ_VS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200 - -#define REG_A6XX_HLSQ_HS_CNTL 0x0000b801 -#define A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff -#define A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT 0 -static inline uint32_t A6XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK; -} -#define A6XX_HLSQ_HS_CNTL_ENABLED 0x00000100 -#define A6XX_HLSQ_HS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200 - -#define REG_A6XX_HLSQ_DS_CNTL 0x0000b802 -#define A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff -#define A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT 0 -static inline uint32_t A6XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK; -} -#define A6XX_HLSQ_DS_CNTL_ENABLED 0x00000100 -#define A6XX_HLSQ_DS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200 - -#define REG_A6XX_HLSQ_GS_CNTL 0x0000b803 -#define A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff -#define A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT 0 -static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK; -} -#define A6XX_HLSQ_GS_CNTL_ENABLED 0x00000100 -#define A6XX_HLSQ_GS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200 - -#define REG_A7XX_HLSQ_VS_CNTL 0x0000a827 -#define A7XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff -#define A7XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT 0 -static inline uint32_t A7XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A7XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_VS_CNTL_CONSTLEN__MASK; -} -#define A7XX_HLSQ_VS_CNTL_ENABLED 0x00000100 -#define A7XX_HLSQ_VS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200 - -#define REG_A7XX_HLSQ_HS_CNTL 0x0000a83f -#define A7XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff -#define A7XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT 0 -static inline uint32_t 
A7XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A7XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_HS_CNTL_CONSTLEN__MASK; -} -#define A7XX_HLSQ_HS_CNTL_ENABLED 0x00000100 -#define A7XX_HLSQ_HS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200 - -#define REG_A7XX_HLSQ_DS_CNTL 0x0000a867 -#define A7XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff -#define A7XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT 0 -static inline uint32_t A7XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A7XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_DS_CNTL_CONSTLEN__MASK; -} -#define A7XX_HLSQ_DS_CNTL_ENABLED 0x00000100 -#define A7XX_HLSQ_DS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200 - -#define REG_A7XX_HLSQ_GS_CNTL 0x0000a898 -#define A7XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff -#define A7XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT 0 -static inline uint32_t A7XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A7XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_GS_CNTL_CONSTLEN__MASK; -} -#define A7XX_HLSQ_GS_CNTL_ENABLED 0x00000100 -#define A7XX_HLSQ_GS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200 - -#define REG_A7XX_HLSQ_FS_UNKNOWN_A9AA 0x0000a9aa -#define A7XX_HLSQ_FS_UNKNOWN_A9AA_CONSTS_LOAD_DISABLE 0x00000001 - -#define REG_A7XX_HLSQ_UNKNOWN_A9AC 0x0000a9ac - -#define REG_A7XX_HLSQ_UNKNOWN_A9AD 0x0000a9ad - -#define REG_A7XX_HLSQ_UNKNOWN_A9AE 0x0000a9ae -#define A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT__MASK 0x000000ff -#define A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT__SHIFT 0 -static inline uint32_t A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT(uint32_t val) -{ - return ((val) << A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT__SHIFT) & A7XX_HLSQ_UNKNOWN_A9AE_SYSVAL_REGS_COUNT__MASK; -} -#define A7XX_HLSQ_UNKNOWN_A9AE_UNK8 0x00000100 -#define A7XX_HLSQ_UNKNOWN_A9AE_UNK9 0x00000200 - -#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_CMD 0x0000b820 - -#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR 0x0000b821 - -#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_DATA 0x0000b823 - -#define REG_A6XX_HLSQ_FS_CNTL_0 0x0000b980 -#define A6XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK 0x00000001 -#define A6XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT 0 -static inline uint32_t A6XX_HLSQ_FS_CNTL_0_THREADSIZE(enum a6xx_threadsize val) -{ - return ((val) << A6XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT) & A6XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK; -} -#define A6XX_HLSQ_FS_CNTL_0_VARYINGS 0x00000002 -#define A6XX_HLSQ_FS_CNTL_0_UNK2__MASK 0x00000ffc -#define A6XX_HLSQ_FS_CNTL_0_UNK2__SHIFT 2 -static inline uint32_t A6XX_HLSQ_FS_CNTL_0_UNK2(uint32_t val) -{ - return ((val) << A6XX_HLSQ_FS_CNTL_0_UNK2__SHIFT) & A6XX_HLSQ_FS_CNTL_0_UNK2__MASK; -} - -#define REG_A6XX_HLSQ_UNKNOWN_B981 0x0000b981 - -#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982 -#define A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x00000007 -#define A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A6XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK; -} - -#define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983 -#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff -#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK; -} -#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00 -#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8 
-static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK; -} -#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000 -#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16 -static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK; -} -#define A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000 -#define A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24 -static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK; -} - -#define REG_A6XX_HLSQ_CONTROL_3_REG 0x0000b984 -#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff -#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK; -} -#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00 -#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8 -static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK; -} -#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000 -#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16 -static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK; -} -#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000 -#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24 -static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK; -} - -#define REG_A6XX_HLSQ_CONTROL_4_REG 0x0000b985 -#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff -#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK; -} -#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00 -#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8 -static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK; -} -#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000 -#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16 -static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK; -} -#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000 -#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24 -static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK; -} - -#define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986 -#define 
A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK 0x000000ff -#define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK; -} -#define A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK 0x0000ff00 -#define A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT 8 -static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK; -} - -#define REG_A6XX_HLSQ_CS_CNTL 0x0000b987 -#define A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK 0x000000ff -#define A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CS_CNTL_CONSTLEN(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK; -} -#define A6XX_HLSQ_CS_CNTL_ENABLED 0x00000100 -#define A6XX_HLSQ_CS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200 - -#define REG_A7XX_HLSQ_FS_CNTL_0 0x0000a9c6 -#define A7XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK 0x00000001 -#define A7XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT 0 -static inline uint32_t A7XX_HLSQ_FS_CNTL_0_THREADSIZE(enum a6xx_threadsize val) -{ - return ((val) << A7XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT) & A7XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK; -} -#define A7XX_HLSQ_FS_CNTL_0_VARYINGS 0x00000002 -#define A7XX_HLSQ_FS_CNTL_0_UNK2__MASK 0x00000ffc -#define A7XX_HLSQ_FS_CNTL_0_UNK2__SHIFT 2 -static inline uint32_t A7XX_HLSQ_FS_CNTL_0_UNK2(uint32_t val) -{ - return ((val) << A7XX_HLSQ_FS_CNTL_0_UNK2__SHIFT) & A7XX_HLSQ_FS_CNTL_0_UNK2__MASK; -} - -#define REG_A7XX_HLSQ_CONTROL_1_REG 0x0000a9c7 -#define A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x00000007 -#define A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0 -static inline uint32_t A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A7XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK; -} - -#define REG_A7XX_HLSQ_CONTROL_2_REG 0x0000a9c8 -#define A7XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff -#define A7XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0 -static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK; -} -#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00 -#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8 -static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK; -} -#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000 -#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16 -static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK; -} -#define A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000 -#define A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24 -static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK; -} - -#define REG_A7XX_HLSQ_CONTROL_3_REG 0x0000a9c9 -#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff -#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0 -static inline 
uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK; -} -#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00 -#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8 -static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK; -} -#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000 -#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16 -static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK; -} -#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000 -#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24 -static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK; -} - -#define REG_A7XX_HLSQ_CONTROL_4_REG 0x0000a9ca -#define A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff -#define A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0 -static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK; -} -#define A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00 -#define A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8 -static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK; -} -#define A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000 -#define A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16 -static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK; -} -#define A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000 -#define A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24 -static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK; -} - -#define REG_A7XX_HLSQ_CONTROL_5_REG 0x0000a9cb -#define A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK 0x000000ff -#define A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT 0 -static inline uint32_t A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT) & A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK; -} -#define A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK 0x0000ff00 -#define A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT 8 -static inline uint32_t A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT) & A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK; -} - -#define REG_A7XX_HLSQ_CS_CNTL 0x0000a9cd -#define A7XX_HLSQ_CS_CNTL_CONSTLEN__MASK 0x000000ff -#define A7XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT 0 -static inline uint32_t A7XX_HLSQ_CS_CNTL_CONSTLEN(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A7XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT) & 
A7XX_HLSQ_CS_CNTL_CONSTLEN__MASK; -} -#define A7XX_HLSQ_CS_CNTL_ENABLED 0x00000100 -#define A7XX_HLSQ_CS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200 - -#define REG_A6XX_HLSQ_CS_NDRANGE_0 0x0000b990 -#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003 -#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK; -} -#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc -#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2 -static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK; -} -#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000 -#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12 -static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK; -} -#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000 -#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22 -static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK; -} - -#define REG_A6XX_HLSQ_CS_NDRANGE_1 0x0000b991 -#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff -#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK; -} - -#define REG_A6XX_HLSQ_CS_NDRANGE_2 0x0000b992 -#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff -#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK; -} - -#define REG_A6XX_HLSQ_CS_NDRANGE_3 0x0000b993 -#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff -#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK; -} - -#define REG_A6XX_HLSQ_CS_NDRANGE_4 0x0000b994 -#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff -#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK; -} - -#define REG_A6XX_HLSQ_CS_NDRANGE_5 0x0000b995 -#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff -#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK; -} - -#define REG_A6XX_HLSQ_CS_NDRANGE_6 0x0000b996 -#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff -#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK; -} - -#define REG_A6XX_HLSQ_CS_CNTL_0 0x0000b997 -#define 
A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff -#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK; -} -#define A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__MASK 0x0000ff00 -#define A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__SHIFT 8 -static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__MASK; -} -#define A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__MASK 0x00ff0000 -#define A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__SHIFT 16 -static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__MASK; -} -#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000 -#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24 -static inline uint32_t A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK; -} - -#define REG_A6XX_HLSQ_CS_CNTL_1 0x0000b998 -#define A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff -#define A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK; -} -#define A6XX_HLSQ_CS_CNTL_1_SINGLE_SP_CORE 0x00000100 -#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK 0x00000200 -#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT 9 -static inline uint32_t A6XX_HLSQ_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val) -{ - return ((val) << A6XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT) & A6XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK; -} -#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE_SCALAR 0x00000400 - -#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_X 0x0000b999 - -#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000b99a - -#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000b99b - -#define REG_A7XX_HLSQ_CS_NDRANGE_0 0x0000a9d4 -#define A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003 -#define A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0 -static inline uint32_t A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A7XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK; -} -#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc -#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2 -static inline uint32_t A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK; -} -#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000 -#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12 -static inline uint32_t A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK; -} -#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000 -#define A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22 -static inline uint32_t A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A7XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK; -} - -#define REG_A7XX_HLSQ_CS_NDRANGE_1 0x0000a9d5 -#define A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff -#define A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0 -static inline uint32_t A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t 
val) -{ - return ((val) << A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A7XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK; -} - -#define REG_A7XX_HLSQ_CS_NDRANGE_2 0x0000a9d6 -#define A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff -#define A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0 -static inline uint32_t A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A7XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK; -} - -#define REG_A7XX_HLSQ_CS_NDRANGE_3 0x0000a9d7 -#define A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff -#define A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0 -static inline uint32_t A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A7XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK; -} - -#define REG_A7XX_HLSQ_CS_NDRANGE_4 0x0000a9d8 -#define A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff -#define A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0 -static inline uint32_t A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A7XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK; -} - -#define REG_A7XX_HLSQ_CS_NDRANGE_5 0x0000a9d9 -#define A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff -#define A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0 -static inline uint32_t A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A7XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK; -} - -#define REG_A7XX_HLSQ_CS_NDRANGE_6 0x0000a9da -#define A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff -#define A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0 -static inline uint32_t A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A7XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK; -} - -#define REG_A7XX_HLSQ_CS_KERNEL_GROUP_X 0x0000a9dc - -#define REG_A7XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000a9dd - -#define REG_A7XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000a9de - -#define REG_A7XX_HLSQ_CS_CNTL_1 0x0000a9db -#define A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff -#define A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0 -static inline uint32_t A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A7XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK; -} -#define A7XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK 0x00000200 -#define A7XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT 9 -static inline uint32_t A7XX_HLSQ_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val) -{ - return ((val) << A7XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT) & A7XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK; -} -#define A7XX_HLSQ_CS_CNTL_1_UNK11 0x00000800 -#define A7XX_HLSQ_CS_CNTL_1_UNK22 0x00400000 -#define A7XX_HLSQ_CS_CNTL_1_UNK26 0x04000000 -#define A7XX_HLSQ_CS_CNTL_1_YALIGN__MASK 0x78000000 -#define A7XX_HLSQ_CS_CNTL_1_YALIGN__SHIFT 27 -static inline uint32_t A7XX_HLSQ_CS_CNTL_1_YALIGN(enum a7xx_cs_yalign val) -{ - return ((val) << A7XX_HLSQ_CS_CNTL_1_YALIGN__SHIFT) & A7XX_HLSQ_CS_CNTL_1_YALIGN__MASK; -} - -#define REG_A7XX_HLSQ_CS_LOCAL_SIZE 0x0000a9df -#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX__MASK 0x00000ffc -#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX__SHIFT 2 -static inline uint32_t A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX__SHIFT) & A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEX__MASK; -} -#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY__MASK 0x003ff000 -#define 
A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY__SHIFT 12 -static inline uint32_t A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY__SHIFT) & A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEY__MASK; -} -#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ__MASK 0xffc00000 -#define A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ__SHIFT 22 -static inline uint32_t A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ(uint32_t val) -{ - return ((val) << A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ__SHIFT) & A7XX_HLSQ_CS_LOCAL_SIZE_LOCALSIZEZ__MASK; -} - -#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_CMD 0x0000b9a0 - -#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR 0x0000b9a1 - -#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_DATA 0x0000b9a3 - -#define REG_A6XX_HLSQ_CS_BINDLESS_BASE(i0) (0x0000b9c0 + 0x2*(i0)) - -static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; } -#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003 -#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val) -{ - return ((val) << A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK; -} -#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc -#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2 -static inline uint32_t A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK; -} - -#define REG_A6XX_HLSQ_CS_UNKNOWN_B9D0 0x0000b9d0 -#define A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__MASK 0x0000001f -#define A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__SHIFT 0 -static inline uint32_t A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE(uint32_t val) -{ - return ((val) << A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__SHIFT) & A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__MASK; -} -#define A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK5 0x00000020 -#define A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK6 0x00000040 - -#define REG_A6XX_HLSQ_DRAW_CMD 0x0000bb00 -#define A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK 0x000000ff -#define A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT 0 -static inline uint32_t A6XX_HLSQ_DRAW_CMD_STATE_ID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK; -} - -#define REG_A6XX_HLSQ_DISPATCH_CMD 0x0000bb01 -#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK 0x000000ff -#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT 0 -static inline uint32_t A6XX_HLSQ_DISPATCH_CMD_STATE_ID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK; -} - -#define REG_A6XX_HLSQ_EVENT_CMD 0x0000bb02 -#define A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK 0x00ff0000 -#define A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT 16 -static inline uint32_t A6XX_HLSQ_EVENT_CMD_STATE_ID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK; -} -#define A6XX_HLSQ_EVENT_CMD_EVENT__MASK 0x0000007f -#define A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT 0 -static inline uint32_t A6XX_HLSQ_EVENT_CMD_EVENT(enum vgt_event_type val) -{ - return ((val) << A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_EVENT_CMD_EVENT__MASK; -} - -#define REG_A6XX_HLSQ_INVALIDATE_CMD 0x0000bb08 -#define A6XX_HLSQ_INVALIDATE_CMD_VS_STATE 0x00000001 -#define A6XX_HLSQ_INVALIDATE_CMD_HS_STATE 0x00000002 -#define A6XX_HLSQ_INVALIDATE_CMD_DS_STATE 
0x00000004 -#define A6XX_HLSQ_INVALIDATE_CMD_GS_STATE 0x00000008 -#define A6XX_HLSQ_INVALIDATE_CMD_FS_STATE 0x00000010 -#define A6XX_HLSQ_INVALIDATE_CMD_CS_STATE 0x00000020 -#define A6XX_HLSQ_INVALIDATE_CMD_CS_IBO 0x00000040 -#define A6XX_HLSQ_INVALIDATE_CMD_GFX_IBO 0x00000080 -#define A6XX_HLSQ_INVALIDATE_CMD_CS_SHARED_CONST 0x00080000 -#define A6XX_HLSQ_INVALIDATE_CMD_GFX_SHARED_CONST 0x00000100 -#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK 0x00003e00 -#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT 9 -static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(uint32_t val) -{ - return ((val) << A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK; -} -#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK 0x0007c000 -#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT 14 -static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(uint32_t val) -{ - return ((val) << A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK; -} - -#define REG_A7XX_HLSQ_INVALIDATE_CMD 0x0000ab1f -#define A7XX_HLSQ_INVALIDATE_CMD_VS_STATE 0x00000001 -#define A7XX_HLSQ_INVALIDATE_CMD_HS_STATE 0x00000002 -#define A7XX_HLSQ_INVALIDATE_CMD_DS_STATE 0x00000004 -#define A7XX_HLSQ_INVALIDATE_CMD_GS_STATE 0x00000008 -#define A7XX_HLSQ_INVALIDATE_CMD_FS_STATE 0x00000010 -#define A7XX_HLSQ_INVALIDATE_CMD_CS_STATE 0x00000020 -#define A7XX_HLSQ_INVALIDATE_CMD_CS_IBO 0x00000040 -#define A7XX_HLSQ_INVALIDATE_CMD_GFX_IBO 0x00000080 -#define A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK 0x0001fe00 -#define A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT 9 -static inline uint32_t A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(uint32_t val) -{ - return ((val) << A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT) & A7XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK; -} -#define A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK 0x01fe0000 -#define A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT 17 -static inline uint32_t A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(uint32_t val) -{ - return ((val) << A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT) & A7XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK; -} - -#define REG_A6XX_HLSQ_FS_CNTL 0x0000bb10 -#define A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff -#define A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT 0 -static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK; -} -#define A6XX_HLSQ_FS_CNTL_ENABLED 0x00000100 -#define A6XX_HLSQ_FS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200 - -#define REG_A7XX_HLSQ_FS_CNTL 0x0000ab03 -#define A7XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff -#define A7XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT 0 -static inline uint32_t A7XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A7XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A7XX_HLSQ_FS_CNTL_CONSTLEN__MASK; -} -#define A7XX_HLSQ_FS_CNTL_ENABLED 0x00000100 -#define A7XX_HLSQ_FS_CNTL_READ_IMM_SHARED_CONSTS 0x00000200 - -#define REG_A7XX_HLSQ_SHARED_CONSTS_IMM(i0) (0x0000ab40 + 0x1*(i0)) - -#define REG_A6XX_HLSQ_SHARED_CONSTS 0x0000bb11 -#define A6XX_HLSQ_SHARED_CONSTS_ENABLE 0x00000001 - -#define REG_A6XX_HLSQ_BINDLESS_BASE(i0) (0x0000bb20 + 0x2*(i0)) - -static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000bb20 + 0x2*i0; } -#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003 -#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0 -static inline uint32_t 
A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val) -{ - return ((val) << A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK; -} -#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffffffffffc -#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2 -static inline uint32_t A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR(uint64_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK; -} - -#define REG_A6XX_HLSQ_2D_EVENT_CMD 0x0000bd80 -#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK 0x0000ff00 -#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT 8 -static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_STATE_ID(uint32_t val) -{ - return ((val) << A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK; -} -#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK 0x0000007f -#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT 0 -static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_EVENT(enum vgt_event_type val) -{ - return ((val) << A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK; -} - -#define REG_A6XX_HLSQ_UNKNOWN_BE00 0x0000be00 - -#define REG_A6XX_HLSQ_UNKNOWN_BE01 0x0000be01 - -#define REG_A6XX_HLSQ_DBG_ECO_CNTL 0x0000be04 - -#define REG_A6XX_HLSQ_ADDR_MODE_CNTL 0x0000be05 - -#define REG_A6XX_HLSQ_UNKNOWN_BE08 0x0000be08 - -#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL(i0) (0x0000be10 + 0x1*(i0)) - -#define REG_A6XX_HLSQ_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22 - -#define REG_A7XX_SP_AHB_READ_APERTURE 0x0000c000 - -#define REG_A7XX_SP_UNKNOWN_0CE2 0x00000ce2 - -#define REG_A7XX_SP_UNKNOWN_0CE4 0x00000ce4 - -#define REG_A7XX_SP_UNKNOWN_0CE6 0x00000ce6 - -#define REG_A6XX_CP_EVENT_START 0x0000d600 -#define A6XX_CP_EVENT_START_STATE_ID__MASK 0x000000ff -#define A6XX_CP_EVENT_START_STATE_ID__SHIFT 0 -static inline uint32_t A6XX_CP_EVENT_START_STATE_ID(uint32_t val) -{ - return ((val) << A6XX_CP_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_EVENT_START_STATE_ID__MASK; -} - -#define REG_A6XX_CP_EVENT_END 0x0000d601 -#define A6XX_CP_EVENT_END_STATE_ID__MASK 0x000000ff -#define A6XX_CP_EVENT_END_STATE_ID__SHIFT 0 -static inline uint32_t A6XX_CP_EVENT_END_STATE_ID(uint32_t val) -{ - return ((val) << A6XX_CP_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_EVENT_END_STATE_ID__MASK; -} - -#define REG_A6XX_CP_2D_EVENT_START 0x0000d700 -#define A6XX_CP_2D_EVENT_START_STATE_ID__MASK 0x000000ff -#define A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT 0 -static inline uint32_t A6XX_CP_2D_EVENT_START_STATE_ID(uint32_t val) -{ - return ((val) << A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_START_STATE_ID__MASK; -} - -#define REG_A6XX_CP_2D_EVENT_END 0x0000d701 -#define A6XX_CP_2D_EVENT_END_STATE_ID__MASK 0x000000ff -#define A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT 0 -static inline uint32_t A6XX_CP_2D_EVENT_END_STATE_ID(uint32_t val) -{ - return ((val) << A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_END_STATE_ID__MASK; -} - -#define REG_A6XX_TEX_SAMP_0 0x00000000 -#define A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001 -#define A6XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006 -#define A6XX_TEX_SAMP_0_XY_MAG__SHIFT 1 -static inline uint32_t A6XX_TEX_SAMP_0_XY_MAG(enum a6xx_tex_filter val) -{ - return ((val) << A6XX_TEX_SAMP_0_XY_MAG__SHIFT) & A6XX_TEX_SAMP_0_XY_MAG__MASK; -} -#define A6XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018 -#define A6XX_TEX_SAMP_0_XY_MIN__SHIFT 3 -static inline uint32_t A6XX_TEX_SAMP_0_XY_MIN(enum 
a6xx_tex_filter val) -{ - return ((val) << A6XX_TEX_SAMP_0_XY_MIN__SHIFT) & A6XX_TEX_SAMP_0_XY_MIN__MASK; -} -#define A6XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0 -#define A6XX_TEX_SAMP_0_WRAP_S__SHIFT 5 -static inline uint32_t A6XX_TEX_SAMP_0_WRAP_S(enum a6xx_tex_clamp val) -{ - return ((val) << A6XX_TEX_SAMP_0_WRAP_S__SHIFT) & A6XX_TEX_SAMP_0_WRAP_S__MASK; -} -#define A6XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700 -#define A6XX_TEX_SAMP_0_WRAP_T__SHIFT 8 -static inline uint32_t A6XX_TEX_SAMP_0_WRAP_T(enum a6xx_tex_clamp val) -{ - return ((val) << A6XX_TEX_SAMP_0_WRAP_T__SHIFT) & A6XX_TEX_SAMP_0_WRAP_T__MASK; -} -#define A6XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800 -#define A6XX_TEX_SAMP_0_WRAP_R__SHIFT 11 -static inline uint32_t A6XX_TEX_SAMP_0_WRAP_R(enum a6xx_tex_clamp val) -{ - return ((val) << A6XX_TEX_SAMP_0_WRAP_R__SHIFT) & A6XX_TEX_SAMP_0_WRAP_R__MASK; -} -#define A6XX_TEX_SAMP_0_ANISO__MASK 0x0001c000 -#define A6XX_TEX_SAMP_0_ANISO__SHIFT 14 -static inline uint32_t A6XX_TEX_SAMP_0_ANISO(enum a6xx_tex_aniso val) -{ - return ((val) << A6XX_TEX_SAMP_0_ANISO__SHIFT) & A6XX_TEX_SAMP_0_ANISO__MASK; -} -#define A6XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000 -#define A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19 -static inline uint32_t A6XX_TEX_SAMP_0_LOD_BIAS(float val) -{ - return ((((int32_t)(val * 256.0))) << A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A6XX_TEX_SAMP_0_LOD_BIAS__MASK; -} - -#define REG_A6XX_TEX_SAMP_1 0x00000001 -#define A6XX_TEX_SAMP_1_CLAMPENABLE 0x00000001 -#define A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e -#define A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1 -static inline uint32_t A6XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val) -{ - return ((val) << A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK; -} -#define A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010 -#define A6XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020 -#define A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040 -#define A6XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00 -#define A6XX_TEX_SAMP_1_MAX_LOD__SHIFT 8 -static inline uint32_t A6XX_TEX_SAMP_1_MAX_LOD(float val) -{ - return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A6XX_TEX_SAMP_1_MAX_LOD__MASK; -} -#define A6XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000 -#define A6XX_TEX_SAMP_1_MIN_LOD__SHIFT 20 -static inline uint32_t A6XX_TEX_SAMP_1_MIN_LOD(float val) -{ - return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A6XX_TEX_SAMP_1_MIN_LOD__MASK; -} - -#define REG_A6XX_TEX_SAMP_2 0x00000002 -#define A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK 0x00000003 -#define A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT 0 -static inline uint32_t A6XX_TEX_SAMP_2_REDUCTION_MODE(enum a6xx_reduction_mode val) -{ - return ((val) << A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT) & A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK; -} -#define A6XX_TEX_SAMP_2_CHROMA_LINEAR 0x00000020 -#define A6XX_TEX_SAMP_2_BCOLOR__MASK 0xffffff80 -#define A6XX_TEX_SAMP_2_BCOLOR__SHIFT 7 -static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR(uint32_t val) -{ - return ((val) << A6XX_TEX_SAMP_2_BCOLOR__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR__MASK; -} - -#define REG_A6XX_TEX_SAMP_3 0x00000003 - -#define REG_A6XX_TEX_CONST_0 0x00000000 -#define A6XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003 -#define A6XX_TEX_CONST_0_TILE_MODE__SHIFT 0 -static inline uint32_t A6XX_TEX_CONST_0_TILE_MODE(enum a6xx_tile_mode val) -{ - return ((val) << A6XX_TEX_CONST_0_TILE_MODE__SHIFT) & A6XX_TEX_CONST_0_TILE_MODE__MASK; -} -#define A6XX_TEX_CONST_0_SRGB 0x00000004 -#define A6XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070 -#define A6XX_TEX_CONST_0_SWIZ_X__SHIFT 
4 -static inline uint32_t A6XX_TEX_CONST_0_SWIZ_X(enum a6xx_tex_swiz val) -{ - return ((val) << A6XX_TEX_CONST_0_SWIZ_X__SHIFT) & A6XX_TEX_CONST_0_SWIZ_X__MASK; -} -#define A6XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380 -#define A6XX_TEX_CONST_0_SWIZ_Y__SHIFT 7 -static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Y(enum a6xx_tex_swiz val) -{ - return ((val) << A6XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Y__MASK; -} -#define A6XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00 -#define A6XX_TEX_CONST_0_SWIZ_Z__SHIFT 10 -static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Z(enum a6xx_tex_swiz val) -{ - return ((val) << A6XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Z__MASK; -} -#define A6XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000 -#define A6XX_TEX_CONST_0_SWIZ_W__SHIFT 13 -static inline uint32_t A6XX_TEX_CONST_0_SWIZ_W(enum a6xx_tex_swiz val) -{ - return ((val) << A6XX_TEX_CONST_0_SWIZ_W__SHIFT) & A6XX_TEX_CONST_0_SWIZ_W__MASK; -} -#define A6XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000 -#define A6XX_TEX_CONST_0_MIPLVLS__SHIFT 16 -static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val) -{ - return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK; -} -#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_X 0x00010000 -#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_Y 0x00040000 -#define A6XX_TEX_CONST_0_SAMPLES__MASK 0x00300000 -#define A6XX_TEX_CONST_0_SAMPLES__SHIFT 20 -static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val) -{ - return ((val) << A6XX_TEX_CONST_0_SAMPLES__SHIFT) & A6XX_TEX_CONST_0_SAMPLES__MASK; -} -#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000 -#define A6XX_TEX_CONST_0_FMT__SHIFT 22 -static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_format val) -{ - return ((val) << A6XX_TEX_CONST_0_FMT__SHIFT) & A6XX_TEX_CONST_0_FMT__MASK; -} -#define A6XX_TEX_CONST_0_SWAP__MASK 0xc0000000 -#define A6XX_TEX_CONST_0_SWAP__SHIFT 30 -static inline uint32_t A6XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val) -{ - return ((val) << A6XX_TEX_CONST_0_SWAP__SHIFT) & A6XX_TEX_CONST_0_SWAP__MASK; -} - -#define REG_A6XX_TEX_CONST_1 0x00000001 -#define A6XX_TEX_CONST_1_WIDTH__MASK 0x00007fff -#define A6XX_TEX_CONST_1_WIDTH__SHIFT 0 -static inline uint32_t A6XX_TEX_CONST_1_WIDTH(uint32_t val) -{ - return ((val) << A6XX_TEX_CONST_1_WIDTH__SHIFT) & A6XX_TEX_CONST_1_WIDTH__MASK; -} -#define A6XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000 -#define A6XX_TEX_CONST_1_HEIGHT__SHIFT 15 -static inline uint32_t A6XX_TEX_CONST_1_HEIGHT(uint32_t val) -{ - return ((val) << A6XX_TEX_CONST_1_HEIGHT__SHIFT) & A6XX_TEX_CONST_1_HEIGHT__MASK; -} - -#define REG_A6XX_TEX_CONST_2 0x00000002 -#define A6XX_TEX_CONST_2_STRUCTSIZETEXELS__MASK 0x0000fff0 -#define A6XX_TEX_CONST_2_STRUCTSIZETEXELS__SHIFT 4 -static inline uint32_t A6XX_TEX_CONST_2_STRUCTSIZETEXELS(uint32_t val) -{ - return ((val) << A6XX_TEX_CONST_2_STRUCTSIZETEXELS__SHIFT) & A6XX_TEX_CONST_2_STRUCTSIZETEXELS__MASK; -} -#define A6XX_TEX_CONST_2_STARTOFFSETTEXELS__MASK 0x003f0000 -#define A6XX_TEX_CONST_2_STARTOFFSETTEXELS__SHIFT 16 -static inline uint32_t A6XX_TEX_CONST_2_STARTOFFSETTEXELS(uint32_t val) -{ - return ((val) << A6XX_TEX_CONST_2_STARTOFFSETTEXELS__SHIFT) & A6XX_TEX_CONST_2_STARTOFFSETTEXELS__MASK; -} -#define A6XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f -#define A6XX_TEX_CONST_2_PITCHALIGN__SHIFT 0 -static inline uint32_t A6XX_TEX_CONST_2_PITCHALIGN(uint32_t val) -{ - return ((val) << A6XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A6XX_TEX_CONST_2_PITCHALIGN__MASK; -} -#define A6XX_TEX_CONST_2_PITCH__MASK 0x1fffff80 -#define A6XX_TEX_CONST_2_PITCH__SHIFT 7 
-static inline uint32_t A6XX_TEX_CONST_2_PITCH(uint32_t val) -{ - return ((val) << A6XX_TEX_CONST_2_PITCH__SHIFT) & A6XX_TEX_CONST_2_PITCH__MASK; -} -#define A6XX_TEX_CONST_2_TYPE__MASK 0xe0000000 -#define A6XX_TEX_CONST_2_TYPE__SHIFT 29 -static inline uint32_t A6XX_TEX_CONST_2_TYPE(enum a6xx_tex_type val) -{ - return ((val) << A6XX_TEX_CONST_2_TYPE__SHIFT) & A6XX_TEX_CONST_2_TYPE__MASK; -} - -#define REG_A6XX_TEX_CONST_3 0x00000003 -#define A6XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x007fffff -#define A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0 -static inline uint32_t A6XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_3_ARRAY_PITCH__MASK; -} -#define A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK 0x07800000 -#define A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT 23 -static inline uint32_t A6XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val) -{ - assert(!(val & 0xfff)); - return (((val >> 12)) << A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK; -} -#define A6XX_TEX_CONST_3_TILE_ALL 0x08000000 -#define A6XX_TEX_CONST_3_FLAG 0x10000000 - -#define REG_A6XX_TEX_CONST_4 0x00000004 -#define A6XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0 -#define A6XX_TEX_CONST_4_BASE_LO__SHIFT 5 -static inline uint32_t A6XX_TEX_CONST_4_BASE_LO(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A6XX_TEX_CONST_4_BASE_LO__SHIFT) & A6XX_TEX_CONST_4_BASE_LO__MASK; -} - -#define REG_A6XX_TEX_CONST_5 0x00000005 -#define A6XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff -#define A6XX_TEX_CONST_5_BASE_HI__SHIFT 0 -static inline uint32_t A6XX_TEX_CONST_5_BASE_HI(uint32_t val) -{ - return ((val) << A6XX_TEX_CONST_5_BASE_HI__SHIFT) & A6XX_TEX_CONST_5_BASE_HI__MASK; -} -#define A6XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000 -#define A6XX_TEX_CONST_5_DEPTH__SHIFT 17 -static inline uint32_t A6XX_TEX_CONST_5_DEPTH(uint32_t val) -{ - return ((val) << A6XX_TEX_CONST_5_DEPTH__SHIFT) & A6XX_TEX_CONST_5_DEPTH__MASK; -} - -#define REG_A6XX_TEX_CONST_6 0x00000006 -#define A6XX_TEX_CONST_6_MIN_LOD_CLAMP__MASK 0x00000fff -#define A6XX_TEX_CONST_6_MIN_LOD_CLAMP__SHIFT 0 -static inline uint32_t A6XX_TEX_CONST_6_MIN_LOD_CLAMP(float val) -{ - return ((((uint32_t)(val * 256.0))) << A6XX_TEX_CONST_6_MIN_LOD_CLAMP__SHIFT) & A6XX_TEX_CONST_6_MIN_LOD_CLAMP__MASK; -} -#define A6XX_TEX_CONST_6_PLANE_PITCH__MASK 0xffffff00 -#define A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT 8 -static inline uint32_t A6XX_TEX_CONST_6_PLANE_PITCH(uint32_t val) -{ - return ((val) << A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT) & A6XX_TEX_CONST_6_PLANE_PITCH__MASK; -} - -#define REG_A6XX_TEX_CONST_7 0x00000007 -#define A6XX_TEX_CONST_7_FLAG_LO__MASK 0xffffffe0 -#define A6XX_TEX_CONST_7_FLAG_LO__SHIFT 5 -static inline uint32_t A6XX_TEX_CONST_7_FLAG_LO(uint32_t val) -{ - assert(!(val & 0x1f)); - return (((val >> 5)) << A6XX_TEX_CONST_7_FLAG_LO__SHIFT) & A6XX_TEX_CONST_7_FLAG_LO__MASK; -} - -#define REG_A6XX_TEX_CONST_8 0x00000008 -#define A6XX_TEX_CONST_8_FLAG_HI__MASK 0x0001ffff -#define A6XX_TEX_CONST_8_FLAG_HI__SHIFT 0 -static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val) -{ - return ((val) << A6XX_TEX_CONST_8_FLAG_HI__SHIFT) & A6XX_TEX_CONST_8_FLAG_HI__MASK; -} - -#define REG_A6XX_TEX_CONST_9 0x00000009 -#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK 0x0001ffff -#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0 -static inline uint32_t A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH(uint32_t val) -{ - assert(!(val & 0xf)); - return (((val >> 4)) << 
A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK; -} - -#define REG_A6XX_TEX_CONST_10 0x0000000a -#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK 0x0000007f -#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT 0 -static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH(uint32_t val) -{ - assert(!(val & 0x3f)); - return (((val >> 6)) << A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK; -} -#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK 0x00000f00 -#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT 8 -static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW(uint32_t val) -{ - return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK; -} -#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK 0x0000f000 -#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT 12 -static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH(uint32_t val) -{ - return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK; -} - -#define REG_A6XX_TEX_CONST_11 0x0000000b - -#define REG_A6XX_TEX_CONST_12 0x0000000c - -#define REG_A6XX_TEX_CONST_13 0x0000000d - -#define REG_A6XX_TEX_CONST_14 0x0000000e - -#define REG_A6XX_TEX_CONST_15 0x0000000f - -#define REG_A6XX_UBO_0 0x00000000 -#define A6XX_UBO_0_BASE_LO__MASK 0xffffffff -#define A6XX_UBO_0_BASE_LO__SHIFT 0 -static inline uint32_t A6XX_UBO_0_BASE_LO(uint32_t val) -{ - return ((val) << A6XX_UBO_0_BASE_LO__SHIFT) & A6XX_UBO_0_BASE_LO__MASK; -} - -#define REG_A6XX_UBO_1 0x00000001 -#define A6XX_UBO_1_BASE_HI__MASK 0x0001ffff -#define A6XX_UBO_1_BASE_HI__SHIFT 0 -static inline uint32_t A6XX_UBO_1_BASE_HI(uint32_t val) -{ - return ((val) << A6XX_UBO_1_BASE_HI__SHIFT) & A6XX_UBO_1_BASE_HI__MASK; -} -#define A6XX_UBO_1_SIZE__MASK 0xfffe0000 -#define A6XX_UBO_1_SIZE__SHIFT 17 -static inline uint32_t A6XX_UBO_1_SIZE(uint32_t val) -{ - return ((val) << A6XX_UBO_1_SIZE__SHIFT) & A6XX_UBO_1_SIZE__MASK; -} - -#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00001140 - -#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00001148 - -#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00001540 - -#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00001541 - -#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00001542 - -#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00001543 - -#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00001544 - -#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00001545 - -#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00001572 - -#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00001573 - -#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00001574 - -#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00001575 - -#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00001576 - -#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00001577 - -#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000015a4 - -#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000015a5 - -#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000015a6 - -#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000015a7 - -#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000015a8 - -#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000015a9 - -#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000015d6 - -#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000015d7 - -#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000015d8 - -#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000015d9 - -#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000015da - -#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000015db - -#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x00000000 - -#define 
REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00000000 -#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK 0x000000ff -#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT 0 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK 0x0000ff00 -#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT 8 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK; -} - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00000001 - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00000002 - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00000003 - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00000004 -#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f -#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000 -#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000 -#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK; -} - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00000005 -#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000 -#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK; -} - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00000008 - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00000009 - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0000000a - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0000000b - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0000000c - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0000000d - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0000000e - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0000000f - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000010 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8 -static inline uint32_t 
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK; -} - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000011 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000 -#define 
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK; -} -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000 -#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28 -static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val) -{ - return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK; -} - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000002f - -#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000030 - -#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0 0x00000001 - -#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 0x00000002 - -#define REG_A7XX_CX_MISC_TCM_RET_CNTL 0x00000039 - -#ifdef __cplusplus -template constexpr inline uint16_t CMD_REGS[] = {}; -template constexpr inline uint16_t RP_BLIT_REGS[] = {}; -template<> constexpr inline uint16_t CMD_REGS[] = { - 0xc03, - 0xc04, - 0xc30, - 0xc31, - 0xc32, - 0xc33, - 0xc34, - 0xc35, - 0xc36, - 0xc37, - 0xe12, - 0xe17, - 0xe19, - 0x8099, - 0x80af, - 0x810a, - 0x8110, - 0x8600, - 0x880e, - 0x8811, - 0x8818, - 0x8819, - 0x881a, - 0x881b, - 0x881c, - 0x881d, - 0x881e, - 0x8864, - 0x8891, - 0x88f0, - 0x8927, - 0x8928, - 0x8e01, - 0x8e04, - 0x8e07, - 0x9210, - 0x9211, - 0x9218, - 0x9219, - 0x921a, - 0x921b, - 0x921c, - 0x921d, - 0x921e, - 0x921f, - 0x9220, - 0x9221, - 0x9222, - 0x9223, - 0x9224, - 0x9225, - 0x9226, - 0x9227, - 0x9228, - 0x9229, - 0x922a, - 0x922b, - 0x922c, - 0x922d, - 0x922e, - 0x922f, - 0x9230, - 0x9231, - 0x9232, - 0x9233, - 0x9234, - 0x9235, - 0x9236, - 0x9300, - 0x9600, - 0x9601, - 0x9602, - 0x9e08, - 0x9e09, - 0x9e72, - 0xa007, - 0xa009, - 0xa8a0, - 0xa8a1, - 0xa8a2, - 0xa8a3, - 0xa8a4, - 0xa8a5, - 0xa8a6, - 0xa8a7, - 0xa8a8, - 0xa8a9, - 0xa8aa, - 0xa8ab, - 0xa8ac, - 0xa8ad, - 0xa8ae, - 0xa8af, - 0xa9a8, - 0xa9b0, - 0xa9b1, - 0xa9b2, - 0xa9b3, - 0xa9b4, - 0xa9b5, - 0xa9b6, - 0xa9b7, - 0xa9b8, - 0xa9b9, - 0xa9ba, - 0xa9bb, - 0xa9bc, - 0xa9bd, - 0xa9c2, - 0xa9c3, - 0xa9e2, - 0xa9e3, - 0xa9e6, - 0xa9e7, - 0xa9e8, - 0xa9e9, - 0xa9ea, - 0xa9eb, - 0xa9ec, - 0xa9ed, - 0xa9ee, - 0xa9ef, - 0xa9f0, - 0xa9f1, - 0xaaf2, - 0xab1a, - 0xab1b, - 0xab20, - 0xae00, - 0xae03, - 0xae04, - 0xae0f, - 0xb180, - 0xb181, - 0xb182, - 0xb183, - 0xb302, - 0xb303, - 0xb309, - 0xb600, - 0xb602, - 0xb605, - 0xb987, - 0xb9d0, - 0xbb08, - 0xbb11, - 0xbb20, - 0xbb21, - 0xbb22, - 0xbb23, - 0xbb24, - 0xbb25, - 0xbb26, - 0xbb27, - 0xbb28, - 0xbb29, - 0xbe00, - 0xbe01, - 0xbe04, -}; -template<> constexpr inline uint16_t CMD_REGS[] = { - 0xc03, - 0xc04, - 0xc30, - 0xc31, - 0xc32, - 0xc33, - 0xc34, - 0xc35, - 0xc36, - 0xc37, - 0xce2, - 0xce3, - 0xce4, - 0xce5, - 0xce6, - 0xce7, - 0xe10, - 0xe11, - 0xe12, - 0xe17, - 0xe19, - 0x8008, - 0x8009, - 0x800a, - 0x800b, - 0x800c, - 0x8099, - 0x80a7, - 0x80af, - 0x80f4, - 0x80f5, - 0x80f5, - 0x80f6, - 0x80f6, - 0x80f7, - 0x80f8, - 0x80f9, - 0x80f9, - 0x80fa, - 0x80fa, - 0x80fb, - 0x810a, - 0x810b, - 0x8110, - 0x8120, - 0x8121, - 0x8600, - 0x880e, - 0x8811, - 0x8818, - 0x8819, - 0x881a, - 0x881b, - 0x881c, - 0x881d, - 0x881e, - 
0x8864, - 0x8891, - 0x8899, - 0x88e5, - 0x88f0, - 0x8927, - 0x8928, - 0x8e01, - 0x8e04, - 0x8e06, - 0x8e07, - 0x8e09, - 0x8e79, - 0x9218, - 0x9219, - 0x921a, - 0x921b, - 0x921c, - 0x921d, - 0x921e, - 0x921f, - 0x9220, - 0x9221, - 0x9222, - 0x9223, - 0x9224, - 0x9225, - 0x9226, - 0x9227, - 0x9228, - 0x9229, - 0x922a, - 0x922b, - 0x922c, - 0x922d, - 0x922e, - 0x922f, - 0x9230, - 0x9231, - 0x9232, - 0x9233, - 0x9234, - 0x9235, - 0x9236, - 0x9300, - 0x9600, - 0x9601, - 0x9602, - 0x9810, - 0x9811, - 0x9e24, - 0x9e72, - 0xa007, - 0xa009, - 0xa600, - 0xa82d, - 0xa82f, - 0xa868, - 0xa899, - 0xa8a0, - 0xa8a1, - 0xa8a2, - 0xa8a3, - 0xa8a4, - 0xa8a5, - 0xa8a6, - 0xa8a7, - 0xa8a8, - 0xa8a9, - 0xa8aa, - 0xa8ab, - 0xa8ac, - 0xa8ad, - 0xa8ae, - 0xa8af, - 0xa9a8, - 0xa9ac, - 0xa9ad, - 0xa9b0, - 0xa9b1, - 0xa9b2, - 0xa9b3, - 0xa9b4, - 0xa9b5, - 0xa9b6, - 0xa9b7, - 0xa9b8, - 0xa9b9, - 0xa9ba, - 0xa9bb, - 0xa9bc, - 0xa9bd, - 0xa9be, - 0xa9c2, - 0xa9c3, - 0xa9c5, - 0xa9cd, - 0xa9df, - 0xa9e2, - 0xa9e3, - 0xa9e6, - 0xa9e7, - 0xa9e8, - 0xa9e9, - 0xa9ea, - 0xa9eb, - 0xa9ec, - 0xa9ed, - 0xa9ee, - 0xa9ef, - 0xa9f0, - 0xa9f1, - 0xa9f2, - 0xa9f3, - 0xa9f4, - 0xa9f5, - 0xa9f6, - 0xa9f7, - 0xaa01, - 0xaa02, - 0xaa03, - 0xaaf2, - 0xab01, - 0xab02, - 0xab1a, - 0xab1b, - 0xab1f, - 0xab20, - 0xab22, - 0xae00, - 0xae03, - 0xae04, - 0xae06, - 0xae08, - 0xae09, - 0xae0a, - 0xae0f, - 0xae6a, - 0xae6b, - 0xae6c, - 0xae73, - 0xb180, - 0xb181, - 0xb182, - 0xb183, - 0xb302, - 0xb303, - 0xb309, - 0xb310, - 0xb600, - 0xb602, - 0xb608, - 0xb609, - 0xb60a, - 0xb60b, - 0xb60c, -}; -template<> constexpr inline uint16_t RP_BLIT_REGS[] = { - 0xc02, - 0xc06, - 0xc10, - 0xc11, - 0xc12, - 0xc13, - 0xc14, - 0xc15, - 0xc16, - 0xc17, - 0xc18, - 0xc19, - 0xc1a, - 0xc1b, - 0xc1c, - 0xc1d, - 0xc1e, - 0xc1f, - 0xc20, - 0xc21, - 0xc22, - 0xc23, - 0xc24, - 0xc25, - 0xc26, - 0xc27, - 0xc28, - 0xc29, - 0xc2a, - 0xc2b, - 0xc2c, - 0xc2d, - 0xc2e, - 0xc2f, - 0xc38, - 0xc39, - 0xc3a, - 0xc3b, - 0xc3c, - 0xc3d, - 0xc3e, - 0xc3f, - 0xc40, - 0xc41, - 0xc42, - 0xc43, - 0xc44, - 0xc45, - 0xc46, - 0xc47, - 0xc48, - 0xc49, - 0xc4a, - 0xc4b, - 0xc4c, - 0xc4d, - 0xc4e, - 0xc4f, - 0xc50, - 0xc51, - 0xc52, - 0xc53, - 0xc54, - 0xc55, - 0xc56, - 0xc57, - 0xc58, - 0xc59, - 0xc5a, - 0xc5b, - 0xc5c, - 0xc5d, - 0xc5e, - 0xc5f, - 0xc60, - 0xc61, - 0xc62, - 0xc63, - 0xc64, - 0xc65, - 0xc66, - 0xc67, - 0xc68, - 0xc69, - 0xc6a, - 0xc6b, - 0xc6c, - 0xc6d, - 0xc6e, - 0xc6f, - 0xc70, - 0xc71, - 0xc72, - 0xc73, - 0xc74, - 0xc75, - 0xc76, - 0xc77, - 0xc78, - 0xc79, - 0xc7a, - 0xc7b, - 0xc7c, - 0xc7d, - 0xc7e, - 0xc7f, - 0xc80, - 0xc81, - 0xc82, - 0xc83, - 0xc84, - 0xc85, - 0xc86, - 0xc87, - 0xc88, - 0xc89, - 0xc8a, - 0xc8b, - 0xc8c, - 0xc8d, - 0xc8e, - 0xc8f, - 0xc90, - 0xc91, - 0xc92, - 0xc93, - 0xc94, - 0xc95, - 0xc96, - 0xc97, - 0x8000, - 0x8001, - 0x8002, - 0x8003, - 0x8004, - 0x8005, - 0x8006, - 0x8010, - 0x8011, - 0x8012, - 0x8013, - 0x8014, - 0x8015, - 0x8016, - 0x8017, - 0x8018, - 0x8019, - 0x801a, - 0x801b, - 0x801c, - 0x801d, - 0x801e, - 0x801f, - 0x8020, - 0x8021, - 0x8022, - 0x8023, - 0x8024, - 0x8025, - 0x8026, - 0x8027, - 0x8028, - 0x8029, - 0x802a, - 0x802b, - 0x802c, - 0x802d, - 0x802e, - 0x802f, - 0x8030, - 0x8031, - 0x8032, - 0x8033, - 0x8034, - 0x8035, - 0x8036, - 0x8037, - 0x8038, - 0x8039, - 0x803a, - 0x803b, - 0x803c, - 0x803d, - 0x803e, - 0x803f, - 0x8040, - 0x8041, - 0x8042, - 0x8043, - 0x8044, - 0x8045, - 0x8046, - 0x8047, - 0x8048, - 0x8049, - 0x804a, - 0x804b, - 0x804c, - 0x804d, - 0x804e, - 0x804f, - 0x8050, - 0x8051, - 0x8052, - 0x8053, - 0x8054, - 
0x8055, - 0x8056, - 0x8057, - 0x8058, - 0x8059, - 0x805a, - 0x805b, - 0x805c, - 0x805d, - 0x805e, - 0x805f, - 0x8060, - 0x8061, - 0x8062, - 0x8063, - 0x8064, - 0x8065, - 0x8066, - 0x8067, - 0x8068, - 0x8069, - 0x806a, - 0x806b, - 0x806c, - 0x806d, - 0x806e, - 0x806f, - 0x8070, - 0x8071, - 0x8072, - 0x8073, - 0x8074, - 0x8075, - 0x8076, - 0x8077, - 0x8078, - 0x8079, - 0x807a, - 0x807b, - 0x807c, - 0x807d, - 0x807e, - 0x807f, - 0x8080, - 0x8081, - 0x8082, - 0x8083, - 0x8084, - 0x8085, - 0x8086, - 0x8087, - 0x8088, - 0x8089, - 0x808a, - 0x808b, - 0x808c, - 0x808d, - 0x808e, - 0x808f, - 0x8090, - 0x8091, - 0x8092, - 0x8094, - 0x8095, - 0x8096, - 0x8097, - 0x8098, - 0x809b, - 0x809c, - 0x809d, - 0x80a0, - 0x80a1, - 0x80a2, - 0x80a3, - 0x80a4, - 0x80a5, - 0x80a6, - 0x80b0, - 0x80b1, - 0x80b2, - 0x80b3, - 0x80b4, - 0x80b5, - 0x80b6, - 0x80b7, - 0x80b8, - 0x80b9, - 0x80ba, - 0x80bb, - 0x80bc, - 0x80bd, - 0x80be, - 0x80bf, - 0x80c0, - 0x80c1, - 0x80c2, - 0x80c3, - 0x80c4, - 0x80c5, - 0x80c6, - 0x80c7, - 0x80c8, - 0x80c9, - 0x80ca, - 0x80cb, - 0x80cc, - 0x80cd, - 0x80ce, - 0x80cf, - 0x80d0, - 0x80d1, - 0x80d2, - 0x80d3, - 0x80d4, - 0x80d5, - 0x80d6, - 0x80d7, - 0x80d8, - 0x80d9, - 0x80da, - 0x80db, - 0x80dc, - 0x80dd, - 0x80de, - 0x80df, - 0x80e0, - 0x80e1, - 0x80e2, - 0x80e3, - 0x80e4, - 0x80e5, - 0x80e6, - 0x80e7, - 0x80e8, - 0x80e9, - 0x80ea, - 0x80eb, - 0x80ec, - 0x80ed, - 0x80ee, - 0x80ef, - 0x80f0, - 0x80f1, - 0x8100, - 0x8101, - 0x8102, - 0x8103, - 0x8104, - 0x8105, - 0x8106, - 0x8107, - 0x8109, - 0x8114, - 0x8115, - 0x8400, - 0x8401, - 0x8402, - 0x8403, - 0x8404, - 0x8405, - 0x8406, - 0x840a, - 0x840b, - 0x8800, - 0x8801, - 0x8802, - 0x8803, - 0x8804, - 0x8805, - 0x8806, - 0x8809, - 0x880a, - 0x880b, - 0x880c, - 0x880d, - 0x880f, - 0x8810, - 0x8820, - 0x8821, - 0x8822, - 0x8823, - 0x8824, - 0x8825, - 0x8826, - 0x8827, - 0x8828, - 0x8829, - 0x882a, - 0x882b, - 0x882c, - 0x882d, - 0x882e, - 0x882f, - 0x8830, - 0x8831, - 0x8832, - 0x8833, - 0x8834, - 0x8835, - 0x8836, - 0x8837, - 0x8838, - 0x8839, - 0x883a, - 0x883b, - 0x883c, - 0x883d, - 0x883e, - 0x883f, - 0x8840, - 0x8841, - 0x8842, - 0x8843, - 0x8844, - 0x8845, - 0x8846, - 0x8847, - 0x8848, - 0x8849, - 0x884a, - 0x884b, - 0x884c, - 0x884d, - 0x884e, - 0x884f, - 0x8850, - 0x8851, - 0x8852, - 0x8853, - 0x8854, - 0x8855, - 0x8856, - 0x8857, - 0x8858, - 0x8859, - 0x885a, - 0x885b, - 0x885c, - 0x885d, - 0x885e, - 0x885f, - 0x8860, - 0x8861, - 0x8862, - 0x8863, - 0x8865, - 0x8870, - 0x8871, - 0x8872, - 0x8873, - 0x8874, - 0x8875, - 0x8876, - 0x8877, - 0x8878, - 0x8879, - 0x8880, - 0x8881, - 0x8882, - 0x8883, - 0x8884, - 0x8885, - 0x8886, - 0x8887, - 0x8888, - 0x8889, - 0x8890, - 0x8898, - 0x88c0, - 0x88c1, - 0x88d0, - 0x88d1, - 0x88d2, - 0x88d3, - 0x88d4, - 0x88d5, - 0x88d6, - 0x88d7, - 0x88d8, - 0x88d9, - 0x88da, - 0x88db, - 0x88dc, - 0x88dd, - 0x88de, - 0x88df, - 0x88e0, - 0x88e1, - 0x88e2, - 0x88e3, - 0x8900, - 0x8901, - 0x8902, - 0x8903, - 0x8904, - 0x8905, - 0x8906, - 0x8907, - 0x8908, - 0x8909, - 0x890a, - 0x890b, - 0x890c, - 0x890d, - 0x890e, - 0x890f, - 0x8910, - 0x8911, - 0x8912, - 0x8913, - 0x8914, - 0x8915, - 0x8916, - 0x8917, - 0x8918, - 0x8919, - 0x891a, - 0x8a00, - 0x8a10, - 0x8a20, - 0x8a30, - 0x8c00, - 0x8c01, - 0x8c17, - 0x8c18, - 0x8c19, - 0x8c1a, - 0x8c1b, - 0x8c1c, - 0x8c1d, - 0x8c1e, - 0x8c1f, - 0x8c20, - 0x8c21, - 0x8c22, - 0x8c23, - 0x8c24, - 0x8c25, - 0x8c2c, - 0x8c2d, - 0x8c2e, - 0x8c2f, - 0x9100, - 0x9101, - 0x9102, - 0x9103, - 0x9104, - 0x9105, - 0x9106, - 0x9107, - 0x9108, - 0x9200, - 0x9201, - 0x9202, - 0x9203, - 
0x9204, - 0x9205, - 0x9206, - 0x9207, - 0x9208, - 0x9209, - 0x920a, - 0x920b, - 0x920c, - 0x920d, - 0x920e, - 0x920f, - 0x9212, - 0x9213, - 0x9214, - 0x9215, - 0x9216, - 0x9217, - 0x9301, - 0x9302, - 0x9303, - 0x9304, - 0x9305, - 0x9306, - 0x9311, - 0x9312, - 0x9313, - 0x9314, - 0x9315, - 0x9316, - 0x9800, - 0x9801, - 0x9802, - 0x9803, - 0x9804, - 0x9805, - 0x9806, - 0x9808, - 0x9980, - 0x9981, - 0x9b00, - 0x9b01, - 0x9b02, - 0x9b03, - 0x9b04, - 0x9b05, - 0x9b06, - 0x9b07, - 0x9b08, - 0xa000, - 0xa001, - 0xa002, - 0xa003, - 0xa004, - 0xa005, - 0xa006, - 0xa008, - 0xa00e, - 0xa00f, - 0xa010, - 0xa011, - 0xa012, - 0xa013, - 0xa014, - 0xa015, - 0xa016, - 0xa017, - 0xa018, - 0xa019, - 0xa01a, - 0xa01b, - 0xa01c, - 0xa01d, - 0xa01e, - 0xa01f, - 0xa020, - 0xa021, - 0xa022, - 0xa023, - 0xa024, - 0xa025, - 0xa026, - 0xa027, - 0xa028, - 0xa029, - 0xa02a, - 0xa02b, - 0xa02c, - 0xa02d, - 0xa02e, - 0xa02f, - 0xa030, - 0xa031, - 0xa032, - 0xa033, - 0xa034, - 0xa035, - 0xa036, - 0xa037, - 0xa038, - 0xa039, - 0xa03a, - 0xa03b, - 0xa03c, - 0xa03d, - 0xa03e, - 0xa03f, - 0xa040, - 0xa041, - 0xa042, - 0xa043, - 0xa044, - 0xa045, - 0xa046, - 0xa047, - 0xa048, - 0xa049, - 0xa04a, - 0xa04b, - 0xa04c, - 0xa04d, - 0xa04e, - 0xa04f, - 0xa050, - 0xa051, - 0xa052, - 0xa053, - 0xa054, - 0xa055, - 0xa056, - 0xa057, - 0xa058, - 0xa059, - 0xa05a, - 0xa05b, - 0xa05c, - 0xa05d, - 0xa05e, - 0xa05f, - 0xa060, - 0xa061, - 0xa062, - 0xa063, - 0xa064, - 0xa065, - 0xa066, - 0xa067, - 0xa068, - 0xa069, - 0xa06a, - 0xa06b, - 0xa06c, - 0xa06d, - 0xa06e, - 0xa06f, - 0xa070, - 0xa071, - 0xa072, - 0xa073, - 0xa074, - 0xa075, - 0xa076, - 0xa077, - 0xa078, - 0xa079, - 0xa07a, - 0xa07b, - 0xa07c, - 0xa07d, - 0xa07e, - 0xa07f, - 0xa080, - 0xa081, - 0xa082, - 0xa083, - 0xa084, - 0xa085, - 0xa086, - 0xa087, - 0xa088, - 0xa089, - 0xa08a, - 0xa08b, - 0xa08c, - 0xa08d, - 0xa08e, - 0xa08f, - 0xa090, - 0xa091, - 0xa092, - 0xa093, - 0xa094, - 0xa095, - 0xa096, - 0xa097, - 0xa098, - 0xa099, - 0xa09a, - 0xa09b, - 0xa09c, - 0xa09d, - 0xa09e, - 0xa09f, - 0xa0a0, - 0xa0a1, - 0xa0a2, - 0xa0a3, - 0xa0a4, - 0xa0a5, - 0xa0a6, - 0xa0a7, - 0xa0a8, - 0xa0a9, - 0xa0aa, - 0xa0ab, - 0xa0ac, - 0xa0ad, - 0xa0ae, - 0xa0af, - 0xa0b0, - 0xa0b1, - 0xa0b2, - 0xa0b3, - 0xa0b4, - 0xa0b5, - 0xa0b6, - 0xa0b7, - 0xa0b8, - 0xa0b9, - 0xa0ba, - 0xa0bb, - 0xa0bc, - 0xa0bd, - 0xa0be, - 0xa0bf, - 0xa0c0, - 0xa0c1, - 0xa0c2, - 0xa0c3, - 0xa0c4, - 0xa0c5, - 0xa0c6, - 0xa0c7, - 0xa0c8, - 0xa0c9, - 0xa0ca, - 0xa0cb, - 0xa0cc, - 0xa0cd, - 0xa0ce, - 0xa0cf, - 0xa0d0, - 0xa0d1, - 0xa0d2, - 0xa0d3, - 0xa0d4, - 0xa0d5, - 0xa0d6, - 0xa0d7, - 0xa0d8, - 0xa0d9, - 0xa0da, - 0xa0db, - 0xa0dc, - 0xa0dd, - 0xa0de, - 0xa0df, - 0xa0e0, - 0xa0e1, - 0xa0e2, - 0xa0e3, - 0xa0e4, - 0xa0e5, - 0xa0e6, - 0xa0e7, - 0xa0e8, - 0xa0e9, - 0xa0ea, - 0xa0eb, - 0xa0ec, - 0xa0ed, - 0xa0ee, - 0xa0ef, - 0xa0f8, - 0xa800, - 0xa802, - 0xa803, - 0xa804, - 0xa805, - 0xa806, - 0xa807, - 0xa808, - 0xa809, - 0xa80a, - 0xa80b, - 0xa80c, - 0xa80d, - 0xa80e, - 0xa80f, - 0xa810, - 0xa811, - 0xa812, - 0xa813, - 0xa814, - 0xa815, - 0xa816, - 0xa817, - 0xa818, - 0xa819, - 0xa81a, - 0xa81b, - 0xa81c, - 0xa81d, - 0xa81e, - 0xa81f, - 0xa820, - 0xa821, - 0xa822, - 0xa823, - 0xa824, - 0xa825, - 0xa830, - 0xa831, - 0xa832, - 0xa833, - 0xa834, - 0xa835, - 0xa836, - 0xa837, - 0xa838, - 0xa839, - 0xa83a, - 0xa83b, - 0xa83c, - 0xa83d, - 0xa840, - 0xa842, - 0xa843, - 0xa844, - 0xa845, - 0xa846, - 0xa847, - 0xa848, - 0xa849, - 0xa84a, - 0xa84b, - 0xa84c, - 0xa84d, - 0xa84e, - 0xa84f, - 0xa850, - 0xa851, - 0xa852, - 0xa853, - 0xa854, - 
0xa855, - 0xa856, - 0xa857, - 0xa858, - 0xa859, - 0xa85a, - 0xa85b, - 0xa85c, - 0xa85d, - 0xa85e, - 0xa85f, - 0xa860, - 0xa861, - 0xa862, - 0xa863, - 0xa864, - 0xa865, - 0xa870, - 0xa871, - 0xa872, - 0xa873, - 0xa874, - 0xa875, - 0xa876, - 0xa877, - 0xa878, - 0xa879, - 0xa87a, - 0xa87b, - 0xa87c, - 0xa87d, - 0xa87e, - 0xa87f, - 0xa880, - 0xa881, - 0xa882, - 0xa883, - 0xa884, - 0xa885, - 0xa886, - 0xa887, - 0xa888, - 0xa889, - 0xa88a, - 0xa88b, - 0xa88c, - 0xa88d, - 0xa88e, - 0xa88f, - 0xa890, - 0xa891, - 0xa892, - 0xa893, - 0xa894, - 0xa895, - 0xa896, - 0xa980, - 0xa982, - 0xa983, - 0xa984, - 0xa985, - 0xa986, - 0xa987, - 0xa988, - 0xa989, - 0xa98a, - 0xa98b, - 0xa98c, - 0xa98d, - 0xa98e, - 0xa98f, - 0xa990, - 0xa991, - 0xa992, - 0xa993, - 0xa994, - 0xa995, - 0xa996, - 0xa997, - 0xa998, - 0xa999, - 0xa99a, - 0xa99b, - 0xa99c, - 0xa99d, - 0xa99e, - 0xa99f, - 0xa9a0, - 0xa9a1, - 0xa9a2, - 0xa9a3, - 0xa9a4, - 0xa9a5, - 0xa9a6, - 0xa9a7, - 0xa9a9, - 0xa9e0, - 0xa9e1, - 0xa9e4, - 0xa9e5, - 0xab00, - 0xab04, - 0xab05, - 0xab10, - 0xab11, - 0xab12, - 0xab13, - 0xab14, - 0xab15, - 0xab16, - 0xab17, - 0xab18, - 0xab19, - 0xacc0, - 0xb300, - 0xb301, - 0xb304, - 0xb305, - 0xb306, - 0xb307, - 0xb4c0, - 0xb4c1, - 0xb4c2, - 0xb4c3, - 0xb4c4, - 0xb4ca, - 0xb4cb, - 0xb4cc, - 0xb4d1, - 0xb800, - 0xb801, - 0xb802, - 0xb803, - 0xb980, - 0xb982, - 0xb983, - 0xb984, - 0xb985, - 0xb986, - 0xb990, - 0xb991, - 0xb992, - 0xb993, - 0xb994, - 0xb995, - 0xb996, - 0xb997, - 0xb998, - 0xb999, - 0xb99a, - 0xb99b, - 0xb9c0, - 0xb9c1, - 0xb9c2, - 0xb9c3, - 0xb9c4, - 0xb9c5, - 0xb9c6, - 0xb9c7, - 0xb9c8, - 0xb9c9, - 0xbb10, -}; -template<> constexpr inline uint16_t RP_BLIT_REGS[] = { - 0xc02, - 0xc06, - 0xc10, - 0xc11, - 0xc12, - 0xc13, - 0xc14, - 0xc15, - 0xc16, - 0xc17, - 0xc18, - 0xc19, - 0xc1a, - 0xc1b, - 0xc1c, - 0xc1d, - 0xc1e, - 0xc1f, - 0xc20, - 0xc21, - 0xc22, - 0xc23, - 0xc24, - 0xc25, - 0xc26, - 0xc27, - 0xc28, - 0xc29, - 0xc2a, - 0xc2b, - 0xc2c, - 0xc2d, - 0xc2e, - 0xc2f, - 0xc38, - 0xc39, - 0xc3a, - 0xc3b, - 0xc3c, - 0xc3d, - 0xc3e, - 0xc3f, - 0xc40, - 0xc41, - 0xc42, - 0xc43, - 0xc44, - 0xc45, - 0xc46, - 0xc47, - 0xc48, - 0xc49, - 0xc4a, - 0xc4b, - 0xc4c, - 0xc4d, - 0xc4e, - 0xc4f, - 0xc50, - 0xc51, - 0xc52, - 0xc53, - 0xc54, - 0xc55, - 0xc56, - 0xc57, - 0x8000, - 0x8001, - 0x8002, - 0x8003, - 0x8004, - 0x8005, - 0x8006, - 0x8007, - 0x8010, - 0x8011, - 0x8012, - 0x8013, - 0x8014, - 0x8015, - 0x8016, - 0x8017, - 0x8018, - 0x8019, - 0x801a, - 0x801b, - 0x801c, - 0x801d, - 0x801e, - 0x801f, - 0x8020, - 0x8021, - 0x8022, - 0x8023, - 0x8024, - 0x8025, - 0x8026, - 0x8027, - 0x8028, - 0x8029, - 0x802a, - 0x802b, - 0x802c, - 0x802d, - 0x802e, - 0x802f, - 0x8030, - 0x8031, - 0x8032, - 0x8033, - 0x8034, - 0x8035, - 0x8036, - 0x8037, - 0x8038, - 0x8039, - 0x803a, - 0x803b, - 0x803c, - 0x803d, - 0x803e, - 0x803f, - 0x8040, - 0x8041, - 0x8042, - 0x8043, - 0x8044, - 0x8045, - 0x8046, - 0x8047, - 0x8048, - 0x8049, - 0x804a, - 0x804b, - 0x804c, - 0x804d, - 0x804e, - 0x804f, - 0x8050, - 0x8051, - 0x8052, - 0x8053, - 0x8054, - 0x8055, - 0x8056, - 0x8057, - 0x8058, - 0x8059, - 0x805a, - 0x805b, - 0x805c, - 0x805d, - 0x805e, - 0x805f, - 0x8060, - 0x8061, - 0x8062, - 0x8063, - 0x8064, - 0x8065, - 0x8066, - 0x8067, - 0x8068, - 0x8069, - 0x806a, - 0x806b, - 0x806c, - 0x806d, - 0x806e, - 0x806f, - 0x8070, - 0x8071, - 0x8072, - 0x8073, - 0x8074, - 0x8075, - 0x8076, - 0x8077, - 0x8078, - 0x8079, - 0x807a, - 0x807b, - 0x807c, - 0x807d, - 0x807e, - 0x807f, - 0x8080, - 0x8081, - 0x8082, - 0x8083, - 0x8084, - 0x8085, - 0x8086, - 0x8087, - 
0x8088, - 0x8089, - 0x808a, - 0x808b, - 0x808c, - 0x808d, - 0x808e, - 0x808f, - 0x8090, - 0x8091, - 0x8092, - 0x8094, - 0x8095, - 0x8096, - 0x8097, - 0x8098, - 0x809b, - 0x809c, - 0x809d, - 0x80a0, - 0x80a1, - 0x80a2, - 0x80a3, - 0x80a4, - 0x80a5, - 0x80a6, - 0x80b0, - 0x80b1, - 0x80b2, - 0x80b3, - 0x80b4, - 0x80b5, - 0x80b6, - 0x80b7, - 0x80b8, - 0x80b9, - 0x80ba, - 0x80bb, - 0x80bc, - 0x80bd, - 0x80be, - 0x80bf, - 0x80c0, - 0x80c1, - 0x80c2, - 0x80c3, - 0x80c4, - 0x80c5, - 0x80c6, - 0x80c7, - 0x80c8, - 0x80c9, - 0x80ca, - 0x80cb, - 0x80cc, - 0x80cd, - 0x80ce, - 0x80cf, - 0x80d0, - 0x80d1, - 0x80d2, - 0x80d3, - 0x80d4, - 0x80d5, - 0x80d6, - 0x80d7, - 0x80d8, - 0x80d9, - 0x80da, - 0x80db, - 0x80dc, - 0x80dd, - 0x80de, - 0x80df, - 0x80e0, - 0x80e1, - 0x80e2, - 0x80e3, - 0x80e4, - 0x80e5, - 0x80e6, - 0x80e7, - 0x80e8, - 0x80e9, - 0x80ea, - 0x80eb, - 0x80ec, - 0x80ed, - 0x80ee, - 0x80ef, - 0x80f0, - 0x80f1, - 0x8100, - 0x8101, - 0x8102, - 0x8103, - 0x8104, - 0x8105, - 0x8106, - 0x8107, - 0x8109, - 0x8113, - 0x8114, - 0x8115, - 0x8116, - 0x8400, - 0x8401, - 0x8402, - 0x8403, - 0x8404, - 0x8405, - 0x8406, - 0x840a, - 0x840b, - 0x8800, - 0x8801, - 0x8802, - 0x8803, - 0x8804, - 0x8805, - 0x8806, - 0x8809, - 0x880a, - 0x880b, - 0x880c, - 0x880d, - 0x880f, - 0x8810, - 0x8812, - 0x8820, - 0x8821, - 0x8822, - 0x8823, - 0x8824, - 0x8825, - 0x8826, - 0x8827, - 0x8828, - 0x8829, - 0x882a, - 0x882b, - 0x882c, - 0x882d, - 0x882e, - 0x882f, - 0x8830, - 0x8831, - 0x8832, - 0x8833, - 0x8834, - 0x8835, - 0x8836, - 0x8837, - 0x8838, - 0x8839, - 0x883a, - 0x883b, - 0x883c, - 0x883d, - 0x883e, - 0x883f, - 0x8840, - 0x8841, - 0x8842, - 0x8843, - 0x8844, - 0x8845, - 0x8846, - 0x8847, - 0x8848, - 0x8849, - 0x884a, - 0x884b, - 0x884c, - 0x884d, - 0x884e, - 0x884f, - 0x8850, - 0x8851, - 0x8852, - 0x8853, - 0x8854, - 0x8855, - 0x8856, - 0x8857, - 0x8858, - 0x8859, - 0x885a, - 0x885b, - 0x885c, - 0x885d, - 0x885e, - 0x885f, - 0x8860, - 0x8861, - 0x8862, - 0x8863, - 0x8865, - 0x8870, - 0x8871, - 0x8872, - 0x8873, - 0x8874, - 0x8875, - 0x8876, - 0x8877, - 0x8878, - 0x8879, - 0x8880, - 0x8881, - 0x8882, - 0x8883, - 0x8884, - 0x8885, - 0x8886, - 0x8887, - 0x8888, - 0x8889, - 0x8890, - 0x8898, - 0x88c0, - 0x88c1, - 0x88d0, - 0x88d1, - 0x88d2, - 0x88d3, - 0x88d4, - 0x88d5, - 0x88d6, - 0x88d7, - 0x88d8, - 0x88d9, - 0x88da, - 0x88db, - 0x88dc, - 0x88dd, - 0x88de, - 0x88df, - 0x88e0, - 0x88e1, - 0x88e2, - 0x88e3, - 0x8900, - 0x8901, - 0x8902, - 0x8903, - 0x8904, - 0x8905, - 0x8906, - 0x8907, - 0x8908, - 0x8909, - 0x890a, - 0x890b, - 0x890c, - 0x890d, - 0x890e, - 0x890f, - 0x8910, - 0x8911, - 0x8912, - 0x8913, - 0x8914, - 0x8915, - 0x8916, - 0x8917, - 0x8918, - 0x8919, - 0x891a, - 0x8c00, - 0x8c01, - 0x8c17, - 0x8c18, - 0x8c19, - 0x8c1a, - 0x8c1b, - 0x8c1c, - 0x8c1d, - 0x8c1e, - 0x8c1f, - 0x8c20, - 0x8c21, - 0x8c22, - 0x8c23, - 0x8c24, - 0x8c25, - 0x8c2c, - 0x8c2d, - 0x8c2e, - 0x8c2f, - 0x9101, - 0x9102, - 0x9103, - 0x9104, - 0x9105, - 0x9106, - 0x9107, - 0x9108, - 0x9109, - 0x910a, - 0x910b, - 0x910c, - 0x9200, - 0x9201, - 0x9202, - 0x9203, - 0x9204, - 0x9205, - 0x9206, - 0x9207, - 0x9208, - 0x9209, - 0x920a, - 0x920b, - 0x920c, - 0x920d, - 0x920e, - 0x920f, - 0x9212, - 0x9213, - 0x9214, - 0x9215, - 0x9216, - 0x9217, - 0x9301, - 0x9302, - 0x9303, - 0x9304, - 0x9305, - 0x9306, - 0x9307, - 0x9308, - 0x9309, - 0x9311, - 0x9312, - 0x9313, - 0x9314, - 0x9315, - 0x9316, - 0x9317, - 0x9800, - 0x9801, - 0x9802, - 0x9803, - 0x9804, - 0x9805, - 0x9806, - 0x9808, - 0x9809, - 0x9b00, - 0x9b01, - 0x9b02, - 0x9b03, - 0x9b04, - 0x9b05, - 
0x9b07, - 0x9b08, - 0x9b09, - 0xa000, - 0xa001, - 0xa002, - 0xa003, - 0xa004, - 0xa005, - 0xa006, - 0xa008, - 0xa00e, - 0xa00f, - 0xa010, - 0xa011, - 0xa012, - 0xa013, - 0xa014, - 0xa015, - 0xa016, - 0xa017, - 0xa018, - 0xa019, - 0xa01a, - 0xa01b, - 0xa01c, - 0xa01d, - 0xa01e, - 0xa01f, - 0xa020, - 0xa021, - 0xa022, - 0xa023, - 0xa024, - 0xa025, - 0xa026, - 0xa027, - 0xa028, - 0xa029, - 0xa02a, - 0xa02b, - 0xa02c, - 0xa02d, - 0xa02e, - 0xa02f, - 0xa030, - 0xa031, - 0xa032, - 0xa033, - 0xa034, - 0xa035, - 0xa036, - 0xa037, - 0xa038, - 0xa039, - 0xa03a, - 0xa03b, - 0xa03c, - 0xa03d, - 0xa03e, - 0xa03f, - 0xa040, - 0xa041, - 0xa042, - 0xa043, - 0xa044, - 0xa045, - 0xa046, - 0xa047, - 0xa048, - 0xa049, - 0xa04a, - 0xa04b, - 0xa04c, - 0xa04d, - 0xa04e, - 0xa04f, - 0xa050, - 0xa051, - 0xa052, - 0xa053, - 0xa054, - 0xa055, - 0xa056, - 0xa057, - 0xa058, - 0xa059, - 0xa05a, - 0xa05b, - 0xa05c, - 0xa05d, - 0xa05e, - 0xa05f, - 0xa060, - 0xa061, - 0xa062, - 0xa063, - 0xa064, - 0xa065, - 0xa066, - 0xa067, - 0xa068, - 0xa069, - 0xa06a, - 0xa06b, - 0xa06c, - 0xa06d, - 0xa06e, - 0xa06f, - 0xa070, - 0xa071, - 0xa072, - 0xa073, - 0xa074, - 0xa075, - 0xa076, - 0xa077, - 0xa078, - 0xa079, - 0xa07a, - 0xa07b, - 0xa07c, - 0xa07d, - 0xa07e, - 0xa07f, - 0xa080, - 0xa081, - 0xa082, - 0xa083, - 0xa084, - 0xa085, - 0xa086, - 0xa087, - 0xa088, - 0xa089, - 0xa08a, - 0xa08b, - 0xa08c, - 0xa08d, - 0xa08e, - 0xa08f, - 0xa090, - 0xa091, - 0xa092, - 0xa093, - 0xa094, - 0xa095, - 0xa096, - 0xa097, - 0xa098, - 0xa099, - 0xa09a, - 0xa09b, - 0xa09c, - 0xa09d, - 0xa09e, - 0xa09f, - 0xa0a0, - 0xa0a1, - 0xa0a2, - 0xa0a3, - 0xa0a4, - 0xa0a5, - 0xa0a6, - 0xa0a7, - 0xa0a8, - 0xa0a9, - 0xa0aa, - 0xa0ab, - 0xa0ac, - 0xa0ad, - 0xa0ae, - 0xa0af, - 0xa0b0, - 0xa0b1, - 0xa0b2, - 0xa0b3, - 0xa0b4, - 0xa0b5, - 0xa0b6, - 0xa0b7, - 0xa0b8, - 0xa0b9, - 0xa0ba, - 0xa0bb, - 0xa0bc, - 0xa0bd, - 0xa0be, - 0xa0bf, - 0xa0c0, - 0xa0c1, - 0xa0c2, - 0xa0c3, - 0xa0c4, - 0xa0c5, - 0xa0c6, - 0xa0c7, - 0xa0c8, - 0xa0c9, - 0xa0ca, - 0xa0cb, - 0xa0cc, - 0xa0cd, - 0xa0ce, - 0xa0cf, - 0xa0d0, - 0xa0d1, - 0xa0d2, - 0xa0d3, - 0xa0d4, - 0xa0d5, - 0xa0d6, - 0xa0d7, - 0xa0d8, - 0xa0d9, - 0xa0da, - 0xa0db, - 0xa0dc, - 0xa0dd, - 0xa0de, - 0xa0df, - 0xa0e0, - 0xa0e1, - 0xa0e2, - 0xa0e3, - 0xa0e4, - 0xa0e5, - 0xa0e6, - 0xa0e7, - 0xa0e8, - 0xa0e9, - 0xa0ea, - 0xa0eb, - 0xa0ec, - 0xa0ed, - 0xa0ee, - 0xa0ef, - 0xa0f8, - 0xa800, - 0xa802, - 0xa803, - 0xa804, - 0xa805, - 0xa806, - 0xa807, - 0xa808, - 0xa809, - 0xa80a, - 0xa80b, - 0xa80c, - 0xa80d, - 0xa80e, - 0xa80f, - 0xa810, - 0xa811, - 0xa812, - 0xa813, - 0xa814, - 0xa815, - 0xa816, - 0xa817, - 0xa818, - 0xa819, - 0xa81a, - 0xa81b, - 0xa81c, - 0xa81d, - 0xa81e, - 0xa81f, - 0xa820, - 0xa821, - 0xa822, - 0xa823, - 0xa824, - 0xa825, - 0xa827, - 0xa830, - 0xa831, - 0xa832, - 0xa833, - 0xa834, - 0xa835, - 0xa836, - 0xa837, - 0xa838, - 0xa839, - 0xa83a, - 0xa83b, - 0xa83c, - 0xa83d, - 0xa83f, - 0xa840, - 0xa842, - 0xa843, - 0xa844, - 0xa845, - 0xa846, - 0xa847, - 0xa848, - 0xa849, - 0xa84a, - 0xa84b, - 0xa84c, - 0xa84d, - 0xa84e, - 0xa84f, - 0xa850, - 0xa851, - 0xa852, - 0xa853, - 0xa854, - 0xa855, - 0xa856, - 0xa857, - 0xa858, - 0xa859, - 0xa85a, - 0xa85b, - 0xa85c, - 0xa85d, - 0xa85e, - 0xa85f, - 0xa860, - 0xa861, - 0xa862, - 0xa863, - 0xa864, - 0xa865, - 0xa867, - 0xa870, - 0xa871, - 0xa872, - 0xa873, - 0xa874, - 0xa875, - 0xa876, - 0xa877, - 0xa878, - 0xa879, - 0xa87a, - 0xa87b, - 0xa87c, - 0xa87d, - 0xa87e, - 0xa87f, - 0xa880, - 0xa881, - 0xa882, - 0xa883, - 0xa884, - 0xa885, - 0xa886, - 0xa887, - 0xa888, - 0xa889, - 
0xa88a, - 0xa88b, - 0xa88c, - 0xa88d, - 0xa88e, - 0xa88f, - 0xa890, - 0xa891, - 0xa892, - 0xa893, - 0xa894, - 0xa895, - 0xa896, - 0xa898, - 0xa980, - 0xa982, - 0xa983, - 0xa984, - 0xa985, - 0xa986, - 0xa987, - 0xa988, - 0xa989, - 0xa98a, - 0xa98b, - 0xa98c, - 0xa98d, - 0xa98e, - 0xa98f, - 0xa990, - 0xa991, - 0xa992, - 0xa993, - 0xa994, - 0xa995, - 0xa996, - 0xa997, - 0xa998, - 0xa999, - 0xa99a, - 0xa99b, - 0xa99c, - 0xa99d, - 0xa99e, - 0xa99f, - 0xa9a0, - 0xa9a1, - 0xa9a2, - 0xa9a3, - 0xa9a4, - 0xa9a5, - 0xa9a6, - 0xa9a7, - 0xa9a9, - 0xa9aa, - 0xa9ae, - 0xa9bf, - 0xa9c6, - 0xa9c7, - 0xa9c8, - 0xa9c9, - 0xa9ca, - 0xa9cb, - 0xa9d4, - 0xa9d5, - 0xa9d6, - 0xa9d7, - 0xa9d8, - 0xa9d9, - 0xa9da, - 0xa9db, - 0xa9dc, - 0xa9dd, - 0xa9de, - 0xa9e0, - 0xa9e1, - 0xa9e4, - 0xa9e5, - 0xab00, - 0xab03, - 0xab04, - 0xab05, - 0xab0a, - 0xab0b, - 0xab0c, - 0xab0d, - 0xab0e, - 0xab0f, - 0xab10, - 0xab11, - 0xab12, - 0xab13, - 0xab14, - 0xab15, - 0xab16, - 0xab17, - 0xab18, - 0xab19, - 0xab21, - 0xb2c0, - 0xb2c2, - 0xb2c3, - 0xb2ca, - 0xb2cb, - 0xb2cc, - 0xb2d2, - 0xb300, - 0xb301, - 0xb304, - 0xb305, - 0xb306, - 0xb307, -}; -#endif - -#endif /* A6XX_XML */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 8bea8ef26f..0e3dfd4c2b 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -507,7 +507,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value) { - msm_writel(value, ptr + (offset << 2)); + writel(value, ptr + (offset << 2)); } static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 592b296aab..94b6c5cab6 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -103,12 +103,12 @@ struct a6xx_gmu { static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset) { - return msm_readl(gmu->mmio + (offset << 2)); + return readl(gmu->mmio + (offset << 2)); } static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value) { - msm_writel(value, gmu->mmio + (offset << 2)); + writel(value, gmu->mmio + (offset << 2)); } static inline void @@ -131,8 +131,8 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi) { u64 val; - val = (u64) msm_readl(gmu->mmio + (lo << 2)); - val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32); + val = (u64) readl(gmu->mmio + (lo << 2)); + val |= ((u64) readl(gmu->mmio + (hi << 2)) << 32); return val; } @@ -143,12 +143,12 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi) static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset) { - return msm_readl(gmu->rscc + (offset << 2)); + return readl(gmu->rscc + (offset << 2)); } static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value) { - msm_writel(value, gmu->rscc + (offset << 2)); + writel(value, gmu->rscc + (offset << 2)); } #define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h deleted file mode 100644 index 9d7f939293..0000000000 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h +++ /dev/null @@ -1,422 +0,0 @@ -#ifndef A6XX_GMU_XML -#define A6XX_GMU_XML - -/* Autogenerated file, DO NOT EDIT manually! 
-
-This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
-http://gitlab.freedesktop.org/mesa/mesa/
-git clone https://gitlab.freedesktop.org/mesa/mesa.git
-
-The rules-ng-ng source files this header was generated from are:
-
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11820 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
-
-Copyright (C) 2013-2024 by the following authors:
-- Rob Clark Rob Clark
-- Ilia Mirkin Ilia Mirkin
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- -*/ - -#ifdef __KERNEL__ -#include -#define assert(x) BUG_ON(!(x)) -#else -#include -#endif - -#ifdef __cplusplus -#define __struct_cast(X) -#else -#define __struct_cast(X) (struct X) -#endif - -#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB 0x00800000 -#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB 0x40000000 - -#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK 0x00400000 -#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK 0x40000000 -#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK 0x40000000 -#define A6XX_GMU_OOB_DCVS_SET_MASK 0x00800000 -#define A6XX_GMU_OOB_DCVS_CHECK_MASK 0x80000000 -#define A6XX_GMU_OOB_DCVS_CLEAR_MASK 0x80000000 -#define A6XX_GMU_OOB_GPU_SET_MASK 0x00040000 -#define A6XX_GMU_OOB_GPU_CHECK_MASK 0x04000000 -#define A6XX_GMU_OOB_GPU_CLEAR_MASK 0x04000000 -#define A6XX_GMU_OOB_PERFCNTR_SET_MASK 0x00020000 -#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK 0x02000000 -#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK 0x02000000 - -#define A6XX_HFI_IRQ_MSGQ_MASK 0x00000001 -#define A6XX_HFI_IRQ_DSGQ_MASK 0x00000002 -#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK 0x00000004 -#define A6XX_HFI_IRQ_CM3_FAULT_MASK 0x00800000 -#define A6XX_HFI_IRQ_GMU_ERR_MASK__MASK 0x007f0000 -#define A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT 16 -static inline uint32_t A6XX_HFI_IRQ_GMU_ERR_MASK(uint32_t val) -{ - return ((val) << A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT) & A6XX_HFI_IRQ_GMU_ERR_MASK__MASK; -} -#define A6XX_HFI_IRQ_OOB_MASK__MASK 0xff000000 -#define A6XX_HFI_IRQ_OOB_MASK__SHIFT 24 -static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val) -{ - return ((val) << A6XX_HFI_IRQ_OOB_MASK__SHIFT) & A6XX_HFI_IRQ_OOB_MASK__MASK; -} - -#define A6XX_HFI_H2F_IRQ_MASK_BIT 0x00000001 - -#define REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL 0x00000080 - -#define REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL 0x00000081 - -#define REG_A6XX_GMU_CM3_ITCM_START 0x00000c00 - -#define REG_A6XX_GMU_CM3_DTCM_START 0x00001c00 - -#define REG_A6XX_GMU_NMI_CONTROL_STATUS 0x000023f0 - -#define REG_A6XX_GMU_BOOT_SLUMBER_OPTION 0x000023f8 - -#define REG_A6XX_GMU_GX_VOTE_IDX 0x000023f9 - -#define REG_A6XX_GMU_MX_VOTE_IDX 0x000023fa - -#define REG_A6XX_GMU_DCVS_ACK_OPTION 0x000023fc - -#define REG_A6XX_GMU_DCVS_PERF_SETTING 0x000023fd - -#define REG_A6XX_GMU_DCVS_BW_SETTING 0x000023fe - -#define REG_A6XX_GMU_DCVS_RETURN 0x000023ff - -#define REG_A6XX_GMU_ICACHE_CONFIG 0x00004c00 - -#define REG_A6XX_GMU_DCACHE_CONFIG 0x00004c01 - -#define REG_A6XX_GMU_SYS_BUS_CONFIG 0x00004c0f - -#define REG_A6XX_GMU_CM3_SYSRESET 0x00005000 - -#define REG_A6XX_GMU_CM3_BOOT_CONFIG 0x00005001 - -#define REG_A6XX_GMU_CM3_FW_BUSY 0x0000501a - -#define REG_A6XX_GMU_CM3_FW_INIT_RESULT 0x0000501c - -#define REG_A6XX_GMU_CM3_CFG 0x0000502d - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE 0x00005040 - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0 0x00005041 - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1 0x00005042 - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L 0x00005044 - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H 0x00005045 - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L 0x00005046 - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H 0x00005047 - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L 0x00005048 - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H 0x00005049 - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L 0x0000504a - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H 0x0000504b - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L 0x0000504c - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H 0x0000504d - -#define 
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L 0x0000504e - -#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H 0x0000504f - -#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL 0x000050c0 -#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE 0x00000001 -#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE 0x00000002 -#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE 0x00000004 -#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK 0x00003c00 -#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT 10 -static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS(uint32_t val) -{ - return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK; -} -#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK 0xffffc000 -#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT 14 -static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(uint32_t val) -{ - return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK; -} - -#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST 0x000050c1 - -#define REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST 0x000050c2 - -#define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0 -#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001 -#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002 -#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000004 -#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000008 -#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010 -#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020 -#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040 -#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF 0x00000080 - -#define REG_A6XX_GMU_GPU_NAP_CTRL 0x000050e4 -#define A6XX_GMU_GPU_NAP_CTRL_HW_NAP_ENABLE 0x00000001 -#define A6XX_GMU_GPU_NAP_CTRL_SID__MASK 0x000001f0 -#define A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT 4 -static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val) -{ - return ((val) << A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT) & A6XX_GMU_GPU_NAP_CTRL_SID__MASK; -} - -#define REG_A6XX_GMU_RPMH_CTRL 0x000050e8 -#define A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE 0x00000001 -#define A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE 0x00000010 -#define A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE 0x00000100 -#define A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE 0x00000200 -#define A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE 0x00000400 -#define A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE 0x00000800 -#define A6XX_GMU_RPMH_CTRL_DDR_MIN_VOTE_ENABLE 0x00001000 -#define A6XX_GMU_RPMH_CTRL_MX_MIN_VOTE_ENABLE 0x00002000 -#define A6XX_GMU_RPMH_CTRL_CX_MIN_VOTE_ENABLE 0x00004000 -#define A6XX_GMU_RPMH_CTRL_GFX_MIN_VOTE_ENABLE 0x00008000 - -#define REG_A6XX_GMU_RPMH_HYST_CTRL 0x000050e9 - -#define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x000050ec - -#define REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF 0x000050f0 - -#define REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF 0x000050f1 - -#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG 0x00005100 - -#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP 0x00005101 - -#define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x000051f0 - -#define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x00005157 - -#define REG_A6XX_GMU_LLM_GLM_SLEEP_STATUS 0x00005158 - -#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_L 0x00005088 - -#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_H 0x00005089 - -#define 
REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE 0x000050c3 - -#define REG_A6XX_GMU_HFI_CTRL_STATUS 0x00005180 - -#define REG_A6XX_GMU_HFI_VERSION_INFO 0x00005181 - -#define REG_A6XX_GMU_HFI_SFR_ADDR 0x00005182 - -#define REG_A6XX_GMU_HFI_MMAP_ADDR 0x00005183 - -#define REG_A6XX_GMU_HFI_QTBL_INFO 0x00005184 - -#define REG_A6XX_GMU_HFI_QTBL_ADDR 0x00005185 - -#define REG_A6XX_GMU_HFI_CTRL_INIT 0x00005186 - -#define REG_A6XX_GMU_GMU2HOST_INTR_SET 0x00005190 - -#define REG_A6XX_GMU_GMU2HOST_INTR_CLR 0x00005191 - -#define REG_A6XX_GMU_GMU2HOST_INTR_INFO 0x00005192 -#define A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ 0x00000001 -#define A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT 0x00800000 - -#define REG_A6XX_GMU_GMU2HOST_INTR_MASK 0x00005193 - -#define REG_A6XX_GMU_HOST2GMU_INTR_SET 0x00005194 - -#define REG_A6XX_GMU_HOST2GMU_INTR_CLR 0x00005195 - -#define REG_A6XX_GMU_HOST2GMU_INTR_RAW_INFO 0x00005196 - -#define REG_A6XX_GMU_HOST2GMU_INTR_EN_0 0x00005197 - -#define REG_A6XX_GMU_HOST2GMU_INTR_EN_1 0x00005198 - -#define REG_A6XX_GMU_HOST2GMU_INTR_EN_2 0x00005199 - -#define REG_A6XX_GMU_HOST2GMU_INTR_EN_3 0x0000519a - -#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_0 0x0000519b - -#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_1 0x0000519c - -#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_2 0x0000519d - -#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_3 0x0000519e - -#define REG_A6XX_GMU_GENERAL_0 0x000051c5 - -#define REG_A6XX_GMU_GENERAL_1 0x000051c6 - -#define REG_A6XX_GMU_GENERAL_6 0x000051cb - -#define REG_A6XX_GMU_GENERAL_7 0x000051cc - -#define REG_A7XX_GMU_GENERAL_8 0x000051cd - -#define REG_A7XX_GMU_GENERAL_9 0x000051ce - -#define REG_A7XX_GMU_GENERAL_10 0x000051cf - -#define REG_A6XX_GMU_ISENSE_CTRL 0x0000515d - -#define REG_A6XX_GPU_CS_ENABLE_REG 0x00008920 - -#define REG_A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL 0x0000515d - -#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3 0x00008578 - -#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2 0x00008558 - -#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_0 0x00008580 - -#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_2 0x00027ada - -#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a - -#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x00008957 - -#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a - -#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000881d - -#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000881f - -#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x00008821 - -#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965 - -#define REG_A6XX_GPU_CS_AMP_PERIOD_CTRL 0x0000896d - -#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965 - -#define REG_A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD 0x0000514d - -#define REG_A6XX_GMU_AO_INTERRUPT_EN 0x00009303 - -#define REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR 0x00009304 - -#define REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS 0x00009305 -#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE 0x00000001 -#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_RSCC_COMP 0x00000002 -#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_VDROOP 0x00000004 -#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR 0x00000008 -#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_DBD_WAKEUP 0x00000010 -#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR 0x00000020 - -#define REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK 0x00009306 - -#define REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL 0x00009309 - -#define REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL 0x0000930a - -#define REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL 0x0000930b - -#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x0000930c -#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB 
0x00800000 - -#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2 0x0000930d - -#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK 0x0000930e - -#define REG_A6XX_GMU_AO_AHB_FENCE_CTRL 0x00009310 - -#define REG_A6XX_GMU_AHB_FENCE_STATUS 0x00009313 - -#define REG_A6XX_GMU_AHB_FENCE_STATUS_CLR 0x00009314 - -#define REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x00009315 - -#define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316 - -#define REG_A6XX_GMU_RSCC_CONTROL_REQ 0x00009307 - -#define REG_A6XX_GMU_RSCC_CONTROL_ACK 0x00009308 - -#define REG_A6XX_GMU_AHB_FENCE_RANGE_0 0x00009311 - -#define REG_A6XX_GMU_AHB_FENCE_RANGE_1 0x00009312 - -#define REG_A6XX_GPU_CC_GX_GDSCR 0x00009c03 - -#define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42 - -#define REG_A6XX_GPU_CPR_FSM_CTL 0x0000c001 - -#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00000004 - -#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00000008 - -#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00000009 - -#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x0000000a - -#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x0000000b - -#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x0000000d - -#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x0000000e - -#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00000082 - -#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00000083 - -#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00000089 - -#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x0000008c - -#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00000100 - -#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00000101 - -#define REG_A7XX_RSCC_SEQ_MEM_0_DRV0_A740 0x00000154 - -#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00000180 - -#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00000346 - -#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000003ee - -#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00000496 - -#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000053e - -#ifdef __cplusplus -#endif - -#endif /* A6XX_GMU_XML */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 7b72327df7..5383aff848 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1255,8 +1255,9 @@ static const u32 a730_protect[] = { A6XX_PROTECT_NORDWR(0x00699, 0x01e9), A6XX_PROTECT_NORDWR(0x008a0, 0x0008), A6XX_PROTECT_NORDWR(0x008ab, 0x0024), - /* 0x008d0-0x008dd are unprotected on purpose for tools like perfetto */ - A6XX_PROTECT_RDONLY(0x008de, 0x0154), + /* 0x008d0-0x008dd and 0x008e0-0x008e6 are unprotected on purpose for tools like perfetto */ + A6XX_PROTECT_NORDWR(0x008de, 0x0001), + A6XX_PROTECT_RDONLY(0x008e7, 0x014b), A6XX_PROTECT_NORDWR(0x00900, 0x004d), A6XX_PROTECT_NORDWR(0x0098d, 0x00b2), A6XX_PROTECT_NORDWR(0x00a41, 0x01be), @@ -1291,8 +1292,7 @@ static const u32 a730_protect[] = { A6XX_PROTECT_RDONLY(0x1f844, 0x007b), A6XX_PROTECT_NORDWR(0x1f860, 0x0000), A6XX_PROTECT_NORDWR(0x1f878, 0x002a), - /* CP_PROTECT_REG[44, 46] are left untouched! */ - 0, + /* CP_PROTECT_REG[45, 46] are left untouched! 
*/ 0, 0, A6XX_PROTECT_NORDWR(0x1f8c0, 0x00000), @@ -1409,7 +1409,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) if (adreno_is_a702(gpu)) { gpu->ubwc_config.highest_bank_bit = 14; gpu->ubwc_config.min_acc_len = 1; - gpu->ubwc_config.ubwc_mode = 2; + gpu->ubwc_config.ubwc_mode = 0; } } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 34822b0807..8917032b75 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -69,12 +69,12 @@ static inline void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u3 static inline u32 a6xx_llc_read(struct a6xx_gpu *a6xx_gpu, u32 reg) { - return msm_readl(a6xx_gpu->llc_mmio + (reg << 2)); + return readl(a6xx_gpu->llc_mmio + (reg << 2)); } static inline void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value) { - msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2)); + writel(value, a6xx_gpu->llc_mmio + (reg << 2)); } #define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index a847a0f7a7..789a11416f 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -8,20 +8,20 @@ #include "a6xx_gpu_state.h" #include "a6xx_gmu.xml.h" -/* Ignore diagnostics about register tables that we aren't using yet. We don't - * want to modify these headers too much from their original source. - */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-variable" +static const unsigned int *gen7_0_0_external_core_regs[] __always_unused; +static const unsigned int *gen7_2_0_external_core_regs[] __always_unused; +static const unsigned int *gen7_9_0_external_core_regs[] __always_unused; +static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused; +static const u32 gen7_9_0_cx_debugbus_blocks[] __always_unused; #include "adreno_gen7_0_0_snapshot.h" #include "adreno_gen7_2_0_snapshot.h" - -#pragma GCC diagnostic pop +#include "adreno_gen7_9_0_snapshot.h" struct a6xx_gpu_state_obj { const void *handle; u32 *data; + u32 count; /* optional, used when count potentially read from hw */ }; struct a6xx_gpu_state { @@ -192,10 +192,10 @@ static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset, } #define cxdbg_write(ptr, offset, val) \ - msm_writel((val), (ptr) + ((offset) << 2)) + writel((val), (ptr) + ((offset) << 2)) #define cxdbg_read(ptr, offset) \ - msm_readl((ptr) + ((offset) << 2)) + readl((ptr) + ((offset) << 2)) /* read a value from the CX debug bus */ static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset, @@ -384,21 +384,29 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu, struct a6xx_gpu_state *a6xx_state) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - int debugbus_blocks_count, total_debugbus_blocks; - const u32 *debugbus_blocks; + int debugbus_blocks_count, gbif_debugbus_blocks_count, total_debugbus_blocks; + const u32 *debugbus_blocks, *gbif_debugbus_blocks; int i; if (adreno_is_a730(adreno_gpu)) { debugbus_blocks = gen7_0_0_debugbus_blocks; debugbus_blocks_count = ARRAY_SIZE(gen7_0_0_debugbus_blocks); - } else { - BUG_ON(!adreno_is_a740_family(adreno_gpu)); + gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks; + gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks); + } else if (adreno_is_a740_family(adreno_gpu)) { debugbus_blocks = gen7_2_0_debugbus_blocks; debugbus_blocks_count = 
ARRAY_SIZE(gen7_2_0_debugbus_blocks); + gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks; + gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks); + } else { + BUG_ON(!adreno_is_a750(adreno_gpu)); + debugbus_blocks = gen7_9_0_debugbus_blocks; + debugbus_blocks_count = ARRAY_SIZE(gen7_9_0_debugbus_blocks); + gbif_debugbus_blocks = gen7_9_0_gbif_debugbus_blocks; + gbif_debugbus_blocks_count = ARRAY_SIZE(gen7_9_0_gbif_debugbus_blocks); } - total_debugbus_blocks = debugbus_blocks_count + - ARRAY_SIZE(a7xx_gbif_debugbus_blocks); + total_debugbus_blocks = debugbus_blocks_count + gbif_debugbus_blocks_count; a6xx_state->debugbus = state_kcalloc(a6xx_state, total_debugbus_blocks, sizeof(*a6xx_state->debugbus)); @@ -410,9 +418,9 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu, &a6xx_state->debugbus[i]); } - for (i = 0; i < ARRAY_SIZE(a7xx_gbif_debugbus_blocks); i++) { + for (i = 0; i < gbif_debugbus_blocks_count; i++) { a6xx_get_debugbus_block(gpu, - a6xx_state, &a7xx_gbif_debugbus_blocks[i], + a6xx_state, &a7xx_debugbus_blocks[gbif_debugbus_blocks[i]], &a6xx_state->debugbus[i + debugbus_blocks_count]); } } @@ -813,10 +821,13 @@ static void a7xx_get_clusters(struct msm_gpu *gpu, if (adreno_is_a730(adreno_gpu)) { clusters = gen7_0_0_clusters; clusters_size = ARRAY_SIZE(gen7_0_0_clusters); - } else { - BUG_ON(!adreno_is_a740_family(adreno_gpu)); + } else if (adreno_is_a740_family(adreno_gpu)) { clusters = gen7_2_0_clusters; clusters_size = ARRAY_SIZE(gen7_2_0_clusters); + } else { + BUG_ON(!adreno_is_a750(adreno_gpu)); + clusters = gen7_9_0_clusters; + clusters_size = ARRAY_SIZE(gen7_9_0_clusters); } a6xx_state->clusters = state_kcalloc(a6xx_state, @@ -948,10 +959,13 @@ static void a7xx_get_shaders(struct msm_gpu *gpu, if (adreno_is_a730(adreno_gpu)) { shader_blocks = gen7_0_0_shader_blocks; num_shader_blocks = ARRAY_SIZE(gen7_0_0_shader_blocks); - } else { - BUG_ON(!adreno_is_a740_family(adreno_gpu)); + } else if (adreno_is_a740_family(adreno_gpu)) { shader_blocks = gen7_2_0_shader_blocks; num_shader_blocks = ARRAY_SIZE(gen7_2_0_shader_blocks); + } else { + BUG_ON(!adreno_is_a750(adreno_gpu)); + shader_blocks = gen7_9_0_shader_blocks; + num_shader_blocks = ARRAY_SIZE(gen7_9_0_shader_blocks); } a6xx_state->shaders = state_kcalloc(a6xx_state, @@ -1337,10 +1351,13 @@ static void a7xx_get_registers(struct msm_gpu *gpu, if (adreno_is_a730(adreno_gpu)) { reglist = gen7_0_0_reg_list; pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers; - } else { - BUG_ON(!adreno_is_a740_family(adreno_gpu)); + } else if (adreno_is_a740_family(adreno_gpu)) { reglist = gen7_2_0_reg_list; pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers; + } else { + BUG_ON(!adreno_is_a750(adreno_gpu)); + reglist = gen7_9_0_reg_list; + pre_crashdumper_regs = gen7_9_0_pre_crashdumper_gpu_registers; } count = A7XX_PRE_CRASHDUMPER_SIZE + A7XX_POST_CRASHDUMPER_SIZE; @@ -1388,7 +1405,8 @@ static void a7xx_get_post_crashdumper_registers(struct msm_gpu *gpu, struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); const u32 *regs; - BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu))); + BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu) || + adreno_is_a750(adreno_gpu))); regs = gen7_0_0_post_crashdumper_registers; a7xx_get_ahb_gpu_registers(gpu, @@ -1417,16 +1435,18 @@ static u32 a7xx_get_cp_roq_size(struct msm_gpu *gpu) /* Read a block of data from an indexed register pair */ static void a6xx_get_indexed_regs(struct msm_gpu *gpu, struct a6xx_gpu_state 
*a6xx_state, - struct a6xx_indexed_registers *indexed, + const struct a6xx_indexed_registers *indexed, struct a6xx_gpu_state_obj *obj) { + u32 count = indexed->count; int i; obj->handle = (const void *) indexed; if (indexed->count_fn) - indexed->count = indexed->count_fn(gpu); + count = indexed->count_fn(gpu); - obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32)); + obj->data = state_kcalloc(a6xx_state, count, sizeof(u32)); + obj->count = count; if (!obj->data) return; @@ -1434,7 +1454,7 @@ static void a6xx_get_indexed_regs(struct msm_gpu *gpu, gpu_write(gpu, indexed->addr, 0); /* Read the data - each read increments the internal address by 1 */ - for (i = 0; i < indexed->count; i++) + for (i = 0; i < count; i++) obj->data[i] = gpu_read(gpu, indexed->data); } @@ -1491,10 +1511,18 @@ static void a7xx_get_indexed_registers(struct msm_gpu *gpu, struct a6xx_gpu_state *a6xx_state) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + const struct a6xx_indexed_registers *indexed_regs; int i, indexed_count, mempool_count; - BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu))); - indexed_count = ARRAY_SIZE(a7xx_indexed_reglist); + if (adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)) { + indexed_regs = a7xx_indexed_reglist; + indexed_count = ARRAY_SIZE(a7xx_indexed_reglist); + } else { + BUG_ON(!adreno_is_a750(adreno_gpu)); + indexed_regs = gen7_9_0_cp_indexed_reg_list; + indexed_count = ARRAY_SIZE(gen7_9_0_cp_indexed_reg_list); + } + mempool_count = ARRAY_SIZE(a7xx_cp_bv_mempool_indexed); a6xx_state->indexed_regs = state_kcalloc(a6xx_state, @@ -1507,7 +1535,7 @@ static void a7xx_get_indexed_registers(struct msm_gpu *gpu, /* First read the common regs */ for (i = 0; i < indexed_count; i++) - a6xx_get_indexed_regs(gpu, a6xx_state, &a7xx_indexed_reglist[i], + a6xx_get_indexed_regs(gpu, a6xx_state, &indexed_regs[i], &a6xx_state->indexed_regs[i]); gpu_rmw(gpu, REG_A6XX_CP_CHICKEN_DBG, 0, BIT(2)); @@ -1862,9 +1890,9 @@ static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj, return; print_name(p, " - regs-name: ", indexed->name); - drm_printf(p, " dwords: %d\n", indexed->count); + drm_printf(p, " dwords: %d\n", obj->count); - print_ascii85(p, indexed->count << 2, obj->data); + print_ascii85(p, obj->count << 2, obj->data); } static void a6xx_show_debugbus_block(const struct a6xx_debugbus_block *block, diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h index 5ddd32063b..dd4c28a8d9 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h @@ -397,7 +397,7 @@ struct a6xx_indexed_registers { u32 (*count_fn)(struct msm_gpu *gpu); }; -static struct a6xx_indexed_registers a6xx_indexed_reglist[] = { +static const struct a6xx_indexed_registers a6xx_indexed_reglist[] = { { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR, REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL }, { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR, @@ -408,7 +408,7 @@ static struct a6xx_indexed_registers a6xx_indexed_reglist[] = { REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size}, }; -static struct a6xx_indexed_registers a7xx_indexed_reglist[] = { +static const struct a6xx_indexed_registers a7xx_indexed_reglist[] = { { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR, REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL }, { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR, @@ -433,12 +433,12 @@ static struct a6xx_indexed_registers a7xx_indexed_reglist[] = { REG_A6XX_CP_ROQ_DBG_DATA, 0, a7xx_get_cp_roq_size }, }; -static 
struct a6xx_indexed_registers a6xx_cp_mempool_indexed = { +static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = { "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR, REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL, }; -static struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = { +static const struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = { { "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR, REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2100, NULL }, { "CP_BV_MEMPOOL", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR, @@ -517,9 +517,9 @@ static const struct a6xx_debugbus_block a650_debugbus_blocks[] = { DEBUGBUS(A6XX_DBGBUS_SPTP_5, 0x100), }; -static const struct a6xx_debugbus_block a7xx_gbif_debugbus_blocks[] = { - DEBUGBUS(A7XX_DBGBUS_GBIF_CX, 0x100), - DEBUGBUS(A7XX_DBGBUS_GBIF_GX, 0x100), +static const u32 a7xx_gbif_debugbus_blocks[] = { + A7XX_DBGBUS_GBIF_CX, + A7XX_DBGBUS_GBIF_GX, }; static const struct a6xx_debugbus_block a7xx_cx_debugbus_blocks[] = { diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h deleted file mode 100644 index fbc27930e5..0000000000 --- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h +++ /dev/null @@ -1,539 +0,0 @@ -#ifndef ADRENO_COMMON_XML -#define ADRENO_COMMON_XML - -/* Autogenerated file, DO NOT EDIT manually! - -This file was generated by the rules-ng-ng gen_header.py tool in this git repository: -http://gitlab.freedesktop.org/mesa/mesa/ -git clone https://gitlab.freedesktop.org/mesa/mesa.git - -The rules-ng-ng source files this header was generated from are: - -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023) -*/ - -#ifdef __KERNEL__ -#include -#define assert(x) BUG_ON(!(x)) -#else -#include -#endif - -#ifdef __cplusplus -#define __struct_cast(X) -#else -#define __struct_cast(X) (struct X) -#endif - -enum chip { - A2XX = 2, - A3XX = 3, - A4XX = 4, - A5XX = 5, - A6XX = 6, - A7XX = 7, -}; - -enum adreno_pa_su_sc_draw { - PC_DRAW_POINTS = 0, - PC_DRAW_LINES = 1, - PC_DRAW_TRIANGLES = 2, -}; - -enum adreno_compare_func { - FUNC_NEVER = 0, - FUNC_LESS = 1, - FUNC_EQUAL = 2, - FUNC_LEQUAL = 3, - FUNC_GREATER = 4, - FUNC_NOTEQUAL = 5, - FUNC_GEQUAL = 6, - FUNC_ALWAYS = 7, -}; - -enum adreno_stencil_op { - STENCIL_KEEP = 0, - STENCIL_ZERO = 1, - STENCIL_REPLACE = 2, - STENCIL_INCR_CLAMP = 3, - STENCIL_DECR_CLAMP = 4, - STENCIL_INVERT = 5, - STENCIL_INCR_WRAP = 6, - STENCIL_DECR_WRAP = 7, -}; - -enum adreno_rb_blend_factor { - FACTOR_ZERO = 0, - FACTOR_ONE = 1, - FACTOR_SRC_COLOR = 4, - FACTOR_ONE_MINUS_SRC_COLOR = 5, - FACTOR_SRC_ALPHA = 6, - FACTOR_ONE_MINUS_SRC_ALPHA = 7, - FACTOR_DST_COLOR = 8, - FACTOR_ONE_MINUS_DST_COLOR = 9, - FACTOR_DST_ALPHA = 10, - FACTOR_ONE_MINUS_DST_ALPHA = 11, - FACTOR_CONSTANT_COLOR = 12, - FACTOR_ONE_MINUS_CONSTANT_COLOR = 13, - FACTOR_CONSTANT_ALPHA = 14, - FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15, - FACTOR_SRC_ALPHA_SATURATE = 16, - FACTOR_SRC1_COLOR = 20, - FACTOR_ONE_MINUS_SRC1_COLOR = 21, - FACTOR_SRC1_ALPHA = 22, - FACTOR_ONE_MINUS_SRC1_ALPHA = 23, -}; - -enum adreno_rb_surface_endian { - ENDIAN_NONE = 0, - ENDIAN_8IN16 = 1, - ENDIAN_8IN32 = 2, - ENDIAN_16IN32 = 3, - ENDIAN_8IN64 = 4, - ENDIAN_8IN128 = 5, -}; - -enum adreno_rb_dither_mode { - DITHER_DISABLE = 0, - DITHER_ALWAYS = 1, - DITHER_IF_ALPHA_OFF = 2, -}; - -enum adreno_rb_depth_format { - DEPTHX_16 = 0, - DEPTHX_24_8 = 1, - DEPTHX_32 = 2, -}; - -enum adreno_rb_copy_control_mode { - RB_COPY_RESOLVE = 1, - RB_COPY_CLEAR = 2, - 
RB_COPY_DEPTH_STENCIL = 5, -}; - -enum a3xx_rop_code { - ROP_CLEAR = 0, - ROP_NOR = 1, - ROP_AND_INVERTED = 2, - ROP_COPY_INVERTED = 3, - ROP_AND_REVERSE = 4, - ROP_INVERT = 5, - ROP_XOR = 6, - ROP_NAND = 7, - ROP_AND = 8, - ROP_EQUIV = 9, - ROP_NOOP = 10, - ROP_OR_INVERTED = 11, - ROP_COPY = 12, - ROP_OR_REVERSE = 13, - ROP_OR = 14, - ROP_SET = 15, -}; - -enum a3xx_render_mode { - RB_RENDERING_PASS = 0, - RB_TILING_PASS = 1, - RB_RESOLVE_PASS = 2, - RB_COMPUTE_PASS = 3, -}; - -enum a3xx_msaa_samples { - MSAA_ONE = 0, - MSAA_TWO = 1, - MSAA_FOUR = 2, - MSAA_EIGHT = 3, -}; - -enum a3xx_threadmode { - MULTI = 0, - SINGLE = 1, -}; - -enum a3xx_instrbuffermode { - CACHE = 0, - BUFFER = 1, -}; - -enum a3xx_threadsize { - TWO_QUADS = 0, - FOUR_QUADS = 1, -}; - -enum a3xx_color_swap { - WZYX = 0, - WXYZ = 1, - ZYXW = 2, - XYZW = 3, -}; - -enum a3xx_rb_blend_opcode { - BLEND_DST_PLUS_SRC = 0, - BLEND_SRC_MINUS_DST = 1, - BLEND_DST_MINUS_SRC = 2, - BLEND_MIN_DST_SRC = 3, - BLEND_MAX_DST_SRC = 4, -}; - -enum a4xx_tess_spacing { - EQUAL_SPACING = 0, - ODD_SPACING = 2, - EVEN_SPACING = 3, -}; - -enum a5xx_address_mode { - ADDR_32B = 0, - ADDR_64B = 1, -}; - -enum a5xx_line_mode { - BRESENHAM = 0, - RECTANGULAR = 1, -}; - -enum a6xx_tex_prefetch_cmd { - TEX_PREFETCH_UNK0 = 0, - TEX_PREFETCH_SAM = 1, - TEX_PREFETCH_GATHER4R = 2, - TEX_PREFETCH_GATHER4G = 3, - TEX_PREFETCH_GATHER4B = 4, - TEX_PREFETCH_GATHER4A = 5, - TEX_PREFETCH_UNK6 = 6, - TEX_PREFETCH_UNK7 = 7, -}; - -#define REG_AXXX_CP_RB_BASE 0x000001c0 - -#define REG_AXXX_CP_RB_CNTL 0x000001c1 -#define AXXX_CP_RB_CNTL_BUFSZ__MASK 0x0000003f -#define AXXX_CP_RB_CNTL_BUFSZ__SHIFT 0 -static inline uint32_t AXXX_CP_RB_CNTL_BUFSZ(uint32_t val) -{ - return ((val) << AXXX_CP_RB_CNTL_BUFSZ__SHIFT) & AXXX_CP_RB_CNTL_BUFSZ__MASK; -} -#define AXXX_CP_RB_CNTL_BLKSZ__MASK 0x00003f00 -#define AXXX_CP_RB_CNTL_BLKSZ__SHIFT 8 -static inline uint32_t AXXX_CP_RB_CNTL_BLKSZ(uint32_t val) -{ - return ((val) << AXXX_CP_RB_CNTL_BLKSZ__SHIFT) & AXXX_CP_RB_CNTL_BLKSZ__MASK; -} -#define AXXX_CP_RB_CNTL_BUF_SWAP__MASK 0x00030000 -#define AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT 16 -static inline uint32_t AXXX_CP_RB_CNTL_BUF_SWAP(uint32_t val) -{ - return ((val) << AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT) & AXXX_CP_RB_CNTL_BUF_SWAP__MASK; -} -#define AXXX_CP_RB_CNTL_POLL_EN 0x00100000 -#define AXXX_CP_RB_CNTL_NO_UPDATE 0x08000000 -#define AXXX_CP_RB_CNTL_RPTR_WR_EN 0x80000000 - -#define REG_AXXX_CP_RB_RPTR_ADDR 0x000001c3 -#define AXXX_CP_RB_RPTR_ADDR_SWAP__MASK 0x00000003 -#define AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT 0 -static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val) -{ - return ((val) << AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT) & AXXX_CP_RB_RPTR_ADDR_SWAP__MASK; -} -#define AXXX_CP_RB_RPTR_ADDR_ADDR__MASK 0xfffffffc -#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2 -static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK; -} - -#define REG_AXXX_CP_RB_RPTR 0x000001c4 - -#define REG_AXXX_CP_RB_WPTR 0x000001c5 - -#define REG_AXXX_CP_RB_WPTR_DELAY 0x000001c6 - -#define REG_AXXX_CP_RB_RPTR_WR 0x000001c7 - -#define REG_AXXX_CP_RB_WPTR_BASE 0x000001c8 - -#define REG_AXXX_CP_QUEUE_THRESHOLDS 0x000001d5 -#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK 0x0000000f -#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT 0 -static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(uint32_t val) -{ - return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT) & 
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK; -} -#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK 0x00000f00 -#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT 8 -static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(uint32_t val) -{ - return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK; -} -#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK 0x000f0000 -#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT 16 -static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val) -{ - return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK; -} - -#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6 -#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK 0x001f0000 -#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT 16 -static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_MEQ_END(uint32_t val) -{ - return ((val) << AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK; -} -#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK 0x1f000000 -#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT 24 -static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_ROQ_END(uint32_t val) -{ - return ((val) << AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK; -} - -#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7 -#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f -#define AXXX_CP_CSQ_AVAIL_RING__SHIFT 0 -static inline uint32_t AXXX_CP_CSQ_AVAIL_RING(uint32_t val) -{ - return ((val) << AXXX_CP_CSQ_AVAIL_RING__SHIFT) & AXXX_CP_CSQ_AVAIL_RING__MASK; -} -#define AXXX_CP_CSQ_AVAIL_IB1__MASK 0x00007f00 -#define AXXX_CP_CSQ_AVAIL_IB1__SHIFT 8 -static inline uint32_t AXXX_CP_CSQ_AVAIL_IB1(uint32_t val) -{ - return ((val) << AXXX_CP_CSQ_AVAIL_IB1__SHIFT) & AXXX_CP_CSQ_AVAIL_IB1__MASK; -} -#define AXXX_CP_CSQ_AVAIL_IB2__MASK 0x007f0000 -#define AXXX_CP_CSQ_AVAIL_IB2__SHIFT 16 -static inline uint32_t AXXX_CP_CSQ_AVAIL_IB2(uint32_t val) -{ - return ((val) << AXXX_CP_CSQ_AVAIL_IB2__SHIFT) & AXXX_CP_CSQ_AVAIL_IB2__MASK; -} - -#define REG_AXXX_CP_STQ_AVAIL 0x000001d8 -#define AXXX_CP_STQ_AVAIL_ST__MASK 0x0000007f -#define AXXX_CP_STQ_AVAIL_ST__SHIFT 0 -static inline uint32_t AXXX_CP_STQ_AVAIL_ST(uint32_t val) -{ - return ((val) << AXXX_CP_STQ_AVAIL_ST__SHIFT) & AXXX_CP_STQ_AVAIL_ST__MASK; -} - -#define REG_AXXX_CP_MEQ_AVAIL 0x000001d9 -#define AXXX_CP_MEQ_AVAIL_MEQ__MASK 0x0000001f -#define AXXX_CP_MEQ_AVAIL_MEQ__SHIFT 0 -static inline uint32_t AXXX_CP_MEQ_AVAIL_MEQ(uint32_t val) -{ - return ((val) << AXXX_CP_MEQ_AVAIL_MEQ__SHIFT) & AXXX_CP_MEQ_AVAIL_MEQ__MASK; -} - -#define REG_AXXX_SCRATCH_UMSK 0x000001dc -#define AXXX_SCRATCH_UMSK_UMSK__MASK 0x000000ff -#define AXXX_SCRATCH_UMSK_UMSK__SHIFT 0 -static inline uint32_t AXXX_SCRATCH_UMSK_UMSK(uint32_t val) -{ - return ((val) << AXXX_SCRATCH_UMSK_UMSK__SHIFT) & AXXX_SCRATCH_UMSK_UMSK__MASK; -} -#define AXXX_SCRATCH_UMSK_SWAP__MASK 0x00030000 -#define AXXX_SCRATCH_UMSK_SWAP__SHIFT 16 -static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val) -{ - return ((val) << AXXX_SCRATCH_UMSK_SWAP__SHIFT) & AXXX_SCRATCH_UMSK_SWAP__MASK; -} - -#define REG_AXXX_SCRATCH_ADDR 0x000001dd - -#define REG_AXXX_CP_ME_RDADDR 0x000001ea - -#define REG_AXXX_CP_STATE_DEBUG_INDEX 0x000001ec - -#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed - -#define REG_AXXX_CP_INT_CNTL 0x000001f2 -#define AXXX_CP_INT_CNTL_SW_INT_MASK 0x00080000 -#define AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK 0x00800000 -#define AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK 0x01000000 -#define 
AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK 0x02000000 -#define AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK 0x04000000 -#define AXXX_CP_INT_CNTL_IB_ERROR_MASK 0x08000000 -#define AXXX_CP_INT_CNTL_IB2_INT_MASK 0x20000000 -#define AXXX_CP_INT_CNTL_IB1_INT_MASK 0x40000000 -#define AXXX_CP_INT_CNTL_RB_INT_MASK 0x80000000 - -#define REG_AXXX_CP_INT_STATUS 0x000001f3 - -#define REG_AXXX_CP_INT_ACK 0x000001f4 - -#define REG_AXXX_CP_ME_CNTL 0x000001f6 -#define AXXX_CP_ME_CNTL_BUSY 0x20000000 -#define AXXX_CP_ME_CNTL_HALT 0x10000000 - -#define REG_AXXX_CP_ME_STATUS 0x000001f7 - -#define REG_AXXX_CP_ME_RAM_WADDR 0x000001f8 - -#define REG_AXXX_CP_ME_RAM_RADDR 0x000001f9 - -#define REG_AXXX_CP_ME_RAM_DATA 0x000001fa - -#define REG_AXXX_CP_DEBUG 0x000001fc -#define AXXX_CP_DEBUG_PREDICATE_DISABLE 0x00800000 -#define AXXX_CP_DEBUG_PROG_END_PTR_ENABLE 0x01000000 -#define AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE 0x02000000 -#define AXXX_CP_DEBUG_PREFETCH_PASS_NOPS 0x04000000 -#define AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE 0x08000000 -#define AXXX_CP_DEBUG_PREFETCH_MATCH_DISABLE 0x10000000 -#define AXXX_CP_DEBUG_SIMPLE_ME_FLOW_CONTROL 0x40000000 -#define AXXX_CP_DEBUG_MIU_WRITE_PACK_DISABLE 0x80000000 - -#define REG_AXXX_CP_CSQ_RB_STAT 0x000001fd -#define AXXX_CP_CSQ_RB_STAT_RPTR__MASK 0x0000007f -#define AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT 0 -static inline uint32_t AXXX_CP_CSQ_RB_STAT_RPTR(uint32_t val) -{ - return ((val) << AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_RPTR__MASK; -} -#define AXXX_CP_CSQ_RB_STAT_WPTR__MASK 0x007f0000 -#define AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT 16 -static inline uint32_t AXXX_CP_CSQ_RB_STAT_WPTR(uint32_t val) -{ - return ((val) << AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_WPTR__MASK; -} - -#define REG_AXXX_CP_CSQ_IB1_STAT 0x000001fe -#define AXXX_CP_CSQ_IB1_STAT_RPTR__MASK 0x0000007f -#define AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT 0 -static inline uint32_t AXXX_CP_CSQ_IB1_STAT_RPTR(uint32_t val) -{ - return ((val) << AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_RPTR__MASK; -} -#define AXXX_CP_CSQ_IB1_STAT_WPTR__MASK 0x007f0000 -#define AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT 16 -static inline uint32_t AXXX_CP_CSQ_IB1_STAT_WPTR(uint32_t val) -{ - return ((val) << AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_WPTR__MASK; -} - -#define REG_AXXX_CP_CSQ_IB2_STAT 0x000001ff -#define AXXX_CP_CSQ_IB2_STAT_RPTR__MASK 0x0000007f -#define AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT 0 -static inline uint32_t AXXX_CP_CSQ_IB2_STAT_RPTR(uint32_t val) -{ - return ((val) << AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_RPTR__MASK; -} -#define AXXX_CP_CSQ_IB2_STAT_WPTR__MASK 0x007f0000 -#define AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT 16 -static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val) -{ - return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK; -} - -#define REG_AXXX_CP_NON_PREFETCH_CNTRS 0x00000440 - -#define REG_AXXX_CP_STQ_ST_STAT 0x00000443 - -#define REG_AXXX_CP_ST_BASE 0x0000044d - -#define REG_AXXX_CP_ST_BUFSZ 0x0000044e - -#define REG_AXXX_CP_MEQ_STAT 0x0000044f - -#define REG_AXXX_CP_MIU_TAG_STAT 0x00000452 - -#define REG_AXXX_CP_BIN_MASK_LO 0x00000454 - -#define REG_AXXX_CP_BIN_MASK_HI 0x00000455 - -#define REG_AXXX_CP_BIN_SELECT_LO 0x00000456 - -#define REG_AXXX_CP_BIN_SELECT_HI 0x00000457 - -#define REG_AXXX_CP_IB1_BASE 0x00000458 - -#define REG_AXXX_CP_IB1_BUFSZ 0x00000459 - -#define REG_AXXX_CP_IB2_BASE 0x0000045a - -#define REG_AXXX_CP_IB2_BUFSZ 0x0000045b - -#define REG_AXXX_CP_STAT 0x0000047f -#define AXXX_CP_STAT_CP_BUSY 
0x80000000 -#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY 0x40000000 -#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY 0x20000000 -#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY 0x10000000 -#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY 0x08000000 -#define AXXX_CP_STAT_ME_BUSY 0x04000000 -#define AXXX_CP_STAT_MIU_WR_C_BUSY 0x02000000 -#define AXXX_CP_STAT_CP_3D_BUSY 0x00800000 -#define AXXX_CP_STAT_CP_NRT_BUSY 0x00400000 -#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY 0x00200000 -#define AXXX_CP_STAT_RCIU_ME_BUSY 0x00100000 -#define AXXX_CP_STAT_RCIU_PFP_BUSY 0x00080000 -#define AXXX_CP_STAT_MEQ_RING_BUSY 0x00040000 -#define AXXX_CP_STAT_PFP_BUSY 0x00020000 -#define AXXX_CP_STAT_ST_QUEUE_BUSY 0x00010000 -#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY 0x00002000 -#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY 0x00001000 -#define AXXX_CP_STAT_RING_QUEUE_BUSY 0x00000800 -#define AXXX_CP_STAT_CSF_BUSY 0x00000400 -#define AXXX_CP_STAT_CSF_ST_BUSY 0x00000200 -#define AXXX_CP_STAT_EVENT_BUSY 0x00000100 -#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY 0x00000080 -#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY 0x00000040 -#define AXXX_CP_STAT_CSF_RING_BUSY 0x00000020 -#define AXXX_CP_STAT_RCIU_BUSY 0x00000010 -#define AXXX_CP_STAT_RBIU_BUSY 0x00000008 -#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY 0x00000004 -#define AXXX_CP_STAT_MIU_RD_REQ_BUSY 0x00000002 -#define AXXX_CP_STAT_MIU_WR_BUSY 0x00000001 - -#define REG_AXXX_CP_SCRATCH_REG0 0x00000578 - -#define REG_AXXX_CP_SCRATCH_REG1 0x00000579 - -#define REG_AXXX_CP_SCRATCH_REG2 0x0000057a - -#define REG_AXXX_CP_SCRATCH_REG3 0x0000057b - -#define REG_AXXX_CP_SCRATCH_REG4 0x0000057c - -#define REG_AXXX_CP_SCRATCH_REG5 0x0000057d - -#define REG_AXXX_CP_SCRATCH_REG6 0x0000057e - -#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f - -#define REG_AXXX_CP_ME_VS_EVENT_SRC 0x00000600 - -#define REG_AXXX_CP_ME_VS_EVENT_ADDR 0x00000601 - -#define REG_AXXX_CP_ME_VS_EVENT_DATA 0x00000602 - -#define REG_AXXX_CP_ME_VS_EVENT_ADDR_SWM 0x00000603 - -#define REG_AXXX_CP_ME_VS_EVENT_DATA_SWM 0x00000604 - -#define REG_AXXX_CP_ME_PS_EVENT_SRC 0x00000605 - -#define REG_AXXX_CP_ME_PS_EVENT_ADDR 0x00000606 - -#define REG_AXXX_CP_ME_PS_EVENT_DATA 0x00000607 - -#define REG_AXXX_CP_ME_PS_EVENT_ADDR_SWM 0x00000608 - -#define REG_AXXX_CP_ME_PS_EVENT_DATA_SWM 0x00000609 - -#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a - -#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b - -#define REG_AXXX_CP_ME_CF_EVENT_DATA 0x0000060c - -#define REG_AXXX_CP_ME_NRT_ADDR 0x0000060d - -#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e - -#define REG_AXXX_CP_ME_VS_FETCH_DONE_SRC 0x00000612 - -#define REG_AXXX_CP_ME_VS_FETCH_DONE_ADDR 0x00000613 - -#define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA 0x00000614 - -#ifdef __cplusplus -#endif - -#endif /* ADRENO_COMMON_XML */ diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h new file mode 100644 index 0000000000..260d66eccf --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h @@ -0,0 +1,1446 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#ifndef __ADRENO_GEN7_9_0_SNAPSHOT_H +#define __ADRENO_GEN7_9_0_SNAPSHOT_H + +#include "a6xx_gpu_state.h" + +static const u32 gen7_9_0_debugbus_blocks[] = { + A7XX_DBGBUS_CP_0_0, + A7XX_DBGBUS_CP_0_1, + A7XX_DBGBUS_RBBM, + A7XX_DBGBUS_HLSQ, + A7XX_DBGBUS_UCHE_0, + A7XX_DBGBUS_UCHE_1, + A7XX_DBGBUS_TESS_BR, + A7XX_DBGBUS_TESS_BV, + A7XX_DBGBUS_PC_BR, + A7XX_DBGBUS_PC_BV, + A7XX_DBGBUS_VFDP_BR, + A7XX_DBGBUS_VFDP_BV, + A7XX_DBGBUS_VPC_BR, + A7XX_DBGBUS_VPC_BV, + A7XX_DBGBUS_TSE_BR, + A7XX_DBGBUS_TSE_BV, + A7XX_DBGBUS_RAS_BR, + A7XX_DBGBUS_RAS_BV, + A7XX_DBGBUS_VSC, + A7XX_DBGBUS_COM_0, + A7XX_DBGBUS_LRZ_BR, + A7XX_DBGBUS_LRZ_BV, + A7XX_DBGBUS_UFC_0, + A7XX_DBGBUS_UFC_1, + A7XX_DBGBUS_GMU_GX, + A7XX_DBGBUS_DBGC, + A7XX_DBGBUS_GPC_BR, + A7XX_DBGBUS_GPC_BV, + A7XX_DBGBUS_LARC, + A7XX_DBGBUS_HLSQ_SPTP, + A7XX_DBGBUS_RB_0, + A7XX_DBGBUS_RB_1, + A7XX_DBGBUS_RB_2, + A7XX_DBGBUS_RB_3, + A7XX_DBGBUS_RB_4, + A7XX_DBGBUS_RB_5, + A7XX_DBGBUS_UCHE_WRAPPER, + A7XX_DBGBUS_CCU_0, + A7XX_DBGBUS_CCU_1, + A7XX_DBGBUS_CCU_2, + A7XX_DBGBUS_CCU_3, + A7XX_DBGBUS_CCU_4, + A7XX_DBGBUS_CCU_5, + A7XX_DBGBUS_VFD_BR_0, + A7XX_DBGBUS_VFD_BR_1, + A7XX_DBGBUS_VFD_BR_2, + A7XX_DBGBUS_VFD_BV_0, + A7XX_DBGBUS_VFD_BV_1, + A7XX_DBGBUS_VFD_BV_2, + A7XX_DBGBUS_USP_0, + A7XX_DBGBUS_USP_1, + A7XX_DBGBUS_USP_2, + A7XX_DBGBUS_USP_3, + A7XX_DBGBUS_USP_4, + A7XX_DBGBUS_USP_5, + A7XX_DBGBUS_TP_0, + A7XX_DBGBUS_TP_1, + A7XX_DBGBUS_TP_2, + A7XX_DBGBUS_TP_3, + A7XX_DBGBUS_TP_4, + A7XX_DBGBUS_TP_5, + A7XX_DBGBUS_TP_6, + A7XX_DBGBUS_TP_7, + A7XX_DBGBUS_TP_8, + A7XX_DBGBUS_TP_9, + A7XX_DBGBUS_TP_10, + A7XX_DBGBUS_TP_11, + A7XX_DBGBUS_USPTP_0, + A7XX_DBGBUS_USPTP_1, + A7XX_DBGBUS_USPTP_2, + A7XX_DBGBUS_USPTP_3, + A7XX_DBGBUS_USPTP_4, + A7XX_DBGBUS_USPTP_5, + A7XX_DBGBUS_USPTP_6, + A7XX_DBGBUS_USPTP_7, + A7XX_DBGBUS_USPTP_8, + A7XX_DBGBUS_USPTP_9, + A7XX_DBGBUS_USPTP_10, + A7XX_DBGBUS_USPTP_11, + A7XX_DBGBUS_CCHE_0, + A7XX_DBGBUS_CCHE_1, + A7XX_DBGBUS_CCHE_2, + A7XX_DBGBUS_VPC_DSTR_0, + A7XX_DBGBUS_VPC_DSTR_1, + A7XX_DBGBUS_VPC_DSTR_2, + A7XX_DBGBUS_HLSQ_DP_STR_0, + A7XX_DBGBUS_HLSQ_DP_STR_1, + A7XX_DBGBUS_HLSQ_DP_STR_2, + A7XX_DBGBUS_HLSQ_DP_STR_3, + A7XX_DBGBUS_HLSQ_DP_STR_4, + A7XX_DBGBUS_HLSQ_DP_STR_5, + A7XX_DBGBUS_UFC_DSTR_0, + A7XX_DBGBUS_UFC_DSTR_1, + A7XX_DBGBUS_UFC_DSTR_2, + A7XX_DBGBUS_CGC_SUBCORE, + A7XX_DBGBUS_CGC_CORE, +}; + +static const u32 gen7_9_0_gbif_debugbus_blocks[] = { + A7XX_DBGBUS_GBIF_GX, +}; + +static const u32 gen7_9_0_cx_debugbus_blocks[] = { + A7XX_DBGBUS_CX, + A7XX_DBGBUS_GMU_CX, + A7XX_DBGBUS_GBIF_CX, +}; + +static struct gen7_shader_block gen7_9_0_shader_blocks[] = { + { A7XX_TP0_TMO_DATA, 0x0200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_TP0_SMO_DATA, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_INST_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_INST_DATA_1, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_0_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_1_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_2_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_3_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_4_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_5_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_6_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_7_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_CB_RAM, 0x0390, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_13_DATA, 0x0800, 6, 2, 
A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_14_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_INST_TAG, 0x00C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_INST_DATA_2, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_TMO_TAG, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_SMO_TAG, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_STATE_DATA, 0x0040, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_HWAVE_RAM, 0x0100, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_L0_INST_BUF, 0x0050, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_8_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_9_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_10_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_11_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_SP_LB_12_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP }, + { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_L2STC_TAG_RAM, 0x0200, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_L2STC_INFO_CMD, 0x0474, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0180, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CPS_MISC_RAM, 0x0640, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CPS_MISC_RAM, 0x00B0, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CPS_MISC_RAM_1, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM, 0x0200, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, 
A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0008, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM_TAG, 0x0014, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM_TAG, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM_TAG, 0x0004, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0020, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x03C0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0280, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0050, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0008, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM_1, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_STPROC_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INST_RAM_2, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_DATAPATH_META, 0x0020, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_INDIRECT_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE }, + { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE }, +}; + +/* + * Block : ['PRE_CRASHDUMPER', 'GBIF'] + * pairs : 2 (Regs:5), 5 (Regs:38) + */ +static const u32 gen7_9_0_pre_crashdumper_gpu_registers[] = { + 0x00210, 0x00213, 0x00536, 0x00536, 0x03c00, 0x03c0b, 0x03c40, 0x03c42, + 0x03c45, 0x03c47, 0x03c49, 0x03c4a, 0x03cc0, 0x03cd1, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_pre_crashdumper_gpu_registers), 8)); + +/* + * Block : ['BROADCAST', 'CP', 'GRAS', 'GXCLKCTL'] + * Block : ['PC', 'RBBM', 'RDVM', 'UCHE'] + * Block : ['VFD', 'VPC', 'VSC'] + * Pipeline: A7XX_PIPE_NONE + * pairs : 196 (Regs:1778) + */ +static const u32 gen7_9_0_gpu_registers[] = { + 0x00000, 0x00000, 0x00002, 0x00002, 0x00011, 0x00012, 0x00016, 0x0001b, + 0x0001f, 0x00032, 0x00038, 0x0003c, 0x00044, 0x00044, 0x00047, 0x00047, + 0x00049, 0x0004a, 0x0004c, 0x0004c, 0x00056, 0x00056, 0x00073, 0x0007d, + 0x00090, 0x000a8, 0x000ad, 0x000ad, 0x00117, 0x00117, 0x00120, 0x00122, + 0x00130, 0x0013f, 0x00142, 0x0015f, 0x00162, 0x00164, 0x00166, 0x00171, + 0x00173, 0x00174, 0x00176, 0x0017b, 0x0017e, 0x00180, 0x00183, 0x00192, + 0x00195, 0x00196, 0x00199, 0x0019a, 0x0019d, 0x001a2, 0x001aa, 0x001ae, + 0x001b9, 0x001b9, 0x001bb, 0x001bb, 0x001be, 0x001be, 0x001c1, 0x001c2, + 0x001c5, 0x001c5, 
0x001c7, 0x001c7, 0x001c9, 0x001c9, 0x001cb, 0x001ce, + 0x001d1, 0x001df, 0x001e1, 0x001e3, 0x001e5, 0x001e5, 0x001e7, 0x001e9, + 0x00200, 0x0020d, 0x00215, 0x00253, 0x00260, 0x00260, 0x00264, 0x00270, + 0x00272, 0x00274, 0x00281, 0x00281, 0x00283, 0x00283, 0x00289, 0x0028d, + 0x00290, 0x002a2, 0x002c0, 0x002c1, 0x00300, 0x00401, 0x00410, 0x00451, + 0x00460, 0x004a3, 0x004c0, 0x004d1, 0x00500, 0x00500, 0x00507, 0x0050b, + 0x0050f, 0x0050f, 0x00511, 0x00511, 0x00533, 0x00535, 0x00540, 0x0055b, + 0x00564, 0x00567, 0x00574, 0x00577, 0x00584, 0x0059b, 0x005fb, 0x005ff, + 0x00800, 0x00808, 0x00810, 0x00813, 0x00820, 0x00821, 0x00823, 0x00827, + 0x00830, 0x00834, 0x0083f, 0x00841, 0x00843, 0x00847, 0x0084f, 0x00886, + 0x008a0, 0x008ab, 0x008c0, 0x008c0, 0x008c4, 0x008c4, 0x008c6, 0x008c6, + 0x008d0, 0x008dd, 0x008e0, 0x008e6, 0x008f0, 0x008f3, 0x00900, 0x00903, + 0x00908, 0x00911, 0x00928, 0x0093e, 0x00942, 0x0094d, 0x00980, 0x00984, + 0x0098d, 0x0098f, 0x009b0, 0x009b4, 0x009c2, 0x009c9, 0x009ce, 0x009d7, + 0x009e0, 0x009e7, 0x00a00, 0x00a00, 0x00a02, 0x00a03, 0x00a10, 0x00a4f, + 0x00a61, 0x00a9f, 0x00ad0, 0x00adb, 0x00b00, 0x00b31, 0x00b35, 0x00b3c, + 0x00b40, 0x00b40, 0x00b70, 0x00b73, 0x00b78, 0x00b79, 0x00b7c, 0x00b7d, + 0x00b80, 0x00b81, 0x00b84, 0x00b85, 0x00b88, 0x00b89, 0x00b8c, 0x00b8d, + 0x00b90, 0x00b93, 0x00b98, 0x00b99, 0x00b9c, 0x00b9d, 0x00ba0, 0x00ba1, + 0x00ba4, 0x00ba5, 0x00ba8, 0x00ba9, 0x00bac, 0x00bad, 0x00bb0, 0x00bb1, + 0x00bb4, 0x00bb5, 0x00bb8, 0x00bb9, 0x00bbc, 0x00bbd, 0x00bc0, 0x00bc1, + 0x00c00, 0x00c00, 0x00c02, 0x00c04, 0x00c06, 0x00c06, 0x00c10, 0x00cd9, + 0x00ce0, 0x00d0c, 0x00df0, 0x00df4, 0x00e01, 0x00e02, 0x00e07, 0x00e0e, + 0x00e10, 0x00e13, 0x00e17, 0x00e19, 0x00e1c, 0x00e2b, 0x00e30, 0x00e32, + 0x00e3a, 0x00e3d, 0x00e50, 0x00e5b, 0x02840, 0x0287f, 0x0ec00, 0x0ec01, + 0x0ec05, 0x0ec05, 0x0ec07, 0x0ec07, 0x0ec0a, 0x0ec0a, 0x0ec12, 0x0ec12, + 0x0ec26, 0x0ec28, 0x0ec2b, 0x0ec2d, 0x0ec2f, 0x0ec2f, 0x0ec40, 0x0ec41, + 0x0ec45, 0x0ec45, 0x0ec47, 0x0ec47, 0x0ec4a, 0x0ec4a, 0x0ec52, 0x0ec52, + 0x0ec66, 0x0ec68, 0x0ec6b, 0x0ec6d, 0x0ec6f, 0x0ec6f, 0x0ec80, 0x0ec81, + 0x0ec85, 0x0ec85, 0x0ec87, 0x0ec87, 0x0ec8a, 0x0ec8a, 0x0ec92, 0x0ec92, + 0x0eca6, 0x0eca8, 0x0ecab, 0x0ecad, 0x0ecaf, 0x0ecaf, 0x0ecc0, 0x0ecc1, + 0x0ecc5, 0x0ecc5, 0x0ecc7, 0x0ecc7, 0x0ecca, 0x0ecca, 0x0ecd2, 0x0ecd2, + 0x0ece6, 0x0ece8, 0x0eceb, 0x0eced, 0x0ecef, 0x0ecef, 0x0ed00, 0x0ed01, + 0x0ed05, 0x0ed05, 0x0ed07, 0x0ed07, 0x0ed0a, 0x0ed0a, 0x0ed12, 0x0ed12, + 0x0ed26, 0x0ed28, 0x0ed2b, 0x0ed2d, 0x0ed2f, 0x0ed2f, 0x0ed40, 0x0ed41, + 0x0ed45, 0x0ed45, 0x0ed47, 0x0ed47, 0x0ed4a, 0x0ed4a, 0x0ed52, 0x0ed52, + 0x0ed66, 0x0ed68, 0x0ed6b, 0x0ed6d, 0x0ed6f, 0x0ed6f, 0x0ed80, 0x0ed81, + 0x0ed85, 0x0ed85, 0x0ed87, 0x0ed87, 0x0ed8a, 0x0ed8a, 0x0ed92, 0x0ed92, + 0x0eda6, 0x0eda8, 0x0edab, 0x0edad, 0x0edaf, 0x0edaf, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_gpu_registers), 8)); + +static const u32 gen7_9_0_gxclkctl_registers[] = { + 0x18800, 0x18800, 0x18808, 0x1880b, 0x18820, 0x18822, 0x18830, 0x18830, + 0x18834, 0x1883b, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_gxclkctl_registers), 8)); + +/* + * Block : ['GMUAO', 'GMUCX', 'GMUCX_RAM'] + * Pipeline: A7XX_PIPE_NONE + * pairs : 134 (Regs:429) + */ +static const u32 gen7_9_0_gmu_registers[] = { + 0x10001, 0x10001, 0x10003, 0x10003, 0x10401, 0x10401, 0x10403, 0x10403, + 0x10801, 0x10801, 0x10803, 0x10803, 0x10c01, 0x10c01, 0x10c03, 0x10c03, + 0x11001, 0x11001, 0x11003, 0x11003, 0x11401, 0x11401, 0x11403, 
0x11403, + 0x11801, 0x11801, 0x11803, 0x11803, 0x11c01, 0x11c01, 0x11c03, 0x11c03, + 0x1f400, 0x1f40b, 0x1f40f, 0x1f411, 0x1f500, 0x1f500, 0x1f507, 0x1f507, + 0x1f509, 0x1f50b, 0x1f700, 0x1f701, 0x1f704, 0x1f706, 0x1f708, 0x1f709, + 0x1f70c, 0x1f70d, 0x1f710, 0x1f711, 0x1f713, 0x1f716, 0x1f718, 0x1f71d, + 0x1f720, 0x1f724, 0x1f729, 0x1f729, 0x1f730, 0x1f747, 0x1f750, 0x1f756, + 0x1f758, 0x1f759, 0x1f75c, 0x1f75c, 0x1f760, 0x1f761, 0x1f764, 0x1f76b, + 0x1f770, 0x1f775, 0x1f780, 0x1f785, 0x1f790, 0x1f798, 0x1f7a0, 0x1f7a8, + 0x1f7b0, 0x1f7b3, 0x1f800, 0x1f804, 0x1f807, 0x1f808, 0x1f80b, 0x1f80c, + 0x1f80f, 0x1f80f, 0x1f811, 0x1f811, 0x1f813, 0x1f817, 0x1f819, 0x1f81c, + 0x1f824, 0x1f82a, 0x1f82d, 0x1f830, 0x1f840, 0x1f853, 0x1f860, 0x1f860, + 0x1f862, 0x1f866, 0x1f868, 0x1f869, 0x1f870, 0x1f879, 0x1f87f, 0x1f881, + 0x1f890, 0x1f896, 0x1f8a0, 0x1f8a2, 0x1f8a4, 0x1f8af, 0x1f8b8, 0x1f8b9, + 0x1f8c0, 0x1f8c1, 0x1f8c3, 0x1f8c4, 0x1f8d0, 0x1f8d0, 0x1f8ec, 0x1f8ec, + 0x1f8f0, 0x1f8f1, 0x1f910, 0x1f917, 0x1f920, 0x1f921, 0x1f924, 0x1f925, + 0x1f928, 0x1f929, 0x1f92c, 0x1f92d, 0x1f942, 0x1f944, 0x1f948, 0x1f94a, + 0x1f94f, 0x1f951, 0x1f954, 0x1f955, 0x1f95d, 0x1f95d, 0x1f962, 0x1f96b, + 0x1f970, 0x1f971, 0x1f973, 0x1f977, 0x1f97c, 0x1f97c, 0x1f980, 0x1f981, + 0x1f984, 0x1f986, 0x1f992, 0x1f993, 0x1f996, 0x1f99e, 0x1f9c5, 0x1f9d4, + 0x1f9f0, 0x1f9f1, 0x1f9f8, 0x1f9fa, 0x1f9fc, 0x1f9fc, 0x1fa00, 0x1fa03, + 0x20000, 0x20013, 0x20018, 0x2001a, 0x20020, 0x20021, 0x20024, 0x20025, + 0x2002a, 0x2002c, 0x20030, 0x20031, 0x20034, 0x20036, 0x23801, 0x23801, + 0x23803, 0x23803, 0x23805, 0x23805, 0x23807, 0x23807, 0x23809, 0x23809, + 0x2380b, 0x2380b, 0x2380d, 0x2380d, 0x2380f, 0x2380f, 0x23811, 0x23811, + 0x23813, 0x23813, 0x23815, 0x23815, 0x23817, 0x23817, 0x23819, 0x23819, + 0x2381b, 0x2381b, 0x2381d, 0x2381d, 0x2381f, 0x23820, 0x23822, 0x23822, + 0x23824, 0x23824, 0x23826, 0x23826, 0x23828, 0x23828, 0x2382a, 0x2382a, + 0x2382c, 0x2382c, 0x2382e, 0x2382e, 0x23830, 0x23830, 0x23832, 0x23832, + 0x23834, 0x23834, 0x23836, 0x23836, 0x23838, 0x23838, 0x2383a, 0x2383a, + 0x2383c, 0x2383c, 0x2383e, 0x2383e, 0x23840, 0x23847, 0x23b00, 0x23b01, + 0x23b03, 0x23b03, 0x23b05, 0x23b0e, 0x23b10, 0x23b13, 0x23b15, 0x23b16, + 0x23b28, 0x23b28, 0x23b30, 0x23b30, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_gmu_registers), 8)); + +/* + * Block : ['GMUGX'] + * Pipeline: A7XX_PIPE_NONE + * pairs : 44 (Regs:454) + */ +static const u32 gen7_9_0_gmugx_registers[] = { + 0x1a400, 0x1a41f, 0x1a440, 0x1a45f, 0x1a480, 0x1a49f, 0x1a4c0, 0x1a4df, + 0x1a500, 0x1a51f, 0x1a540, 0x1a55f, 0x1a580, 0x1a59f, 0x1a600, 0x1a61f, + 0x1a640, 0x1a65f, 0x1a780, 0x1a781, 0x1a783, 0x1a785, 0x1a787, 0x1a789, + 0x1a78b, 0x1a78d, 0x1a78f, 0x1a791, 0x1a793, 0x1a795, 0x1a797, 0x1a799, + 0x1a79b, 0x1a79d, 0x1a79f, 0x1a7a1, 0x1a7a3, 0x1a7a3, 0x1a7a8, 0x1a7b9, + 0x1a7c0, 0x1a7c1, 0x1a7c4, 0x1a7c5, 0x1a7c8, 0x1a7c9, 0x1a7cc, 0x1a7cd, + 0x1a7d0, 0x1a7d1, 0x1a7d4, 0x1a7d5, 0x1a7d8, 0x1a7d9, 0x1a7dc, 0x1a7dd, + 0x1a7e0, 0x1a7e1, 0x1a7fc, 0x1a7fd, 0x1a800, 0x1a808, 0x1a816, 0x1a816, + 0x1a81e, 0x1a81e, 0x1a826, 0x1a826, 0x1a82e, 0x1a82e, 0x1a836, 0x1a836, + 0x1a83e, 0x1a83e, 0x1a846, 0x1a846, 0x1a84e, 0x1a84e, 0x1a856, 0x1a856, + 0x1a883, 0x1a884, 0x1a890, 0x1a8b3, 0x1a900, 0x1a92b, 0x1a940, 0x1a940, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_gmugx_registers), 8)); + +/* + * Block : ['CX_MISC'] + * Pipeline: A7XX_PIPE_NONE + * pairs : 7 (Regs:56) + */ +static const u32 gen7_9_0_cx_misc_registers[] = { + 0x27800, 
0x27800, 0x27810, 0x27814, 0x27820, 0x27824, 0x27828, 0x2782a, + 0x27832, 0x27857, 0x27880, 0x27881, 0x27c00, 0x27c01, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_cx_misc_registers), 8)); + +/* + * Block : ['DBGC'] + * Pipeline: A7XX_PIPE_NONE + * pairs : 19 (Regs:155) + */ +static const u32 gen7_9_0_dbgc_registers[] = { + 0x00600, 0x0061c, 0x0061e, 0x00634, 0x00640, 0x00643, 0x0064e, 0x00652, + 0x00654, 0x0065e, 0x00699, 0x00699, 0x0069b, 0x0069e, 0x006c2, 0x006e4, + 0x006e6, 0x006e6, 0x006e9, 0x006e9, 0x006eb, 0x006eb, 0x006f1, 0x006f4, + 0x00700, 0x00707, 0x00718, 0x00718, 0x00720, 0x00729, 0x00740, 0x0074a, + 0x00758, 0x00758, 0x00760, 0x00762, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_dbgc_registers), 8)); + +/* + * Block : ['CX_DBGC'] + * Pipeline: A7XX_PIPE_NONE + * pairs : 7 (Regs:75) + */ +static const u32 gen7_9_0_cx_dbgc_registers[] = { + 0x18400, 0x1841c, 0x1841e, 0x18434, 0x18440, 0x18443, 0x1844e, 0x18452, + 0x18454, 0x1845e, 0x18520, 0x18520, 0x18580, 0x18581, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_cx_dbgc_registers), 8)); + +/* + * Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF'] + * Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC'] + * Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_NONE + * pairs : 29 (Regs:573) + */ +static const u32 gen7_9_0_non_context_pipe_br_registers[] = { + 0x00887, 0x0088c, 0x08600, 0x08602, 0x08610, 0x0861b, 0x08620, 0x08620, + 0x08630, 0x08630, 0x08637, 0x08639, 0x08640, 0x08640, 0x09600, 0x09603, + 0x0960a, 0x09616, 0x09624, 0x0963a, 0x09640, 0x09640, 0x09e00, 0x09e00, + 0x09e02, 0x09e07, 0x09e0a, 0x09e16, 0x09e18, 0x09e1a, 0x09e1c, 0x09e1c, + 0x09e20, 0x09e25, 0x09e30, 0x09e31, 0x09e40, 0x09e51, 0x09e64, 0x09e6c, + 0x09e70, 0x09e72, 0x09e78, 0x09e79, 0x09e80, 0x09fff, 0x0a600, 0x0a600, + 0x0a603, 0x0a603, 0x0a610, 0x0a61f, 0x0a630, 0x0a631, 0x0a638, 0x0a63c, + 0x0a640, 0x0a65f, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_br_registers), 8)); + +/* + * Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF'] + * Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC'] + * Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC'] + * Pipeline: A7XX_PIPE_BV + * Cluster : A7XX_CLUSTER_NONE + * pairs : 29 (Regs:573) + */ +static const u32 gen7_9_0_non_context_pipe_bv_registers[] = { + 0x00887, 0x0088c, 0x08600, 0x08602, 0x08610, 0x0861b, 0x08620, 0x08620, + 0x08630, 0x08630, 0x08637, 0x08639, 0x08640, 0x08640, 0x09600, 0x09603, + 0x0960a, 0x09616, 0x09624, 0x0963a, 0x09640, 0x09640, 0x09e00, 0x09e00, + 0x09e02, 0x09e07, 0x09e0a, 0x09e16, 0x09e18, 0x09e1a, 0x09e1c, 0x09e1c, + 0x09e20, 0x09e25, 0x09e30, 0x09e31, 0x09e40, 0x09e51, 0x09e64, 0x09e6c, + 0x09e70, 0x09e72, 0x09e78, 0x09e79, 0x09e80, 0x09fff, 0x0a600, 0x0a600, + 0x0a603, 0x0a603, 0x0a610, 0x0a61f, 0x0a630, 0x0a631, 0x0a638, 0x0a63c, + 0x0a640, 0x0a65f, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_bv_registers), 8)); + +/* + * Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF'] + * Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC'] + * Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC'] + * Pipeline: A7XX_PIPE_LPAC + * Cluster : A7XX_CLUSTER_NONE + * pairs : 2 (Regs:7) + */ +static const u32 gen7_9_0_non_context_pipe_lpac_registers[] = { + 0x00887, 0x0088c, 0x00f80, 0x00f80, + UINT_MAX, UINT_MAX, +}; 
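Each of the *_registers tables above and below is a flat list of inclusive (first, last) register-offset pairs terminated by a UINT_MAX/UINT_MAX pair; the "pairs : N (Regs:M)" figures in the block comments are just the number of pairs and the total offsets they span. A minimal sketch of how such a list can be walked to reproduce those counts (count_range_regs() is a hypothetical helper for illustration only, not part of the driver):

	/* Count pairs and total registers in a UINT_MAX-terminated range list. */
	static void count_range_regs(const u32 *ranges)
	{
		unsigned int i, pairs = 0, regs = 0;

		for (i = 0; ranges[i] != UINT_MAX; i += 2) {
			pairs++;
			regs += ranges[i + 1] - ranges[i] + 1;	/* ranges are inclusive */
		}

		pr_debug("pairs : %u (Regs:%u)\n", pairs, regs);
	}

For gen7_9_0_non_context_pipe_lpac_registers above this gives 2 pairs covering 7 registers (0x00887-0x0088c plus 0x00f80), matching its block comment.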
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_lpac_registers), 8)); + +/* + * Block : ['RB'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_NONE + * pairs : 5 (Regs:37) + */ +static const u32 gen7_9_0_non_context_rb_pipe_br_rac_registers[] = { + 0x08e10, 0x08e1c, 0x08e20, 0x08e25, 0x08e51, 0x08e5a, 0x08e6a, 0x08e6d, + 0x08ea0, 0x08ea3, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_rb_pipe_br_rac_registers), 8)); + +/* + * Block : ['RB'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_NONE + * pairs : 15 (Regs:66) + */ +static const u32 gen7_9_0_non_context_rb_pipe_br_rbp_registers[] = { + 0x08e01, 0x08e01, 0x08e04, 0x08e04, 0x08e06, 0x08e09, 0x08e0c, 0x08e0c, + 0x08e28, 0x08e28, 0x08e2c, 0x08e35, 0x08e3b, 0x08e40, 0x08e50, 0x08e50, + 0x08e5b, 0x08e5d, 0x08e5f, 0x08e5f, 0x08e61, 0x08e61, 0x08e63, 0x08e66, + 0x08e68, 0x08e69, 0x08e70, 0x08e7d, 0x08e80, 0x08e8f, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_rb_pipe_br_rbp_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_NONE + * Location: A7XX_HLSQ_STATE + * pairs : 4 (Regs:28) + */ +static const u32 gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers[] = { + 0x0ae52, 0x0ae52, 0x0ae60, 0x0ae67, 0x0ae69, 0x0ae75, 0x0aec0, 0x0aec5, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_NONE + * Location: A7XX_SP_TOP + * pairs : 10 (Regs:61) + */ +static const u32 gen7_9_0_non_context_sp_pipe_br_sp_top_registers[] = { + 0x0ae00, 0x0ae00, 0x0ae02, 0x0ae04, 0x0ae06, 0x0ae0a, 0x0ae0c, 0x0ae0c, + 0x0ae0f, 0x0ae0f, 0x0ae28, 0x0ae2b, 0x0ae35, 0x0ae35, 0x0ae3a, 0x0ae3f, + 0x0ae50, 0x0ae52, 0x0ae80, 0x0aea3, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_sp_top_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_NONE + * Location: A7XX_USPTP + * pairs : 12 (Regs:62) + */ +static const u32 gen7_9_0_non_context_sp_pipe_br_usptp_registers[] = { + 0x0ae00, 0x0ae00, 0x0ae02, 0x0ae04, 0x0ae06, 0x0ae0a, 0x0ae0c, 0x0ae0c, + 0x0ae0f, 0x0ae0f, 0x0ae28, 0x0ae2b, 0x0ae30, 0x0ae32, 0x0ae35, 0x0ae35, + 0x0ae3a, 0x0ae3b, 0x0ae3e, 0x0ae3f, 0x0ae50, 0x0ae52, 0x0ae80, 0x0aea3, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_usptp_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_NONE + * Location: A7XX_HLSQ_DP_STR + * pairs : 2 (Regs:5) + */ +static const u32 gen7_9_0_non_context_sp_pipe_br_hlsq_dp_str_registers[] = { + 0x0ae6b, 0x0ae6c, 0x0ae73, 0x0ae75, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_hlsq_dp_str_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_LPAC + * Cluster : A7XX_CLUSTER_NONE + * Location: A7XX_HLSQ_STATE + * pairs : 1 (Regs:5) + */ +static const u32 gen7_9_0_non_context_sp_pipe_lpac_hlsq_state_registers[] = { + 0x0af88, 0x0af8c, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_hlsq_state_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_LPAC + * Cluster : A7XX_CLUSTER_NONE + * Location: A7XX_SP_TOP + * pairs : 1 (Regs:6) + */ +static const u32 gen7_9_0_non_context_sp_pipe_lpac_sp_top_registers[] = { + 0x0af80, 0x0af85, + UINT_MAX, UINT_MAX, +}; 
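The static_assert(IS_ALIGNED(sizeof(...), 8)) that follows every table is a compile-time guard that the array holds an even number of u32 entries, i.e. that it ends on a complete (first, last) pair and a full UINT_MAX/UINT_MAX terminator rather than a stray odd value. An equivalent way to state the same invariant, shown only to make the intent explicit (the ASSERT_EVEN_PAIRS macro below is illustrative and not used by the driver):

	#include <linux/build_bug.h>
	#include <linux/kernel.h>

	/* sizeof(u32[n]) is a multiple of 8 exactly when n is even. */
	#define ASSERT_EVEN_PAIRS(tbl) \
		static_assert(ARRAY_SIZE(tbl) % 2 == 0, #tbl " must end on a complete pair")

	ASSERT_EVEN_PAIRS(gen7_9_0_non_context_sp_pipe_lpac_sp_top_registers);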
+static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_sp_top_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_LPAC + * Cluster : A7XX_CLUSTER_NONE + * Location: A7XX_USPTP + * pairs : 2 (Regs:9) + */ +static const u32 gen7_9_0_non_context_sp_pipe_lpac_usptp_registers[] = { + 0x0af80, 0x0af85, 0x0af90, 0x0af92, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_usptp_registers), 8)); + +/* + * Block : ['TPL1'] + * Pipeline: A7XX_PIPE_NONE + * Cluster : A7XX_CLUSTER_NONE + * Location: A7XX_USPTP + * pairs : 5 (Regs:29) + */ +static const u32 gen7_9_0_non_context_tpl1_pipe_none_usptp_registers[] = { + 0x0b602, 0x0b602, 0x0b604, 0x0b604, 0x0b608, 0x0b60c, 0x0b610, 0x0b621, + 0x0b630, 0x0b633, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_none_usptp_registers), 8)); + +/* + * Block : ['TPL1'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_NONE + * Location: A7XX_USPTP + * pairs : 1 (Regs:1) + */ +static const u32 gen7_9_0_non_context_tpl1_pipe_br_usptp_registers[] = { + 0x0b600, 0x0b600, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_br_usptp_registers), 8)); + +/* + * Block : ['TPL1'] + * Pipeline: A7XX_PIPE_LPAC + * Cluster : A7XX_CLUSTER_NONE + * Location: A7XX_USPTP + * pairs : 1 (Regs:1) + */ +static const u32 gen7_9_0_non_context_tpl1_pipe_lpac_usptp_registers[] = { + 0x0b780, 0x0b780, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_lpac_usptp_registers), 8)); + +/* + * Block : ['GRAS'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_GRAS + * pairs : 14 (Regs:293) + */ +static const u32 gen7_9_0_gras_pipe_br_cluster_gras_registers[] = { + 0x08000, 0x0800c, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d, + 0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa, + 0x08100, 0x08107, 0x08109, 0x0810b, 0x08110, 0x08116, 0x08120, 0x0813f, + 0x08400, 0x08406, 0x0840a, 0x0840b, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_gras_pipe_br_cluster_gras_registers), 8)); + +/* + * Block : ['GRAS'] + * Pipeline: A7XX_PIPE_BV + * Cluster : A7XX_CLUSTER_GRAS + * pairs : 14 (Regs:293) + */ +static const u32 gen7_9_0_gras_pipe_bv_cluster_gras_registers[] = { + 0x08000, 0x0800c, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d, + 0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa, + 0x08100, 0x08107, 0x08109, 0x0810b, 0x08110, 0x08116, 0x08120, 0x0813f, + 0x08400, 0x08406, 0x0840a, 0x0840b, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_gras_pipe_bv_cluster_gras_registers), 8)); + +/* + * Block : ['PC'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_FE + * pairs : 6 (Regs:31) + */ +static const u32 gen7_9_0_pc_pipe_br_cluster_fe_registers[] = { + 0x09800, 0x09804, 0x09806, 0x0980a, 0x09810, 0x09811, 0x09884, 0x09886, + 0x09970, 0x09972, 0x09b00, 0x09b0c, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_pc_pipe_br_cluster_fe_registers), 8)); + +/* + * Block : ['PC'] + * Pipeline: A7XX_PIPE_BV + * Cluster : A7XX_CLUSTER_FE + * pairs : 6 (Regs:31) + */ +static const u32 gen7_9_0_pc_pipe_bv_cluster_fe_registers[] = { + 0x09800, 0x09804, 0x09806, 0x0980a, 0x09810, 0x09811, 0x09884, 0x09886, + 0x09970, 0x09972, 0x09b00, 0x09b0c, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_pc_pipe_bv_cluster_fe_registers), 8)); + +/* + * Block : ['VFD'] + * Pipeline: A7XX_PIPE_BR + * 
Cluster : A7XX_CLUSTER_FE + * pairs : 2 (Regs:236) + */ +static const u32 gen7_9_0_vfd_pipe_br_cluster_fe_registers[] = { + 0x0a000, 0x0a009, 0x0a00e, 0x0a0ef, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_vfd_pipe_br_cluster_fe_registers), 8)); + +/* + * Block : ['VFD'] + * Pipeline: A7XX_PIPE_BV + * Cluster : A7XX_CLUSTER_FE + * pairs : 2 (Regs:236) + */ +static const u32 gen7_9_0_vfd_pipe_bv_cluster_fe_registers[] = { + 0x0a000, 0x0a009, 0x0a00e, 0x0a0ef, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_vfd_pipe_bv_cluster_fe_registers), 8)); + +/* + * Block : ['VPC'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_FE + * pairs : 2 (Regs:18) + */ +static const u32 gen7_9_0_vpc_pipe_br_cluster_fe_registers[] = { + 0x09300, 0x0930a, 0x09311, 0x09317, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_fe_registers), 8)); + +/* + * Block : ['VPC'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_PC_VS + * pairs : 3 (Regs:30) + */ +static const u32 gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers[] = { + 0x09101, 0x0910c, 0x09300, 0x0930a, 0x09311, 0x09317, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers), 8)); + +/* + * Block : ['VPC'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_VPC_PS + * pairs : 5 (Regs:76) + */ +static const u32 gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers[] = { + 0x09200, 0x0920f, 0x09212, 0x09216, 0x09218, 0x0923c, 0x09300, 0x0930a, + 0x09311, 0x09317, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers), 8)); + +/* + * Block : ['VPC'] + * Pipeline: A7XX_PIPE_BV + * Cluster : A7XX_CLUSTER_FE + * pairs : 2 (Regs:18) + */ +static const u32 gen7_9_0_vpc_pipe_bv_cluster_fe_registers[] = { + 0x09300, 0x0930a, 0x09311, 0x09317, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_fe_registers), 8)); + +/* + * Block : ['VPC'] + * Pipeline: A7XX_PIPE_BV + * Cluster : A7XX_CLUSTER_PC_VS + * pairs : 3 (Regs:30) + */ +static const u32 gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers[] = { + 0x09101, 0x0910c, 0x09300, 0x0930a, 0x09311, 0x09317, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers), 8)); + +/* + * Block : ['VPC'] + * Pipeline: A7XX_PIPE_BV + * Cluster : A7XX_CLUSTER_VPC_PS + * pairs : 5 (Regs:76) + */ +static const u32 gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers[] = { + 0x09200, 0x0920f, 0x09212, 0x09216, 0x09218, 0x0923c, 0x09300, 0x0930a, + 0x09311, 0x09317, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers), 8)); + +/* + * Block : ['RB'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_PS + * pairs : 39 (Regs:133) + */ +static const u32 gen7_9_0_rb_pipe_br_cluster_ps_rac_registers[] = { + 0x08802, 0x08802, 0x08804, 0x08806, 0x08809, 0x0880a, 0x0880e, 0x08811, + 0x08818, 0x0881e, 0x08821, 0x08821, 0x08823, 0x08826, 0x08829, 0x08829, + 0x0882b, 0x0882e, 0x08831, 0x08831, 0x08833, 0x08836, 0x08839, 0x08839, + 0x0883b, 0x0883e, 0x08841, 0x08841, 0x08843, 0x08846, 0x08849, 0x08849, + 0x0884b, 0x0884e, 0x08851, 0x08851, 0x08853, 0x08856, 0x08859, 0x08859, + 0x0885b, 0x0885e, 0x08860, 0x08864, 0x08870, 0x08870, 0x08873, 0x08876, + 0x08878, 0x08879, 0x08882, 0x08885, 0x08887, 0x08889, 0x08891, 0x08891, + 0x08898, 0x08899, 0x088c0, 0x088c1, 0x088e5, 0x088e5, 0x088f4, 0x088f5, + 0x08a00, 0x08a05, 0x08a10, 0x08a15, 0x08a20, 
0x08a25, 0x08a30, 0x08a35, + 0x08c00, 0x08c01, 0x08c18, 0x08c1f, 0x08c26, 0x08c34, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_rb_pipe_br_cluster_ps_rac_registers), 8)); + +/* + * Block : ['RB'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_PS + * pairs : 34 (Regs:100) + */ +static const u32 gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers[] = { + 0x08800, 0x08801, 0x08803, 0x08803, 0x0880b, 0x0880d, 0x08812, 0x08812, + 0x08820, 0x08820, 0x08822, 0x08822, 0x08827, 0x08828, 0x0882a, 0x0882a, + 0x0882f, 0x08830, 0x08832, 0x08832, 0x08837, 0x08838, 0x0883a, 0x0883a, + 0x0883f, 0x08840, 0x08842, 0x08842, 0x08847, 0x08848, 0x0884a, 0x0884a, + 0x0884f, 0x08850, 0x08852, 0x08852, 0x08857, 0x08858, 0x0885a, 0x0885a, + 0x0885f, 0x0885f, 0x08865, 0x08865, 0x08871, 0x08872, 0x08877, 0x08877, + 0x08880, 0x08881, 0x08886, 0x08886, 0x08890, 0x08890, 0x088d0, 0x088e4, + 0x088e8, 0x088ea, 0x088f0, 0x088f0, 0x08900, 0x0891a, 0x08927, 0x08928, + 0x08c17, 0x08c17, 0x08c20, 0x08c25, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_SP_VS + * Location: A7XX_HLSQ_STATE + * pairs : 29 (Regs:215) + */ +static const u32 gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers[] = { + 0x0a800, 0x0a801, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824, + 0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a832, 0x0a835, 0x0a83a, 0x0a83a, + 0x0a83c, 0x0a83c, 0x0a83f, 0x0a841, 0x0a85b, 0x0a85d, 0x0a862, 0x0a862, + 0x0a864, 0x0a864, 0x0a867, 0x0a867, 0x0a870, 0x0a870, 0x0a872, 0x0a872, + 0x0a88c, 0x0a88e, 0x0a893, 0x0a893, 0x0a895, 0x0a895, 0x0a898, 0x0a898, + 0x0a89a, 0x0a89d, 0x0a8a0, 0x0a8af, 0x0a8c0, 0x0a8c3, 0x0a974, 0x0a977, + 0x0ab00, 0x0ab03, 0x0ab05, 0x0ab05, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, + 0x0ab40, 0x0abbf, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_SP_VS + * Location: A7XX_SP_TOP + * pairs : 22 (Regs:73) + */ +static const u32 gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers[] = { + 0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a82d, 0x0a82d, + 0x0a82f, 0x0a831, 0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840, + 0x0a85c, 0x0a85d, 0x0a862, 0x0a864, 0x0a868, 0x0a868, 0x0a870, 0x0a871, + 0x0a88d, 0x0a88e, 0x0a893, 0x0a895, 0x0a899, 0x0a899, 0x0a8a0, 0x0a8af, + 0x0a974, 0x0a977, 0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab04, 0x0ab05, + 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_SP_VS + * Location: A7XX_USPTP + * pairs : 16 (Regs:269) + */ +static const u32 gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers[] = { + 0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a82d, 0x0a82d, + 0x0a82f, 0x0a833, 0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861, + 0x0a863, 0x0a868, 0x0a870, 0x0a88c, 0x0a88f, 0x0a892, 0x0a894, 0x0a899, + 0x0a8c0, 0x0a8c3, 0x0ab00, 0x0ab05, 0x0ab21, 0x0ab22, 0x0ab40, 0x0abbf, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_SP_PS + * Location: A7XX_HLSQ_STATE + * pairs : 21 (Regs:334) + */ +static const u32 
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers[] = { + 0x0a980, 0x0a984, 0x0a99e, 0x0a99e, 0x0a9a7, 0x0a9a7, 0x0a9aa, 0x0a9aa, + 0x0a9ae, 0x0a9b0, 0x0a9b2, 0x0a9b5, 0x0a9ba, 0x0a9ba, 0x0a9bc, 0x0a9bc, + 0x0a9c4, 0x0a9c4, 0x0a9c6, 0x0a9c6, 0x0a9cd, 0x0a9cd, 0x0a9e0, 0x0a9fc, + 0x0aa00, 0x0aa00, 0x0aa30, 0x0aa31, 0x0aa40, 0x0aabf, 0x0aaf2, 0x0aaf3, + 0x0ab00, 0x0ab03, 0x0ab05, 0x0ab05, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, + 0x0ab40, 0x0abbf, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_SP_PS + * Location: A7XX_HLSQ_DP + * pairs : 3 (Regs:19) + */ +static const u32 gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers[] = { + 0x0a9b1, 0x0a9b1, 0x0a9c6, 0x0a9cb, 0x0a9d4, 0x0a9df, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_SP_PS + * Location: A7XX_SP_TOP + * pairs : 18 (Regs:77) + */ +static const u32 gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers[] = { + 0x0a980, 0x0a980, 0x0a982, 0x0a984, 0x0a99e, 0x0a9a2, 0x0a9a7, 0x0a9a8, + 0x0a9aa, 0x0a9aa, 0x0a9ae, 0x0a9ae, 0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5, + 0x0a9ba, 0x0a9bc, 0x0a9c5, 0x0a9c5, 0x0a9e0, 0x0a9f9, 0x0aa00, 0x0aa03, + 0x0aaf2, 0x0aaf3, 0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab04, 0x0ab05, + 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_SP_PS + * Location: A7XX_USPTP + * pairs : 17 (Regs:333) + */ +static const u32 gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers[] = { + 0x0a980, 0x0a982, 0x0a985, 0x0a9a6, 0x0a9a8, 0x0a9a9, 0x0a9ab, 0x0a9ae, + 0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9bf, 0x0a9c2, 0x0a9c3, + 0x0a9c5, 0x0a9c5, 0x0a9cd, 0x0a9cd, 0x0a9d0, 0x0a9d3, 0x0aa01, 0x0aa03, + 0x0aa30, 0x0aa31, 0x0aa40, 0x0aabf, 0x0ab00, 0x0ab05, 0x0ab21, 0x0ab22, + 0x0ab40, 0x0abbf, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_SP_PS + * Location: A7XX_HLSQ_DP_STR + * pairs : 1 (Regs:6) + */ +static const u32 gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers[] = { + 0x0a9c6, 0x0a9cb, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BV + * Cluster : A7XX_CLUSTER_SP_VS + * Location: A7XX_HLSQ_STATE + * pairs : 28 (Regs:213) + */ +static const u32 gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers[] = { + 0x0a800, 0x0a801, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824, + 0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a832, 0x0a835, 0x0a83a, 0x0a83a, + 0x0a83c, 0x0a83c, 0x0a83f, 0x0a841, 0x0a85b, 0x0a85d, 0x0a862, 0x0a862, + 0x0a864, 0x0a864, 0x0a867, 0x0a867, 0x0a870, 0x0a870, 0x0a872, 0x0a872, + 0x0a88c, 0x0a88e, 0x0a893, 0x0a893, 0x0a895, 0x0a895, 0x0a898, 0x0a898, + 0x0a89a, 0x0a89d, 0x0a8a0, 0x0a8af, 0x0a8c0, 0x0a8c3, 0x0a974, 0x0a977, + 0x0ab00, 0x0ab02, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, 0x0ab40, 0x0abbf, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: 
A7XX_PIPE_BV + * Cluster : A7XX_CLUSTER_SP_VS + * Location: A7XX_SP_TOP + * pairs : 21 (Regs:71) + */ +static const u32 gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers[] = { + 0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a82d, 0x0a82d, + 0x0a82f, 0x0a831, 0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840, + 0x0a85c, 0x0a85d, 0x0a862, 0x0a864, 0x0a868, 0x0a868, 0x0a870, 0x0a871, + 0x0a88d, 0x0a88e, 0x0a893, 0x0a895, 0x0a899, 0x0a899, 0x0a8a0, 0x0a8af, + 0x0a974, 0x0a977, 0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab0a, 0x0ab1b, + 0x0ab20, 0x0ab20, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_BV + * Cluster : A7XX_CLUSTER_SP_VS + * Location: A7XX_USPTP + * pairs : 16 (Regs:266) + */ +static const u32 gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers[] = { + 0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a82d, 0x0a82d, + 0x0a82f, 0x0a833, 0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861, + 0x0a863, 0x0a868, 0x0a870, 0x0a88c, 0x0a88f, 0x0a892, 0x0a894, 0x0a899, + 0x0a8c0, 0x0a8c3, 0x0ab00, 0x0ab02, 0x0ab21, 0x0ab22, 0x0ab40, 0x0abbf, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_LPAC + * Cluster : A7XX_CLUSTER_SP_PS + * Location: A7XX_HLSQ_STATE + * pairs : 14 (Regs:299) + */ +static const u32 gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_state_registers[] = { + 0x0a9b0, 0x0a9b0, 0x0a9b2, 0x0a9b5, 0x0a9ba, 0x0a9ba, 0x0a9bc, 0x0a9bc, + 0x0a9c4, 0x0a9c4, 0x0a9cd, 0x0a9cd, 0x0a9e2, 0x0a9e3, 0x0a9e6, 0x0a9fc, + 0x0aa00, 0x0aa00, 0x0aa31, 0x0aa35, 0x0aa40, 0x0aabf, 0x0aaf3, 0x0aaf3, + 0x0ab00, 0x0ab01, 0x0ab40, 0x0abbf, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_state_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_LPAC + * Cluster : A7XX_CLUSTER_SP_PS + * Location: A7XX_HLSQ_DP + * pairs : 2 (Regs:13) + */ +static const u32 gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_dp_registers[] = { + 0x0a9b1, 0x0a9b1, 0x0a9d4, 0x0a9df, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_dp_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_LPAC + * Cluster : A7XX_CLUSTER_SP_PS + * Location: A7XX_SP_TOP + * pairs : 9 (Regs:34) + */ +static const u32 gen7_9_0_sp_pipe_lpac_cluster_sp_ps_sp_top_registers[] = { + 0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9bc, 0x0a9c5, 0x0a9c5, + 0x0a9e2, 0x0a9e3, 0x0a9e6, 0x0a9f9, 0x0aa00, 0x0aa00, 0x0aaf3, 0x0aaf3, + 0x0ab00, 0x0ab00, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_sp_top_registers), 8)); + +/* + * Block : ['SP'] + * Pipeline: A7XX_PIPE_LPAC + * Cluster : A7XX_CLUSTER_SP_PS + * Location: A7XX_USPTP + * pairs : 11 (Regs:279) + */ +static const u32 gen7_9_0_sp_pipe_lpac_cluster_sp_ps_usptp_registers[] = { + 0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9be, 0x0a9c2, 0x0a9c3, + 0x0a9c5, 0x0a9c5, 0x0a9cd, 0x0a9cd, 0x0a9d0, 0x0a9d3, 0x0aa31, 0x0aa31, + 0x0aa40, 0x0aabf, 0x0ab00, 0x0ab01, 0x0ab40, 0x0abbf, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_usptp_registers), 8)); + +/* + * Block : ['TPL1'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_SP_VS + * Location: A7XX_USPTP + * pairs : 3 (Regs:10) + */ +static const u32 
gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers[] = { + 0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers), 8)); + +/* + * Block : ['TPL1'] + * Pipeline: A7XX_PIPE_BR + * Cluster : A7XX_CLUSTER_SP_PS + * Location: A7XX_USPTP + * pairs : 6 (Regs:42) + */ +static const u32 gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers[] = { + 0x0b180, 0x0b183, 0x0b190, 0x0b195, 0x0b2c0, 0x0b2d5, 0x0b300, 0x0b307, + 0x0b309, 0x0b309, 0x0b310, 0x0b310, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers), 8)); + +/* + * Block : ['TPL1'] + * Pipeline: A7XX_PIPE_BV + * Cluster : A7XX_CLUSTER_SP_VS + * Location: A7XX_USPTP + * pairs : 3 (Regs:10) + */ +static const u32 gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers[] = { + 0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers), 8)); + +/* + * Block : ['TPL1'] + * Pipeline: A7XX_PIPE_LPAC + * Cluster : A7XX_CLUSTER_SP_PS + * Location: A7XX_USPTP + * pairs : 5 (Regs:7) + */ +static const u32 gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers[] = { + 0x0b180, 0x0b181, 0x0b300, 0x0b301, 0x0b307, 0x0b307, 0x0b309, 0x0b309, + 0x0b310, 0x0b310, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers), 8)); + +static const struct gen7_sel_reg gen7_9_0_rb_rac_sel = { + .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST, + .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, + .val = 0, +}; + +static const struct gen7_sel_reg gen7_9_0_rb_rbp_sel = { + .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST, + .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, + .val = 0x9, +}; + +static struct gen7_cluster_registers gen7_9_0_clusters[] = { + { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, + gen7_9_0_non_context_pipe_br_registers, }, + { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT, + gen7_9_0_non_context_pipe_bv_registers, }, + { A7XX_CLUSTER_NONE, A7XX_PIPE_LPAC, STATE_NON_CONTEXT, + gen7_9_0_non_context_pipe_lpac_registers, }, + { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, + gen7_9_0_non_context_rb_pipe_br_rac_registers, &gen7_9_0_rb_rac_sel, }, + { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT, + gen7_9_0_non_context_rb_pipe_br_rbp_registers, &gen7_9_0_rb_rbp_sel, }, + { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + gen7_9_0_rb_pipe_br_cluster_ps_rac_registers, &gen7_9_0_rb_rac_sel, }, + { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + gen7_9_0_rb_pipe_br_cluster_ps_rac_registers, &gen7_9_0_rb_rac_sel, }, + { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers, &gen7_9_0_rb_rbp_sel, }, + { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers, &gen7_9_0_rb_rbp_sel, }, + { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + gen7_9_0_gras_pipe_br_cluster_gras_registers, }, + { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + gen7_9_0_gras_pipe_br_cluster_gras_registers, }, + { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + gen7_9_0_gras_pipe_bv_cluster_gras_registers, }, + { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + gen7_9_0_gras_pipe_bv_cluster_gras_registers, }, + { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + gen7_9_0_pc_pipe_br_cluster_fe_registers, }, + { 
A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + gen7_9_0_pc_pipe_br_cluster_fe_registers, }, + { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + gen7_9_0_pc_pipe_bv_cluster_fe_registers, }, + { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + gen7_9_0_pc_pipe_bv_cluster_fe_registers, }, + { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + gen7_9_0_vfd_pipe_br_cluster_fe_registers, }, + { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + gen7_9_0_vfd_pipe_br_cluster_fe_registers, }, + { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + gen7_9_0_vfd_pipe_bv_cluster_fe_registers, }, + { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + gen7_9_0_vfd_pipe_bv_cluster_fe_registers, }, + { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + gen7_9_0_vpc_pipe_br_cluster_fe_registers, }, + { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + gen7_9_0_vpc_pipe_br_cluster_fe_registers, }, + { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers, }, + { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers, }, + { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0, + gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers, }, + { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1, + gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers, }, + { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + gen7_9_0_vpc_pipe_bv_cluster_fe_registers, }, + { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + gen7_9_0_vpc_pipe_bv_cluster_fe_registers, }, + { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers, }, + { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers, }, + { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0, + gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, }, + { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1, + gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, }, +}; + +static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = { + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, + gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers, 0xae00}, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, + gen7_9_0_non_context_sp_pipe_br_sp_top_registers, 0xae00}, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + gen7_9_0_non_context_sp_pipe_br_usptp_registers, 0xae00}, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP_STR, + gen7_9_0_non_context_sp_pipe_br_hlsq_dp_str_registers, 0xae00}, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE, + gen7_9_0_non_context_sp_pipe_lpac_hlsq_state_registers, 0xaf80}, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP, + gen7_9_0_non_context_sp_pipe_lpac_sp_top_registers, 0xaf80}, + { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + gen7_9_0_non_context_sp_pipe_lpac_usptp_registers, 0xaf80}, + { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP, + gen7_9_0_non_context_tpl1_pipe_none_usptp_registers, 0xb600}, + { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + gen7_9_0_non_context_tpl1_pipe_br_usptp_registers, 0xb600}, + { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + gen7_9_0_non_context_tpl1_pipe_lpac_usptp_registers, 0xb780}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, + 
gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers, 0xa800}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, + gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers, 0xa800}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers, 0xa800}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_HLSQ_STATE, + gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers, 0xa800}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_SP_TOP, + gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers, 0xa800}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP, + gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers, 0xa800}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE, + gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers, 0xa800}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP, + gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers, 0xa800}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers, 0xa800}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_HLSQ_STATE, + gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers, 0xa800}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_SP_TOP, + gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers, 0xa800}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP, + gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE, + gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP, + gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP, + gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP_STR, + gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE, + gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_state_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_DP, + gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_dp_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP, + gen7_9_0_sp_pipe_lpac_cluster_sp_ps_sp_top_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + gen7_9_0_sp_pipe_lpac_cluster_sp_ps_usptp_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE, + gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP, + gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP, + gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800}, + { 
A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP_STR, + gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP, + gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_SP_TOP, + gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP, + gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP_STR, + gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP, + gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_SP_TOP, + gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP, + gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP_STR, + gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers, 0xb000}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP, + gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers, 0xb000}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers, 0xb000}, + { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP, + gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers, 0xb000}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP, + gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP, + gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers, 0xb000}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP, + gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP, + gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000}, + { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP, + gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000}, +}; + +static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = { + { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR, + REG_A6XX_CP_SQE_STAT_DATA, 0x00040}, + { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR, + REG_A6XX_CP_DRAW_STATE_DATA, 0x00200}, + { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR, + REG_A6XX_CP_ROQ_DBG_DATA, 0x00800}, + { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR, + REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000}, + { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR, + REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200}, + { "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR, + REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800}, + { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR, + REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x08000}, + { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR, + REG_A7XX_CP_BV_SQE_STAT_DATA, 0x00040}, + { "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TBL_DBG_ADDR, + REG_A7XX_CP_RESOURCE_TBL_DBG_DATA, 0x04100}, + { 
"CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR, + REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x00200}, + { "CP_LPAC_ROQ", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR, + REG_A7XX_CP_LPAC_ROQ_DBG_DATA, 0x00200}, + { "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR, + REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x08000}, + { "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR, + REG_A7XX_CP_SQE_AC_STAT_DATA, 0x00040}, + { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR, + REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x00040}, + { "CP_AQE_ROQ_0", REG_A7XX_CP_AQE_ROQ_DBG_ADDR_0, + REG_A7XX_CP_AQE_ROQ_DBG_DATA_0, 0x00100}, + { "CP_AQE_ROQ_1", REG_A7XX_CP_AQE_ROQ_DBG_ADDR_1, + REG_A7XX_CP_AQE_ROQ_DBG_DATA_1, 0x00100}, + { "CP_AQE_UCODE_DBG_0", REG_A7XX_CP_AQE_UCODE_DBG_ADDR_0, + REG_A7XX_CP_AQE_UCODE_DBG_DATA_0, 0x08000}, + { "CP_AQE_UCODE_DBG_1", REG_A7XX_CP_AQE_UCODE_DBG_ADDR_1, + REG_A7XX_CP_AQE_UCODE_DBG_DATA_1, 0x08000}, + { "CP_AQE_STAT_0", REG_A7XX_CP_AQE_STAT_ADDR_0, + REG_A7XX_CP_AQE_STAT_DATA_0, 0x00040}, + { "CP_AQE_STAT_1", REG_A7XX_CP_AQE_STAT_ADDR_1, + REG_A7XX_CP_AQE_STAT_DATA_1, 0x00040}, +}; + +static struct gen7_reg_list gen7_9_0_reg_list[] = { + { gen7_9_0_gpu_registers, NULL}, + { gen7_9_0_cx_misc_registers, NULL}, + { gen7_9_0_cx_dbgc_registers, NULL}, + { gen7_9_0_dbgc_registers, NULL}, + { NULL, NULL}, +}; + +static const u32 gen7_9_0_cpr_registers[] = { + 0x26800, 0x26805, 0x26808, 0x2680d, 0x26814, 0x26815, 0x2681c, 0x2681c, + 0x26820, 0x26839, 0x26840, 0x26841, 0x26848, 0x26849, 0x26850, 0x26851, + 0x26880, 0x268a1, 0x26980, 0x269b0, 0x269c0, 0x269c8, 0x269e0, 0x269ee, + 0x269fb, 0x269ff, 0x26a02, 0x26a07, 0x26a09, 0x26a0b, 0x26a10, 0x26b0f, + 0x27440, 0x27441, 0x27444, 0x27444, 0x27480, 0x274a2, 0x274ac, 0x274c4, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_cpr_registers), 8)); + +static const u32 gen7_9_0_dpm_registers[] = { + 0x1aa00, 0x1aa06, 0x1aa09, 0x1aa0a, 0x1aa0c, 0x1aa0d, 0x1aa0f, 0x1aa12, + 0x1aa14, 0x1aa47, 0x1aa50, 0x1aa51, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_dpm_registers), 8)); + +static const u32 gen7_9_0_dpm_leakage_registers[] = { + 0x21c00, 0x21c00, 0x21c08, 0x21c09, 0x21c0e, 0x21c0f, 0x21c4f, 0x21c50, + 0x21c52, 0x21c52, 0x21c54, 0x21c56, 0x21c58, 0x21c5a, 0x21c5c, 0x21c60, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_dpm_leakage_registers), 8)); + +static const u32 gen7_9_0_gfx_gpu_acd_registers[] = { + 0x18c00, 0x18c16, 0x18c20, 0x18c2d, 0x18c30, 0x18c31, 0x18c35, 0x18c35, + 0x18c37, 0x18c37, 0x18c3a, 0x18c3a, 0x18c42, 0x18c42, 0x18c56, 0x18c58, + 0x18c5b, 0x18c5d, 0x18c5f, 0x18c62, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_gfx_gpu_acd_registers), 8)); + +static const u32 gen7_9_0_gpucc_registers[] = { + 0x24000, 0x2400f, 0x24400, 0x2440f, 0x24800, 0x24805, 0x24c00, 0x24cff, + 0x25400, 0x25404, 0x25800, 0x25804, 0x25c00, 0x25c04, 0x26000, 0x26004, + 0x26400, 0x26405, 0x26414, 0x2641d, 0x2642a, 0x26430, 0x26432, 0x26434, + 0x26441, 0x2644b, 0x2644d, 0x26463, 0x26466, 0x26468, 0x26478, 0x2647a, + 0x26489, 0x2648a, 0x2649c, 0x2649e, 0x264a0, 0x264a6, 0x264c5, 0x264c7, + 0x264d6, 0x264d8, 0x264e8, 0x264e9, 0x264f9, 0x264fc, 0x2650b, 0x2650b, + 0x2651c, 0x2651e, 0x26540, 0x2654e, 0x26554, 0x26573, 0x26576, 0x2657a, + UINT_MAX, UINT_MAX, + +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_gpucc_registers), 8)); + +static const u32 gen7_9_0_isense_registers[] = { + 0x22c3a, 0x22c3c, 0x22c41, 0x22c41, 0x22c46, 0x22c47, 0x22c4c, 0x22c4c, + 0x22c51, 0x22c51, 0x22c56, 
0x22c56, 0x22c5b, 0x22c5b, 0x22c60, 0x22c60, + 0x22c65, 0x22c65, 0x22c6a, 0x22c70, 0x22c75, 0x22c75, 0x22c7a, 0x22c7a, + 0x22c7f, 0x22c7f, 0x22c84, 0x22c85, 0x22c8a, 0x22c8a, 0x22c8f, 0x22c8f, + 0x23000, 0x23009, 0x2300e, 0x2300e, 0x23013, 0x23013, 0x23018, 0x23018, + 0x2301d, 0x2301d, 0x23022, 0x23022, 0x23027, 0x23032, 0x23037, 0x23037, + 0x2303c, 0x2303c, 0x23041, 0x23041, 0x23046, 0x23046, 0x2304b, 0x2304b, + 0x23050, 0x23050, 0x23055, 0x23055, 0x2305a, 0x2305a, 0x2305f, 0x2305f, + 0x23064, 0x23064, 0x23069, 0x2306a, 0x2306f, 0x2306f, 0x23074, 0x23075, + 0x2307a, 0x2307e, 0x23083, 0x23083, 0x23088, 0x23088, 0x2308d, 0x2308d, + 0x23092, 0x23092, 0x230e2, 0x230e2, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_isense_registers), 8)); + +static const u32 gen7_9_0_rscc_registers[] = { + 0x14000, 0x14036, 0x14040, 0x14047, 0x14080, 0x14084, 0x14089, 0x1408c, + 0x14091, 0x14094, 0x14099, 0x1409c, 0x140a1, 0x140a4, 0x140a9, 0x140ac, + 0x14100, 0x14104, 0x14114, 0x14119, 0x14124, 0x14132, 0x14154, 0x1416b, + 0x14340, 0x14342, 0x14344, 0x1437c, 0x143f0, 0x143f8, 0x143fa, 0x143fe, + 0x14400, 0x14404, 0x14406, 0x1440a, 0x1440c, 0x14410, 0x14412, 0x14416, + 0x14418, 0x1441c, 0x1441e, 0x14422, 0x14424, 0x14424, 0x14498, 0x144a0, + 0x144a2, 0x144a6, 0x144a8, 0x144ac, 0x144ae, 0x144b2, 0x144b4, 0x144b8, + 0x144ba, 0x144be, 0x144c0, 0x144c4, 0x144c6, 0x144ca, 0x144cc, 0x144cc, + 0x14540, 0x14548, 0x1454a, 0x1454e, 0x14550, 0x14554, 0x14556, 0x1455a, + 0x1455c, 0x14560, 0x14562, 0x14566, 0x14568, 0x1456c, 0x1456e, 0x14572, + 0x14574, 0x14574, 0x145e8, 0x145f0, 0x145f2, 0x145f6, 0x145f8, 0x145fc, + 0x145fe, 0x14602, 0x14604, 0x14608, 0x1460a, 0x1460e, 0x14610, 0x14614, + 0x14616, 0x1461a, 0x1461c, 0x1461c, 0x14690, 0x14698, 0x1469a, 0x1469e, + 0x146a0, 0x146a4, 0x146a6, 0x146aa, 0x146ac, 0x146b0, 0x146b2, 0x146b6, + 0x146b8, 0x146bc, 0x146be, 0x146c2, 0x146c4, 0x146c4, 0x14738, 0x14740, + 0x14742, 0x14746, 0x14748, 0x1474c, 0x1474e, 0x14752, 0x14754, 0x14758, + 0x1475a, 0x1475e, 0x14760, 0x14764, 0x14766, 0x1476a, 0x1476c, 0x1476c, + 0x147e0, 0x147e8, 0x147ea, 0x147ee, 0x147f0, 0x147f4, 0x147f6, 0x147fa, + 0x147fc, 0x14800, 0x14802, 0x14806, 0x14808, 0x1480c, 0x1480e, 0x14812, + 0x14814, 0x14814, 0x14888, 0x14890, 0x14892, 0x14896, 0x14898, 0x1489c, + 0x1489e, 0x148a2, 0x148a4, 0x148a8, 0x148aa, 0x148ae, 0x148b0, 0x148b4, + 0x148b6, 0x148ba, 0x148bc, 0x148bc, 0x14930, 0x14938, 0x1493a, 0x1493e, + 0x14940, 0x14944, 0x14946, 0x1494a, 0x1494c, 0x14950, 0x14952, 0x14956, + 0x14958, 0x1495c, 0x1495e, 0x14962, 0x14964, 0x14964, + UINT_MAX, UINT_MAX, +}; +static_assert(IS_ALIGNED(sizeof(gen7_9_0_rscc_registers), 8)); + +static const u32 *gen7_9_0_external_core_regs[] = { + gen7_9_0_gpucc_registers, + gen7_9_0_gxclkctl_registers, + gen7_9_0_cpr_registers, + gen7_9_0_dpm_registers, + gen7_9_0_dpm_leakage_registers, + gen7_9_0_gfx_gpu_acd_registers, +}; +#endif /*_ADRENO_GEN7_9_0_SNAPSHOT_H */ diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h deleted file mode 100644 index 7067376e25..0000000000 --- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h +++ /dev/null @@ -1,2803 +0,0 @@ -#ifndef ADRENO_PM4_XML -#define ADRENO_PM4_XML - -/* Autogenerated file, DO NOT EDIT manually! 
- -This file was generated by the rules-ng-ng gen_header.py tool in this git repository: -http://gitlab.freedesktop.org/mesa/mesa/ -git clone https://gitlab.freedesktop.org/mesa/mesa.git - -The rules-ng-ng source files this header was generated from are: - -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85856 bytes, from Fri Feb 23 13:07:00 2024) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023) -*/ - -#ifdef __KERNEL__ -#include -#define assert(x) BUG_ON(!(x)) -#else -#include -#endif - -#ifdef __cplusplus -#define __struct_cast(X) -#else -#define __struct_cast(X) (struct X) -#endif - -enum vgt_event_type { - VS_DEALLOC = 0, - PS_DEALLOC = 1, - VS_DONE_TS = 2, - PS_DONE_TS = 3, - CACHE_FLUSH_TS = 4, - CONTEXT_DONE = 5, - CACHE_FLUSH = 6, - VIZQUERY_START = 7, - HLSQ_FLUSH = 7, - VIZQUERY_END = 8, - SC_WAIT_WC = 9, - WRITE_PRIMITIVE_COUNTS = 9, - START_PRIMITIVE_CTRS = 11, - STOP_PRIMITIVE_CTRS = 12, - RST_PIX_CNT = 13, - RST_VTX_CNT = 14, - TILE_FLUSH = 15, - STAT_EVENT = 16, - CACHE_FLUSH_AND_INV_TS_EVENT = 20, - ZPASS_DONE = 21, - CACHE_FLUSH_AND_INV_EVENT = 22, - RB_DONE_TS = 22, - PERFCOUNTER_START = 23, - PERFCOUNTER_STOP = 24, - VS_FETCH_DONE = 27, - FACENESS_FLUSH = 28, - WT_DONE_TS = 8, - START_FRAGMENT_CTRS = 13, - STOP_FRAGMENT_CTRS = 14, - START_COMPUTE_CTRS = 15, - STOP_COMPUTE_CTRS = 16, - FLUSH_SO_0 = 17, - FLUSH_SO_1 = 18, - FLUSH_SO_2 = 19, - FLUSH_SO_3 = 20, - PC_CCU_INVALIDATE_DEPTH = 24, - PC_CCU_INVALIDATE_COLOR = 25, - PC_CCU_RESOLVE_TS = 26, - PC_CCU_FLUSH_DEPTH_TS = 28, - PC_CCU_FLUSH_COLOR_TS = 29, - BLIT = 30, - LRZ_CLEAR = 37, - LRZ_FLUSH = 38, - BLIT_OP_FILL_2D = 39, - BLIT_OP_COPY_2D = 40, - UNK_40 = 40, - BLIT_OP_SCALE_2D = 42, - CONTEXT_DONE_2D = 43, - UNK_2C = 44, - UNK_2D = 45, - CACHE_INVALIDATE = 49, - LABEL = 63, - DUMMY_EVENT = 1, - CCU_INVALIDATE_DEPTH = 24, - CCU_INVALIDATE_COLOR = 25, - CCU_RESOLVE_CLEAN = 26, - CCU_FLUSH_DEPTH = 28, - CCU_FLUSH_COLOR = 29, - CCU_RESOLVE = 30, - CCU_END_RESOLVE_GROUP = 31, - CCU_CLEAN_DEPTH = 32, - CCU_CLEAN_COLOR = 33, - CACHE_RESET = 48, - CACHE_CLEAN = 49, - CACHE_FLUSH7 = 50, - CACHE_INVALIDATE7 = 51, -}; - -enum pc_di_primtype { - DI_PT_NONE = 0, - DI_PT_POINTLIST_PSIZE = 1, - DI_PT_LINELIST = 2, - DI_PT_LINESTRIP = 3, - DI_PT_TRILIST = 4, - DI_PT_TRIFAN = 5, - DI_PT_TRISTRIP = 6, - DI_PT_LINELOOP = 7, - DI_PT_RECTLIST = 8, - DI_PT_POINTLIST = 9, - DI_PT_LINE_ADJ = 10, - DI_PT_LINESTRIP_ADJ = 11, - DI_PT_TRI_ADJ = 12, - DI_PT_TRISTRIP_ADJ = 13, - DI_PT_PATCHES0 = 31, - DI_PT_PATCHES1 = 32, - DI_PT_PATCHES2 = 33, - DI_PT_PATCHES3 = 34, - DI_PT_PATCHES4 = 35, - DI_PT_PATCHES5 = 36, - DI_PT_PATCHES6 = 37, - DI_PT_PATCHES7 = 38, - DI_PT_PATCHES8 = 39, - DI_PT_PATCHES9 = 40, - DI_PT_PATCHES10 = 41, - DI_PT_PATCHES11 = 42, - DI_PT_PATCHES12 = 43, - DI_PT_PATCHES13 = 44, - DI_PT_PATCHES14 = 45, - DI_PT_PATCHES15 = 46, - DI_PT_PATCHES16 = 47, - DI_PT_PATCHES17 = 48, - DI_PT_PATCHES18 = 49, - DI_PT_PATCHES19 = 50, - DI_PT_PATCHES20 = 51, - DI_PT_PATCHES21 = 52, - DI_PT_PATCHES22 = 53, - DI_PT_PATCHES23 = 54, - DI_PT_PATCHES24 = 55, - DI_PT_PATCHES25 = 56, - DI_PT_PATCHES26 = 57, - DI_PT_PATCHES27 = 58, - DI_PT_PATCHES28 = 59, - DI_PT_PATCHES29 = 60, - DI_PT_PATCHES30 = 61, - DI_PT_PATCHES31 = 62, -}; - -enum pc_di_src_sel { - DI_SRC_SEL_DMA = 0, - DI_SRC_SEL_IMMEDIATE = 1, - DI_SRC_SEL_AUTO_INDEX = 2, - DI_SRC_SEL_AUTO_XFB = 3, -}; - -enum pc_di_face_cull_sel { - DI_FACE_CULL_NONE = 0, - DI_FACE_CULL_FETCH = 1, - 
DI_FACE_BACKFACE_CULL = 2, - DI_FACE_FRONTFACE_CULL = 3, -}; - -enum pc_di_index_size { - INDEX_SIZE_IGN = 0, - INDEX_SIZE_16_BIT = 0, - INDEX_SIZE_32_BIT = 1, - INDEX_SIZE_8_BIT = 2, - INDEX_SIZE_INVALID = 0, -}; - -enum pc_di_vis_cull_mode { - IGNORE_VISIBILITY = 0, - USE_VISIBILITY = 1, -}; - -enum adreno_pm4_packet_type { - CP_TYPE0_PKT = 0x00000000, - CP_TYPE1_PKT = 0x40000000, - CP_TYPE2_PKT = 0x80000000, - CP_TYPE3_PKT = 0xc0000000, - CP_TYPE4_PKT = 0x40000000, - CP_TYPE7_PKT = 0x70000000, -}; - -enum adreno_pm4_type3_packets { - CP_ME_INIT = 72, - CP_NOP = 16, - CP_PREEMPT_ENABLE = 28, - CP_PREEMPT_TOKEN = 30, - CP_INDIRECT_BUFFER = 63, - CP_INDIRECT_BUFFER_CHAIN = 87, - CP_INDIRECT_BUFFER_PFD = 55, - CP_WAIT_FOR_IDLE = 38, - CP_WAIT_REG_MEM = 60, - CP_WAIT_REG_EQ = 82, - CP_WAIT_REG_GTE = 83, - CP_WAIT_UNTIL_READ = 92, - CP_WAIT_IB_PFD_COMPLETE = 93, - CP_REG_RMW = 33, - CP_SET_BIN_DATA = 47, - CP_SET_BIN_DATA5 = 47, - CP_REG_TO_MEM = 62, - CP_MEM_WRITE = 61, - CP_MEM_WRITE_CNTR = 79, - CP_COND_EXEC = 68, - CP_COND_WRITE = 69, - CP_COND_WRITE5 = 69, - CP_EVENT_WRITE = 70, - CP_EVENT_WRITE7 = 70, - CP_EVENT_WRITE_SHD = 88, - CP_EVENT_WRITE_CFL = 89, - CP_EVENT_WRITE_ZPD = 91, - CP_RUN_OPENCL = 49, - CP_DRAW_INDX = 34, - CP_DRAW_INDX_2 = 54, - CP_DRAW_INDX_BIN = 52, - CP_DRAW_INDX_2_BIN = 53, - CP_VIZ_QUERY = 35, - CP_SET_STATE = 37, - CP_SET_CONSTANT = 45, - CP_IM_LOAD = 39, - CP_IM_LOAD_IMMEDIATE = 43, - CP_LOAD_CONSTANT_CONTEXT = 46, - CP_INVALIDATE_STATE = 59, - CP_SET_SHADER_BASES = 74, - CP_SET_BIN_MASK = 80, - CP_SET_BIN_SELECT = 81, - CP_CONTEXT_UPDATE = 94, - CP_INTERRUPT = 64, - CP_IM_STORE = 44, - CP_SET_DRAW_INIT_FLAGS = 75, - CP_SET_PROTECTED_MODE = 95, - CP_BOOTSTRAP_UCODE = 111, - CP_LOAD_STATE = 48, - CP_LOAD_STATE4 = 48, - CP_COND_INDIRECT_BUFFER_PFE = 58, - CP_COND_INDIRECT_BUFFER_PFD = 50, - CP_INDIRECT_BUFFER_PFE = 63, - CP_SET_BIN = 76, - CP_TEST_TWO_MEMS = 113, - CP_REG_WR_NO_CTXT = 120, - CP_RECORD_PFP_TIMESTAMP = 17, - CP_SET_SECURE_MODE = 102, - CP_WAIT_FOR_ME = 19, - CP_SET_DRAW_STATE = 67, - CP_DRAW_INDX_OFFSET = 56, - CP_DRAW_INDIRECT = 40, - CP_DRAW_INDX_INDIRECT = 41, - CP_DRAW_INDIRECT_MULTI = 42, - CP_DRAW_AUTO = 36, - CP_DRAW_PRED_ENABLE_GLOBAL = 25, - CP_DRAW_PRED_ENABLE_LOCAL = 26, - CP_DRAW_PRED_SET = 78, - CP_WIDE_REG_WRITE = 116, - CP_SCRATCH_TO_REG = 77, - CP_REG_TO_SCRATCH = 74, - CP_WAIT_MEM_WRITES = 18, - CP_COND_REG_EXEC = 71, - CP_MEM_TO_REG = 66, - CP_EXEC_CS_INDIRECT = 65, - CP_EXEC_CS = 51, - CP_PERFCOUNTER_ACTION = 80, - CP_SMMU_TABLE_UPDATE = 83, - CP_SET_MARKER = 101, - CP_SET_PSEUDO_REG = 86, - CP_CONTEXT_REG_BUNCH = 92, - CP_YIELD_ENABLE = 28, - CP_SKIP_IB2_ENABLE_GLOBAL = 29, - CP_SKIP_IB2_ENABLE_LOCAL = 35, - CP_SET_SUBDRAW_SIZE = 53, - CP_WHERE_AM_I = 98, - CP_SET_VISIBILITY_OVERRIDE = 100, - CP_PREEMPT_ENABLE_GLOBAL = 105, - CP_PREEMPT_ENABLE_LOCAL = 106, - CP_CONTEXT_SWITCH_YIELD = 107, - CP_SET_RENDER_MODE = 108, - CP_COMPUTE_CHECKPOINT = 110, - CP_MEM_TO_MEM = 115, - CP_BLIT = 44, - CP_REG_TEST = 57, - CP_SET_MODE = 99, - CP_LOAD_STATE6_GEOM = 50, - CP_LOAD_STATE6_FRAG = 52, - CP_LOAD_STATE6 = 54, - IN_IB_PREFETCH_END = 23, - IN_SUBBLK_PREFETCH = 31, - IN_INSTR_PREFETCH = 32, - IN_INSTR_MATCH = 71, - IN_CONST_PREFETCH = 73, - IN_INCR_UPDT_STATE = 85, - IN_INCR_UPDT_CONST = 86, - IN_INCR_UPDT_INSTR = 87, - PKT4 = 4, - IN_IB_END = 10, - IN_GMU_INTERRUPT = 11, - IN_PREEMPT = 15, - CP_SCRATCH_WRITE = 76, - CP_REG_TO_MEM_OFFSET_MEM = 116, - CP_REG_TO_MEM_OFFSET_REG = 114, - CP_WAIT_MEM_GTE = 20, - CP_WAIT_TWO_REGS = 112, - 
CP_MEMCPY = 117, - CP_SET_BIN_DATA5_OFFSET = 46, - CP_SET_UNK_BIN_DATA = 45, - CP_CONTEXT_SWITCH = 84, - CP_SET_CTXSWITCH_IB = 85, - CP_REG_WRITE = 109, - CP_START_BIN = 80, - CP_END_BIN = 81, - CP_PREEMPT_DISABLE = 108, - CP_WAIT_TIMESTAMP = 20, - CP_GLOBAL_TIMESTAMP = 21, - CP_LOCAL_TIMESTAMP = 22, - CP_THREAD_CONTROL = 23, - CP_RESOURCE_LIST = 24, - CP_BV_BR_COUNT_OPS = 27, - CP_MODIFY_TIMESTAMP = 28, - CP_CONTEXT_REG_BUNCH2 = 93, - CP_MEM_TO_SCRATCH_MEM = 73, - CP_FIXED_STRIDE_DRAW_TABLE = 127, - CP_RESET_CONTEXT_STATE = 31, -}; - -enum adreno_state_block { - SB_VERT_TEX = 0, - SB_VERT_MIPADDR = 1, - SB_FRAG_TEX = 2, - SB_FRAG_MIPADDR = 3, - SB_VERT_SHADER = 4, - SB_GEOM_SHADER = 5, - SB_FRAG_SHADER = 6, - SB_COMPUTE_SHADER = 7, -}; - -enum adreno_state_type { - ST_SHADER = 0, - ST_CONSTANTS = 1, -}; - -enum adreno_state_src { - SS_DIRECT = 0, - SS_INVALID_ALL_IC = 2, - SS_INVALID_PART_IC = 3, - SS_INDIRECT = 4, - SS_INDIRECT_TCM = 5, - SS_INDIRECT_STM = 6, -}; - -enum a4xx_state_block { - SB4_VS_TEX = 0, - SB4_HS_TEX = 1, - SB4_DS_TEX = 2, - SB4_GS_TEX = 3, - SB4_FS_TEX = 4, - SB4_CS_TEX = 5, - SB4_VS_SHADER = 8, - SB4_HS_SHADER = 9, - SB4_DS_SHADER = 10, - SB4_GS_SHADER = 11, - SB4_FS_SHADER = 12, - SB4_CS_SHADER = 13, - SB4_SSBO = 14, - SB4_CS_SSBO = 15, -}; - -enum a4xx_state_type { - ST4_SHADER = 0, - ST4_CONSTANTS = 1, - ST4_UBO = 2, -}; - -enum a4xx_state_src { - SS4_DIRECT = 0, - SS4_INDIRECT = 2, -}; - -enum a6xx_state_block { - SB6_VS_TEX = 0, - SB6_HS_TEX = 1, - SB6_DS_TEX = 2, - SB6_GS_TEX = 3, - SB6_FS_TEX = 4, - SB6_CS_TEX = 5, - SB6_VS_SHADER = 8, - SB6_HS_SHADER = 9, - SB6_DS_SHADER = 10, - SB6_GS_SHADER = 11, - SB6_FS_SHADER = 12, - SB6_CS_SHADER = 13, - SB6_IBO = 14, - SB6_CS_IBO = 15, -}; - -enum a6xx_state_type { - ST6_SHADER = 0, - ST6_CONSTANTS = 1, - ST6_UBO = 2, - ST6_IBO = 3, -}; - -enum a6xx_state_src { - SS6_DIRECT = 0, - SS6_BINDLESS = 1, - SS6_INDIRECT = 2, - SS6_UBO = 3, -}; - -enum a4xx_index_size { - INDEX4_SIZE_8_BIT = 0, - INDEX4_SIZE_16_BIT = 1, - INDEX4_SIZE_32_BIT = 2, -}; - -enum a6xx_patch_type { - TESS_QUADS = 0, - TESS_TRIANGLES = 1, - TESS_ISOLINES = 2, -}; - -enum a6xx_draw_indirect_opcode { - INDIRECT_OP_NORMAL = 2, - INDIRECT_OP_INDEXED = 4, - INDIRECT_OP_INDIRECT_COUNT = 6, - INDIRECT_OP_INDIRECT_COUNT_INDEXED = 7, -}; - -enum cp_draw_pred_src { - PRED_SRC_MEM = 5, -}; - -enum cp_draw_pred_test { - NE_0_PASS = 0, - EQ_0_PASS = 1, -}; - -enum cp_cond_function { - WRITE_ALWAYS = 0, - WRITE_LT = 1, - WRITE_LE = 2, - WRITE_EQ = 3, - WRITE_NE = 4, - WRITE_GE = 5, - WRITE_GT = 6, -}; - -enum poll_memory_type { - POLL_REGISTER = 0, - POLL_MEMORY = 1, - POLL_SCRATCH = 2, - POLL_ON_CHIP = 3, -}; - -enum render_mode_cmd { - BYPASS = 1, - BINNING = 2, - GMEM = 3, - BLIT2D = 5, - BLIT2DSCALE = 7, - END2D = 8, -}; - -enum event_write_src { - EV_WRITE_USER_32B = 0, - EV_WRITE_USER_64B = 1, - EV_WRITE_TIMESTAMP_SUM = 2, - EV_WRITE_ALWAYSON = 3, - EV_WRITE_REGS_CONTENT = 4, -}; - -enum event_write_dst { - EV_DST_RAM = 0, - EV_DST_ONCHIP = 1, -}; - -enum cp_blit_cmd { - BLIT_OP_FILL = 0, - BLIT_OP_COPY = 1, - BLIT_OP_SCALE = 3, -}; - -enum a6xx_marker { - RM6_BYPASS = 1, - RM6_BINNING = 2, - RM6_GMEM = 4, - RM6_ENDVIS = 5, - RM6_RESOLVE = 6, - RM6_YIELD = 7, - RM6_COMPUTE = 8, - RM6_BLIT2DSCALE = 12, - RM6_IB1LIST_START = 13, - RM6_IB1LIST_END = 14, - RM6_IFPC_ENABLE = 256, - RM6_IFPC_DISABLE = 257, -}; - -enum pseudo_reg { - SMMU_INFO = 0, - NON_SECURE_SAVE_ADDR = 1, - SECURE_SAVE_ADDR = 2, - NON_PRIV_SAVE_ADDR = 3, - COUNTER = 4, - DRAW_STRM_ADDRESS = 8, 
- DRAW_STRM_SIZE_ADDRESS = 9, - PRIM_STRM_ADDRESS = 10, - UNK_STRM_ADDRESS = 11, - UNK_STRM_SIZE_ADDRESS = 12, - BINDLESS_BASE_0_ADDR = 16, - BINDLESS_BASE_1_ADDR = 17, - BINDLESS_BASE_2_ADDR = 18, - BINDLESS_BASE_3_ADDR = 19, - BINDLESS_BASE_4_ADDR = 20, - BINDLESS_BASE_5_ADDR = 21, - BINDLESS_BASE_6_ADDR = 22, -}; - -enum source_type { - SOURCE_REG = 0, - SOURCE_SCRATCH_MEM = 1, -}; - -enum compare_mode { - PRED_TEST = 1, - REG_COMPARE = 2, - RENDER_MODE = 3, - REG_COMPARE_IMM = 4, - THREAD_MODE = 5, -}; - -enum ctxswitch_ib { - RESTORE_IB = 0, - YIELD_RESTORE_IB = 1, - SAVE_IB = 2, - RB_SAVE_IB = 3, -}; - -enum reg_tracker { - TRACK_CNTL_REG = 1, - TRACK_RENDER_CNTL = 2, - UNK_EVENT_WRITE = 4, - TRACK_LRZ = 8, -}; - -enum ts_wait_value_src { - TS_WAIT_GE_32B = 0, - TS_WAIT_GE_64B = 1, - TS_WAIT_GE_TIMESTAMP_SUM = 2, -}; - -enum ts_wait_type { - TS_WAIT_RAM = 0, - TS_WAIT_ONCHIP = 1, -}; - -enum pipe_count_op { - PIPE_CLEAR_BV_BR = 1, - PIPE_SET_BR_OFFSET = 2, - PIPE_BR_WAIT_FOR_BV = 3, - PIPE_BV_WAIT_FOR_BR = 4, -}; - -enum timestamp_op { - MODIFY_TIMESTAMP_CLEAR = 0, - MODIFY_TIMESTAMP_ADD_GLOBAL = 1, - MODIFY_TIMESTAMP_ADD_LOCAL = 2, -}; - -enum cp_thread { - CP_SET_THREAD_BR = 1, - CP_SET_THREAD_BV = 2, - CP_SET_THREAD_BOTH = 3, -}; - -#define REG_CP_LOAD_STATE_0 0x00000000 -#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff -#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0 -static inline uint32_t CP_LOAD_STATE_0_DST_OFF(uint32_t val) -{ - return ((val) << CP_LOAD_STATE_0_DST_OFF__SHIFT) & CP_LOAD_STATE_0_DST_OFF__MASK; -} -#define CP_LOAD_STATE_0_STATE_SRC__MASK 0x00070000 -#define CP_LOAD_STATE_0_STATE_SRC__SHIFT 16 -static inline uint32_t CP_LOAD_STATE_0_STATE_SRC(enum adreno_state_src val) -{ - return ((val) << CP_LOAD_STATE_0_STATE_SRC__SHIFT) & CP_LOAD_STATE_0_STATE_SRC__MASK; -} -#define CP_LOAD_STATE_0_STATE_BLOCK__MASK 0x00380000 -#define CP_LOAD_STATE_0_STATE_BLOCK__SHIFT 19 -static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val) -{ - return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK; -} -#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0xffc00000 -#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22 -static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val) -{ - return ((val) << CP_LOAD_STATE_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE_0_NUM_UNIT__MASK; -} - -#define REG_CP_LOAD_STATE_1 0x00000001 -#define CP_LOAD_STATE_1_STATE_TYPE__MASK 0x00000003 -#define CP_LOAD_STATE_1_STATE_TYPE__SHIFT 0 -static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val) -{ - return ((val) << CP_LOAD_STATE_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE_1_STATE_TYPE__MASK; -} -#define CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK 0xfffffffc -#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT 2 -static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK; -} - -#define REG_CP_LOAD_STATE4_0 0x00000000 -#define CP_LOAD_STATE4_0_DST_OFF__MASK 0x00003fff -#define CP_LOAD_STATE4_0_DST_OFF__SHIFT 0 -static inline uint32_t CP_LOAD_STATE4_0_DST_OFF(uint32_t val) -{ - return ((val) << CP_LOAD_STATE4_0_DST_OFF__SHIFT) & CP_LOAD_STATE4_0_DST_OFF__MASK; -} -#define CP_LOAD_STATE4_0_STATE_SRC__MASK 0x00030000 -#define CP_LOAD_STATE4_0_STATE_SRC__SHIFT 16 -static inline uint32_t CP_LOAD_STATE4_0_STATE_SRC(enum a4xx_state_src val) -{ - return ((val) << CP_LOAD_STATE4_0_STATE_SRC__SHIFT) & CP_LOAD_STATE4_0_STATE_SRC__MASK; -} -#define 
CP_LOAD_STATE4_0_STATE_BLOCK__MASK 0x003c0000 -#define CP_LOAD_STATE4_0_STATE_BLOCK__SHIFT 18 -static inline uint32_t CP_LOAD_STATE4_0_STATE_BLOCK(enum a4xx_state_block val) -{ - return ((val) << CP_LOAD_STATE4_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE4_0_STATE_BLOCK__MASK; -} -#define CP_LOAD_STATE4_0_NUM_UNIT__MASK 0xffc00000 -#define CP_LOAD_STATE4_0_NUM_UNIT__SHIFT 22 -static inline uint32_t CP_LOAD_STATE4_0_NUM_UNIT(uint32_t val) -{ - return ((val) << CP_LOAD_STATE4_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE4_0_NUM_UNIT__MASK; -} - -#define REG_CP_LOAD_STATE4_1 0x00000001 -#define CP_LOAD_STATE4_1_STATE_TYPE__MASK 0x00000003 -#define CP_LOAD_STATE4_1_STATE_TYPE__SHIFT 0 -static inline uint32_t CP_LOAD_STATE4_1_STATE_TYPE(enum a4xx_state_type val) -{ - return ((val) << CP_LOAD_STATE4_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE4_1_STATE_TYPE__MASK; -} -#define CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK 0xfffffffc -#define CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT 2 -static inline uint32_t CP_LOAD_STATE4_1_EXT_SRC_ADDR(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK; -} - -#define REG_CP_LOAD_STATE4_2 0x00000002 -#define CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK 0xffffffff -#define CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT 0 -static inline uint32_t CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(uint32_t val) -{ - return ((val) << CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK; -} - -#define REG_CP_LOAD_STATE6_0 0x00000000 -#define CP_LOAD_STATE6_0_DST_OFF__MASK 0x00003fff -#define CP_LOAD_STATE6_0_DST_OFF__SHIFT 0 -static inline uint32_t CP_LOAD_STATE6_0_DST_OFF(uint32_t val) -{ - return ((val) << CP_LOAD_STATE6_0_DST_OFF__SHIFT) & CP_LOAD_STATE6_0_DST_OFF__MASK; -} -#define CP_LOAD_STATE6_0_STATE_TYPE__MASK 0x0000c000 -#define CP_LOAD_STATE6_0_STATE_TYPE__SHIFT 14 -static inline uint32_t CP_LOAD_STATE6_0_STATE_TYPE(enum a6xx_state_type val) -{ - return ((val) << CP_LOAD_STATE6_0_STATE_TYPE__SHIFT) & CP_LOAD_STATE6_0_STATE_TYPE__MASK; -} -#define CP_LOAD_STATE6_0_STATE_SRC__MASK 0x00030000 -#define CP_LOAD_STATE6_0_STATE_SRC__SHIFT 16 -static inline uint32_t CP_LOAD_STATE6_0_STATE_SRC(enum a6xx_state_src val) -{ - return ((val) << CP_LOAD_STATE6_0_STATE_SRC__SHIFT) & CP_LOAD_STATE6_0_STATE_SRC__MASK; -} -#define CP_LOAD_STATE6_0_STATE_BLOCK__MASK 0x003c0000 -#define CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT 18 -static inline uint32_t CP_LOAD_STATE6_0_STATE_BLOCK(enum a6xx_state_block val) -{ - return ((val) << CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE6_0_STATE_BLOCK__MASK; -} -#define CP_LOAD_STATE6_0_NUM_UNIT__MASK 0xffc00000 -#define CP_LOAD_STATE6_0_NUM_UNIT__SHIFT 22 -static inline uint32_t CP_LOAD_STATE6_0_NUM_UNIT(uint32_t val) -{ - return ((val) << CP_LOAD_STATE6_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE6_0_NUM_UNIT__MASK; -} - -#define REG_CP_LOAD_STATE6_1 0x00000001 -#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK 0xfffffffc -#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT 2 -static inline uint32_t CP_LOAD_STATE6_1_EXT_SRC_ADDR(uint32_t val) -{ - assert(!(val & 0x3)); - return (((val >> 2)) << CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK; -} - -#define REG_CP_LOAD_STATE6_2 0x00000002 -#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK 0xffffffff -#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT 0 -static inline uint32_t CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(uint32_t val) -{ - return ((val) << CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK; -} - -#define 
REG_CP_LOAD_STATE6_EXT_SRC_ADDR 0x00000001 - -#define REG_CP_DRAW_INDX_0 0x00000000 -#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff -#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_0_VIZ_QUERY(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_0_VIZ_QUERY__MASK; -} - -#define REG_CP_DRAW_INDX_1 0x00000001 -#define CP_DRAW_INDX_1_PRIM_TYPE__MASK 0x0000003f -#define CP_DRAW_INDX_1_PRIM_TYPE__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_1_PRIM_TYPE(enum pc_di_primtype val) -{ - return ((val) << CP_DRAW_INDX_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_1_PRIM_TYPE__MASK; -} -#define CP_DRAW_INDX_1_SOURCE_SELECT__MASK 0x000000c0 -#define CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT 6 -static inline uint32_t CP_DRAW_INDX_1_SOURCE_SELECT(enum pc_di_src_sel val) -{ - return ((val) << CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_1_SOURCE_SELECT__MASK; -} -#define CP_DRAW_INDX_1_VIS_CULL__MASK 0x00000600 -#define CP_DRAW_INDX_1_VIS_CULL__SHIFT 9 -static inline uint32_t CP_DRAW_INDX_1_VIS_CULL(enum pc_di_vis_cull_mode val) -{ - return ((val) << CP_DRAW_INDX_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_1_VIS_CULL__MASK; -} -#define CP_DRAW_INDX_1_INDEX_SIZE__MASK 0x00000800 -#define CP_DRAW_INDX_1_INDEX_SIZE__SHIFT 11 -static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val) -{ - return ((val) << CP_DRAW_INDX_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_1_INDEX_SIZE__MASK; -} -#define CP_DRAW_INDX_1_NOT_EOP 0x00001000 -#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000 -#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000 -#define CP_DRAW_INDX_1_NUM_INSTANCES__MASK 0xff000000 -#define CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT 24 -static inline uint32_t CP_DRAW_INDX_1_NUM_INSTANCES(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_1_NUM_INSTANCES__MASK; -} - -#define REG_CP_DRAW_INDX_2 0x00000002 -#define CP_DRAW_INDX_2_NUM_INDICES__MASK 0xffffffff -#define CP_DRAW_INDX_2_NUM_INDICES__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK; -} - -#define REG_CP_DRAW_INDX_3 0x00000003 -#define CP_DRAW_INDX_3_INDX_BASE__MASK 0xffffffff -#define CP_DRAW_INDX_3_INDX_BASE__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_3_INDX_BASE(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_3_INDX_BASE__SHIFT) & CP_DRAW_INDX_3_INDX_BASE__MASK; -} - -#define REG_CP_DRAW_INDX_4 0x00000004 -#define CP_DRAW_INDX_4_INDX_SIZE__MASK 0xffffffff -#define CP_DRAW_INDX_4_INDX_SIZE__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_4_INDX_SIZE(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_4_INDX_SIZE__SHIFT) & CP_DRAW_INDX_4_INDX_SIZE__MASK; -} - -#define REG_CP_DRAW_INDX_2_0 0x00000000 -#define CP_DRAW_INDX_2_0_VIZ_QUERY__MASK 0xffffffff -#define CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_2_0_VIZ_QUERY(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_2_0_VIZ_QUERY__MASK; -} - -#define REG_CP_DRAW_INDX_2_1 0x00000001 -#define CP_DRAW_INDX_2_1_PRIM_TYPE__MASK 0x0000003f -#define CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_2_1_PRIM_TYPE(enum pc_di_primtype val) -{ - return ((val) << CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_2_1_PRIM_TYPE__MASK; -} -#define CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK 0x000000c0 -#define CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT 6 -static inline uint32_t CP_DRAW_INDX_2_1_SOURCE_SELECT(enum pc_di_src_sel val) -{ - return 
((val) << CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK; -} -#define CP_DRAW_INDX_2_1_VIS_CULL__MASK 0x00000600 -#define CP_DRAW_INDX_2_1_VIS_CULL__SHIFT 9 -static inline uint32_t CP_DRAW_INDX_2_1_VIS_CULL(enum pc_di_vis_cull_mode val) -{ - return ((val) << CP_DRAW_INDX_2_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_2_1_VIS_CULL__MASK; -} -#define CP_DRAW_INDX_2_1_INDEX_SIZE__MASK 0x00000800 -#define CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT 11 -static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val) -{ - return ((val) << CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_2_1_INDEX_SIZE__MASK; -} -#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000 -#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000 -#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000 -#define CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK 0xff000000 -#define CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT 24 -static inline uint32_t CP_DRAW_INDX_2_1_NUM_INSTANCES(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK; -} - -#define REG_CP_DRAW_INDX_2_2 0x00000002 -#define CP_DRAW_INDX_2_2_NUM_INDICES__MASK 0xffffffff -#define CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_2_2_NUM_INDICES(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_2_NUM_INDICES__MASK; -} - -#define REG_CP_DRAW_INDX_OFFSET_0 0x00000000 -#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK 0x0000003f -#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(enum pc_di_primtype val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK; -} -#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK 0x000000c0 -#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT 6 -static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK; -} -#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300 -#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8 -static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK; -} -#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00 -#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10 -static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK; -} -#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK 0x00003000 -#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT 12 -static inline uint32_t CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(enum a6xx_patch_type val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK; -} -#define CP_DRAW_INDX_OFFSET_0_GS_ENABLE 0x00010000 -#define CP_DRAW_INDX_OFFSET_0_TESS_ENABLE 0x00020000 - -#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001 -#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK 0xffffffff -#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK; -} - -#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002 -#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK 0xffffffff -#define 
CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK; -} - -#define REG_CP_DRAW_INDX_OFFSET_3 0x00000003 -#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK 0xffffffff -#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_OFFSET_3_FIRST_INDX(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT) & CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK; -} - -#define REG_A5XX_CP_DRAW_INDX_OFFSET_4 0x00000004 -#define A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK 0xffffffff -#define A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT 0 -static inline uint32_t A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO(uint32_t val) -{ - return ((val) << A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT) & A5XX_CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK; -} - -#define REG_A5XX_CP_DRAW_INDX_OFFSET_5 0x00000005 -#define A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK 0xffffffff -#define A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT 0 -static inline uint32_t A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI(uint32_t val) -{ - return ((val) << A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK; -} - -#define REG_A5XX_CP_DRAW_INDX_OFFSET_INDX_BASE 0x00000004 - -#define REG_A5XX_CP_DRAW_INDX_OFFSET_6 0x00000006 -#define A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK 0xffffffff -#define A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT 0 -static inline uint32_t A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES(uint32_t val) -{ - return ((val) << A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT) & A5XX_CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK; -} - -#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004 -#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK 0xffffffff -#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE(uint64_t val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK; -} - -#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005 -#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK 0xffffffff -#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT 0 -static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK; -} - -#define REG_A4XX_CP_DRAW_INDIRECT_0 0x00000000 -#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f -#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT 0 -static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val) -{ - return ((val) << A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK; -} -#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0 -#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT 6 -static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val) -{ - return ((val) << A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK; -} -#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK 0x00000300 -#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT 8 -static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val) -{ - return ((val) << A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK; -} -#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00 -#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT 10 -static inline uint32_t 
A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val) -{ - return ((val) << A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK; -} -#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK 0x00003000 -#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT 12 -static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val) -{ - return ((val) << A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK; -} -#define A4XX_CP_DRAW_INDIRECT_0_GS_ENABLE 0x00010000 -#define A4XX_CP_DRAW_INDIRECT_0_TESS_ENABLE 0x00020000 - -#define REG_A4XX_CP_DRAW_INDIRECT_1 0x00000001 -#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK 0xffffffff -#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT 0 -static inline uint32_t A4XX_CP_DRAW_INDIRECT_1_INDIRECT(uint32_t val) -{ - return ((val) << A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK; -} - -#define REG_A5XX_CP_DRAW_INDIRECT_1 0x00000001 -#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK 0xffffffff -#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT 0 -static inline uint32_t A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO(uint32_t val) -{ - return ((val) << A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK; -} - -#define REG_A5XX_CP_DRAW_INDIRECT_2 0x00000002 -#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK 0xffffffff -#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT 0 -static inline uint32_t A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI(uint32_t val) -{ - return ((val) << A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK; -} - -#define REG_A5XX_CP_DRAW_INDIRECT_INDIRECT 0x00000001 - -#define REG_A4XX_CP_DRAW_INDX_INDIRECT_0 0x00000000 -#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f -#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT 0 -static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val) -{ - return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK; -} -#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0 -#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT 6 -static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val) -{ - return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK; -} -#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK 0x00000300 -#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT 8 -static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val) -{ - return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK; -} -#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00 -#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT 10 -static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val) -{ - return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK; -} -#define A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK 0x00003000 -#define A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT 12 -static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val) -{ - return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK; -} -#define A4XX_CP_DRAW_INDX_INDIRECT_0_GS_ENABLE 0x00010000 -#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_ENABLE 
0x00020000 - -#define REG_A4XX_CP_DRAW_INDX_INDIRECT_1 0x00000001 -#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK 0xffffffff -#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT 0 -static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE(uint32_t val) -{ - return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK; -} - -#define REG_A4XX_CP_DRAW_INDX_INDIRECT_2 0x00000002 -#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK 0xffffffff -#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT 0 -static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE(uint32_t val) -{ - return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK; -} - -#define REG_A4XX_CP_DRAW_INDX_INDIRECT_3 0x00000003 -#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK 0xffffffff -#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT 0 -static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT(uint32_t val) -{ - return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK; -} - -#define REG_A5XX_CP_DRAW_INDX_INDIRECT_1 0x00000001 -#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK 0xffffffff -#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT 0 -static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO(uint32_t val) -{ - return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK; -} - -#define REG_A5XX_CP_DRAW_INDX_INDIRECT_2 0x00000002 -#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK 0xffffffff -#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT 0 -static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI(uint32_t val) -{ - return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK; -} - -#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE 0x00000001 - -#define REG_A5XX_CP_DRAW_INDX_INDIRECT_3 0x00000003 -#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK 0xffffffff -#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT 0 -static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(uint32_t val) -{ - return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK; -} - -#define REG_A5XX_CP_DRAW_INDX_INDIRECT_4 0x00000004 -#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK 0xffffffff -#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT 0 -static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO(uint32_t val) -{ - return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK; -} - -#define REG_A5XX_CP_DRAW_INDX_INDIRECT_5 0x00000005 -#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK 0xffffffff -#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT 0 -static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI(uint32_t val) -{ - return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK; -} - -#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT 0x00000004 - -#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_0 0x00000000 -#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK 0x0000003f -#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT 0 -static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE(enum pc_di_primtype val) -{ - return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT) & 
A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK; -} -#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK 0x000000c0 -#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT 6 -static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT(enum pc_di_src_sel val) -{ - return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK; -} -#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK 0x00000300 -#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT 8 -static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL(enum pc_di_vis_cull_mode val) -{ - return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK; -} -#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK 0x00000c00 -#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT 10 -static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE(enum a4xx_index_size val) -{ - return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK; -} -#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK 0x00003000 -#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT 12 -static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE(enum a6xx_patch_type val) -{ - return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK; -} -#define A6XX_CP_DRAW_INDIRECT_MULTI_0_GS_ENABLE 0x00010000 -#define A6XX_CP_DRAW_INDIRECT_MULTI_0_TESS_ENABLE 0x00020000 - -#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_1 0x00000001 -#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK 0x0000000f -#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT 0 -static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(enum a6xx_draw_indirect_opcode val) -{ - return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK; -} -#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK 0x003fff00 -#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT 8 -static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(uint32_t val) -{ - return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK; -} - -#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_DRAW_COUNT 0x00000002 - -#define REG_INDIRECT_OP_NORMAL_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000003 - -#define REG_INDIRECT_OP_NORMAL_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000005 - -#define REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_INDEX 0x00000003 - -#define REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_MAX_INDICES 0x00000005 - -#define REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000006 - -#define REG_INDIRECT_OP_INDEXED_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000008 - -#define REG_INDIRECT_OP_INDIRECT_COUNT_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000003 - -#define REG_INDIRECT_OP_INDIRECT_COUNT_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT 0x00000005 - -#define REG_INDIRECT_OP_INDIRECT_COUNT_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000007 - -#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_INDEX 0x00000003 - -#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_MAX_INDICES 0x00000005 - -#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000006 - -#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT 0x00000008 - -#define REG_INDIRECT_OP_INDIRECT_COUNT_INDEXED_CP_DRAW_INDIRECT_MULTI_STRIDE 0x0000000a - -#define REG_CP_DRAW_AUTO_0 0x00000000 -#define 
CP_DRAW_AUTO_0_PRIM_TYPE__MASK 0x0000003f -#define CP_DRAW_AUTO_0_PRIM_TYPE__SHIFT 0 -static inline uint32_t CP_DRAW_AUTO_0_PRIM_TYPE(enum pc_di_primtype val) -{ - return ((val) << CP_DRAW_AUTO_0_PRIM_TYPE__SHIFT) & CP_DRAW_AUTO_0_PRIM_TYPE__MASK; -} -#define CP_DRAW_AUTO_0_SOURCE_SELECT__MASK 0x000000c0 -#define CP_DRAW_AUTO_0_SOURCE_SELECT__SHIFT 6 -static inline uint32_t CP_DRAW_AUTO_0_SOURCE_SELECT(enum pc_di_src_sel val) -{ - return ((val) << CP_DRAW_AUTO_0_SOURCE_SELECT__SHIFT) & CP_DRAW_AUTO_0_SOURCE_SELECT__MASK; -} -#define CP_DRAW_AUTO_0_VIS_CULL__MASK 0x00000300 -#define CP_DRAW_AUTO_0_VIS_CULL__SHIFT 8 -static inline uint32_t CP_DRAW_AUTO_0_VIS_CULL(enum pc_di_vis_cull_mode val) -{ - return ((val) << CP_DRAW_AUTO_0_VIS_CULL__SHIFT) & CP_DRAW_AUTO_0_VIS_CULL__MASK; -} -#define CP_DRAW_AUTO_0_INDEX_SIZE__MASK 0x00000c00 -#define CP_DRAW_AUTO_0_INDEX_SIZE__SHIFT 10 -static inline uint32_t CP_DRAW_AUTO_0_INDEX_SIZE(enum a4xx_index_size val) -{ - return ((val) << CP_DRAW_AUTO_0_INDEX_SIZE__SHIFT) & CP_DRAW_AUTO_0_INDEX_SIZE__MASK; -} -#define CP_DRAW_AUTO_0_PATCH_TYPE__MASK 0x00003000 -#define CP_DRAW_AUTO_0_PATCH_TYPE__SHIFT 12 -static inline uint32_t CP_DRAW_AUTO_0_PATCH_TYPE(enum a6xx_patch_type val) -{ - return ((val) << CP_DRAW_AUTO_0_PATCH_TYPE__SHIFT) & CP_DRAW_AUTO_0_PATCH_TYPE__MASK; -} -#define CP_DRAW_AUTO_0_GS_ENABLE 0x00010000 -#define CP_DRAW_AUTO_0_TESS_ENABLE 0x00020000 - -#define REG_CP_DRAW_AUTO_1 0x00000001 -#define CP_DRAW_AUTO_1_NUM_INSTANCES__MASK 0xffffffff -#define CP_DRAW_AUTO_1_NUM_INSTANCES__SHIFT 0 -static inline uint32_t CP_DRAW_AUTO_1_NUM_INSTANCES(uint32_t val) -{ - return ((val) << CP_DRAW_AUTO_1_NUM_INSTANCES__SHIFT) & CP_DRAW_AUTO_1_NUM_INSTANCES__MASK; -} - -#define REG_CP_DRAW_AUTO_NUM_VERTICES_BASE 0x00000002 - -#define REG_CP_DRAW_AUTO_4 0x00000004 -#define CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__MASK 0xffffffff -#define CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__SHIFT 0 -static inline uint32_t CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET(uint32_t val) -{ - return ((val) << CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__SHIFT) & CP_DRAW_AUTO_4_NUM_VERTICES_OFFSET__MASK; -} - -#define REG_CP_DRAW_AUTO_5 0x00000005 -#define CP_DRAW_AUTO_5_STRIDE__MASK 0xffffffff -#define CP_DRAW_AUTO_5_STRIDE__SHIFT 0 -static inline uint32_t CP_DRAW_AUTO_5_STRIDE(uint32_t val) -{ - return ((val) << CP_DRAW_AUTO_5_STRIDE__SHIFT) & CP_DRAW_AUTO_5_STRIDE__MASK; -} - -#define REG_CP_DRAW_PRED_ENABLE_GLOBAL_0 0x00000000 -#define CP_DRAW_PRED_ENABLE_GLOBAL_0_ENABLE 0x00000001 - -#define REG_CP_DRAW_PRED_ENABLE_LOCAL_0 0x00000000 -#define CP_DRAW_PRED_ENABLE_LOCAL_0_ENABLE 0x00000001 - -#define REG_CP_DRAW_PRED_SET_0 0x00000000 -#define CP_DRAW_PRED_SET_0_SRC__MASK 0x000000f0 -#define CP_DRAW_PRED_SET_0_SRC__SHIFT 4 -static inline uint32_t CP_DRAW_PRED_SET_0_SRC(enum cp_draw_pred_src val) -{ - return ((val) << CP_DRAW_PRED_SET_0_SRC__SHIFT) & CP_DRAW_PRED_SET_0_SRC__MASK; -} -#define CP_DRAW_PRED_SET_0_TEST__MASK 0x00000100 -#define CP_DRAW_PRED_SET_0_TEST__SHIFT 8 -static inline uint32_t CP_DRAW_PRED_SET_0_TEST(enum cp_draw_pred_test val) -{ - return ((val) << CP_DRAW_PRED_SET_0_TEST__SHIFT) & CP_DRAW_PRED_SET_0_TEST__MASK; -} - -#define REG_CP_DRAW_PRED_SET_MEM_ADDR 0x00000001 - -#define REG_CP_SET_DRAW_STATE_(i0) (0x00000000 + 0x3*(i0)) - -static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; } -#define CP_SET_DRAW_STATE__0_COUNT__MASK 0x0000ffff -#define CP_SET_DRAW_STATE__0_COUNT__SHIFT 0 -static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val) -{ - 
return ((val) << CP_SET_DRAW_STATE__0_COUNT__SHIFT) & CP_SET_DRAW_STATE__0_COUNT__MASK; -} -#define CP_SET_DRAW_STATE__0_DIRTY 0x00010000 -#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000 -#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000 -#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000 -#define CP_SET_DRAW_STATE__0_BINNING 0x00100000 -#define CP_SET_DRAW_STATE__0_GMEM 0x00200000 -#define CP_SET_DRAW_STATE__0_SYSMEM 0x00400000 -#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000 -#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24 -static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val) -{ - return ((val) << CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE__0_GROUP_ID__MASK; -} - -static inline uint32_t REG_CP_SET_DRAW_STATE__1(uint32_t i0) { return 0x00000001 + 0x3*i0; } -#define CP_SET_DRAW_STATE__1_ADDR_LO__MASK 0xffffffff -#define CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT 0 -static inline uint32_t CP_SET_DRAW_STATE__1_ADDR_LO(uint32_t val) -{ - return ((val) << CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT) & CP_SET_DRAW_STATE__1_ADDR_LO__MASK; -} - -static inline uint32_t REG_CP_SET_DRAW_STATE__2(uint32_t i0) { return 0x00000002 + 0x3*i0; } -#define CP_SET_DRAW_STATE__2_ADDR_HI__MASK 0xffffffff -#define CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT 0 -static inline uint32_t CP_SET_DRAW_STATE__2_ADDR_HI(uint32_t val) -{ - return ((val) << CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT) & CP_SET_DRAW_STATE__2_ADDR_HI__MASK; -} - -#define REG_CP_SET_BIN_0 0x00000000 - -#define REG_CP_SET_BIN_1 0x00000001 -#define CP_SET_BIN_1_X1__MASK 0x0000ffff -#define CP_SET_BIN_1_X1__SHIFT 0 -static inline uint32_t CP_SET_BIN_1_X1(uint32_t val) -{ - return ((val) << CP_SET_BIN_1_X1__SHIFT) & CP_SET_BIN_1_X1__MASK; -} -#define CP_SET_BIN_1_Y1__MASK 0xffff0000 -#define CP_SET_BIN_1_Y1__SHIFT 16 -static inline uint32_t CP_SET_BIN_1_Y1(uint32_t val) -{ - return ((val) << CP_SET_BIN_1_Y1__SHIFT) & CP_SET_BIN_1_Y1__MASK; -} - -#define REG_CP_SET_BIN_2 0x00000002 -#define CP_SET_BIN_2_X2__MASK 0x0000ffff -#define CP_SET_BIN_2_X2__SHIFT 0 -static inline uint32_t CP_SET_BIN_2_X2(uint32_t val) -{ - return ((val) << CP_SET_BIN_2_X2__SHIFT) & CP_SET_BIN_2_X2__MASK; -} -#define CP_SET_BIN_2_Y2__MASK 0xffff0000 -#define CP_SET_BIN_2_Y2__SHIFT 16 -static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val) -{ - return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK; -} - -#define REG_CP_SET_BIN_DATA_0 0x00000000 -#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK 0xffffffff -#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT 0 -static inline uint32_t CP_SET_BIN_DATA_0_BIN_DATA_ADDR(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT) & CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK; -} - -#define REG_CP_SET_BIN_DATA_1 0x00000001 -#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK 0xffffffff -#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT 0 -static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK; -} - -#define REG_CP_SET_BIN_DATA5_0 0x00000000 -#define CP_SET_BIN_DATA5_0_VSC_SIZE__MASK 0x003f0000 -#define CP_SET_BIN_DATA5_0_VSC_SIZE__SHIFT 16 -static inline uint32_t CP_SET_BIN_DATA5_0_VSC_SIZE(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA5_0_VSC_SIZE__SHIFT) & CP_SET_BIN_DATA5_0_VSC_SIZE__MASK; -} -#define CP_SET_BIN_DATA5_0_VSC_N__MASK 0x07c00000 -#define CP_SET_BIN_DATA5_0_VSC_N__SHIFT 22 -static inline uint32_t CP_SET_BIN_DATA5_0_VSC_N(uint32_t val) -{ - return ((val) << 
CP_SET_BIN_DATA5_0_VSC_N__SHIFT) & CP_SET_BIN_DATA5_0_VSC_N__MASK; -} - -#define REG_CP_SET_BIN_DATA5_1 0x00000001 -#define CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__MASK 0xffffffff -#define CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__SHIFT 0 -static inline uint32_t CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__SHIFT) & CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__MASK; -} - -#define REG_CP_SET_BIN_DATA5_2 0x00000002 -#define CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__MASK 0xffffffff -#define CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__SHIFT 0 -static inline uint32_t CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__SHIFT) & CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__MASK; -} - -#define REG_CP_SET_BIN_DATA5_3 0x00000003 -#define CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__MASK 0xffffffff -#define CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__SHIFT 0 -static inline uint32_t CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__MASK; -} - -#define REG_CP_SET_BIN_DATA5_4 0x00000004 -#define CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK 0xffffffff -#define CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT 0 -static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK; -} - -#define REG_CP_SET_BIN_DATA5_5 0x00000005 -#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK 0xffffffff -#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT 0 -static inline uint32_t CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK; -} - -#define REG_CP_SET_BIN_DATA5_6 0x00000006 -#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK 0xffffffff -#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT 0 -static inline uint32_t CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT) & CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK; -} - -#define REG_CP_SET_BIN_DATA5_7 0x00000007 - -#define REG_CP_SET_BIN_DATA5_9 0x00000009 - -#define REG_CP_SET_BIN_DATA5_OFFSET_0 0x00000000 -#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK 0x003f0000 -#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT 16 -static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK; -} -#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK 0x07c00000 -#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT 22 -static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_N(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK; -} - -#define REG_CP_SET_BIN_DATA5_OFFSET_1 0x00000001 -#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK 0xffffffff -#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT 0 -static inline uint32_t CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK; -} - -#define REG_CP_SET_BIN_DATA5_OFFSET_2 0x00000002 -#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK 0xffffffff -#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT 0 -static inline uint32_t 
CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK; -} - -#define REG_CP_SET_BIN_DATA5_OFFSET_3 0x00000003 -#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK 0xffffffff -#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT 0 -static inline uint32_t CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET(uint32_t val) -{ - return ((val) << CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK; -} - -#define REG_CP_REG_RMW_0 0x00000000 -#define CP_REG_RMW_0_DST_REG__MASK 0x0003ffff -#define CP_REG_RMW_0_DST_REG__SHIFT 0 -static inline uint32_t CP_REG_RMW_0_DST_REG(uint32_t val) -{ - return ((val) << CP_REG_RMW_0_DST_REG__SHIFT) & CP_REG_RMW_0_DST_REG__MASK; -} -#define CP_REG_RMW_0_ROTATE__MASK 0x1f000000 -#define CP_REG_RMW_0_ROTATE__SHIFT 24 -static inline uint32_t CP_REG_RMW_0_ROTATE(uint32_t val) -{ - return ((val) << CP_REG_RMW_0_ROTATE__SHIFT) & CP_REG_RMW_0_ROTATE__MASK; -} -#define CP_REG_RMW_0_SRC1_ADD 0x20000000 -#define CP_REG_RMW_0_SRC1_IS_REG 0x40000000 -#define CP_REG_RMW_0_SRC0_IS_REG 0x80000000 - -#define REG_CP_REG_RMW_1 0x00000001 -#define CP_REG_RMW_1_SRC0__MASK 0xffffffff -#define CP_REG_RMW_1_SRC0__SHIFT 0 -static inline uint32_t CP_REG_RMW_1_SRC0(uint32_t val) -{ - return ((val) << CP_REG_RMW_1_SRC0__SHIFT) & CP_REG_RMW_1_SRC0__MASK; -} - -#define REG_CP_REG_RMW_2 0x00000002 -#define CP_REG_RMW_2_SRC1__MASK 0xffffffff -#define CP_REG_RMW_2_SRC1__SHIFT 0 -static inline uint32_t CP_REG_RMW_2_SRC1(uint32_t val) -{ - return ((val) << CP_REG_RMW_2_SRC1__SHIFT) & CP_REG_RMW_2_SRC1__MASK; -} - -#define REG_CP_REG_TO_MEM_0 0x00000000 -#define CP_REG_TO_MEM_0_REG__MASK 0x0003ffff -#define CP_REG_TO_MEM_0_REG__SHIFT 0 -static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK; -} -#define CP_REG_TO_MEM_0_CNT__MASK 0x3ffc0000 -#define CP_REG_TO_MEM_0_CNT__SHIFT 18 -static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK; -} -#define CP_REG_TO_MEM_0_64B 0x40000000 -#define CP_REG_TO_MEM_0_ACCUMULATE 0x80000000 - -#define REG_CP_REG_TO_MEM_1 0x00000001 -#define CP_REG_TO_MEM_1_DEST__MASK 0xffffffff -#define CP_REG_TO_MEM_1_DEST__SHIFT 0 -static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK; -} - -#define REG_CP_REG_TO_MEM_2 0x00000002 -#define CP_REG_TO_MEM_2_DEST_HI__MASK 0xffffffff -#define CP_REG_TO_MEM_2_DEST_HI__SHIFT 0 -static inline uint32_t CP_REG_TO_MEM_2_DEST_HI(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_2_DEST_HI__MASK; -} - -#define REG_CP_REG_TO_MEM_OFFSET_REG_0 0x00000000 -#define CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK 0x0003ffff -#define CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT 0 -static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_REG(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK; -} -#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK 0x3ffc0000 -#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT 18 -static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_CNT(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK; -} -#define CP_REG_TO_MEM_OFFSET_REG_0_64B 0x40000000 -#define CP_REG_TO_MEM_OFFSET_REG_0_ACCUMULATE 
0x80000000 - -#define REG_CP_REG_TO_MEM_OFFSET_REG_1 0x00000001 -#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK 0xffffffff -#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT 0 -static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_1_DEST(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK; -} - -#define REG_CP_REG_TO_MEM_OFFSET_REG_2 0x00000002 -#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK 0xffffffff -#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT 0 -static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK; -} - -#define REG_CP_REG_TO_MEM_OFFSET_REG_3 0x00000003 -#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK 0x0003ffff -#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT 0 -static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK; -} -#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0_SCRATCH 0x00080000 - -#define REG_CP_REG_TO_MEM_OFFSET_MEM_0 0x00000000 -#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK 0x0003ffff -#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT 0 -static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_REG(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK; -} -#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK 0x3ffc0000 -#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT 18 -static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_CNT(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK; -} -#define CP_REG_TO_MEM_OFFSET_MEM_0_64B 0x40000000 -#define CP_REG_TO_MEM_OFFSET_MEM_0_ACCUMULATE 0x80000000 - -#define REG_CP_REG_TO_MEM_OFFSET_MEM_1 0x00000001 -#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK 0xffffffff -#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT 0 -static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_1_DEST(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK; -} - -#define REG_CP_REG_TO_MEM_OFFSET_MEM_2 0x00000002 -#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK 0xffffffff -#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT 0 -static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK; -} - -#define REG_CP_REG_TO_MEM_OFFSET_MEM_3 0x00000003 -#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK 0xffffffff -#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT 0 -static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK; -} - -#define REG_CP_REG_TO_MEM_OFFSET_MEM_4 0x00000004 -#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK 0xffffffff -#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT 0 -static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI(uint32_t val) -{ - return ((val) << CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK; -} - -#define REG_CP_MEM_TO_REG_0 0x00000000 -#define CP_MEM_TO_REG_0_REG__MASK 0x0003ffff -#define CP_MEM_TO_REG_0_REG__SHIFT 0 -static inline uint32_t CP_MEM_TO_REG_0_REG(uint32_t val) -{ - return ((val) << CP_MEM_TO_REG_0_REG__SHIFT) & CP_MEM_TO_REG_0_REG__MASK; -} -#define CP_MEM_TO_REG_0_CNT__MASK 
0x3ff80000 -#define CP_MEM_TO_REG_0_CNT__SHIFT 19 -static inline uint32_t CP_MEM_TO_REG_0_CNT(uint32_t val) -{ - return ((val) << CP_MEM_TO_REG_0_CNT__SHIFT) & CP_MEM_TO_REG_0_CNT__MASK; -} -#define CP_MEM_TO_REG_0_SHIFT_BY_2 0x40000000 -#define CP_MEM_TO_REG_0_UNK31 0x80000000 - -#define REG_CP_MEM_TO_REG_1 0x00000001 -#define CP_MEM_TO_REG_1_SRC__MASK 0xffffffff -#define CP_MEM_TO_REG_1_SRC__SHIFT 0 -static inline uint32_t CP_MEM_TO_REG_1_SRC(uint32_t val) -{ - return ((val) << CP_MEM_TO_REG_1_SRC__SHIFT) & CP_MEM_TO_REG_1_SRC__MASK; -} - -#define REG_CP_MEM_TO_REG_2 0x00000002 -#define CP_MEM_TO_REG_2_SRC_HI__MASK 0xffffffff -#define CP_MEM_TO_REG_2_SRC_HI__SHIFT 0 -static inline uint32_t CP_MEM_TO_REG_2_SRC_HI(uint32_t val) -{ - return ((val) << CP_MEM_TO_REG_2_SRC_HI__SHIFT) & CP_MEM_TO_REG_2_SRC_HI__MASK; -} - -#define REG_CP_MEM_TO_MEM_0 0x00000000 -#define CP_MEM_TO_MEM_0_NEG_A 0x00000001 -#define CP_MEM_TO_MEM_0_NEG_B 0x00000002 -#define CP_MEM_TO_MEM_0_NEG_C 0x00000004 -#define CP_MEM_TO_MEM_0_DOUBLE 0x20000000 -#define CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES 0x40000000 -#define CP_MEM_TO_MEM_0_UNK31 0x80000000 - -#define REG_CP_MEMCPY_0 0x00000000 -#define CP_MEMCPY_0_DWORDS__MASK 0xffffffff -#define CP_MEMCPY_0_DWORDS__SHIFT 0 -static inline uint32_t CP_MEMCPY_0_DWORDS(uint32_t val) -{ - return ((val) << CP_MEMCPY_0_DWORDS__SHIFT) & CP_MEMCPY_0_DWORDS__MASK; -} - -#define REG_CP_MEMCPY_1 0x00000001 -#define CP_MEMCPY_1_SRC_LO__MASK 0xffffffff -#define CP_MEMCPY_1_SRC_LO__SHIFT 0 -static inline uint32_t CP_MEMCPY_1_SRC_LO(uint32_t val) -{ - return ((val) << CP_MEMCPY_1_SRC_LO__SHIFT) & CP_MEMCPY_1_SRC_LO__MASK; -} - -#define REG_CP_MEMCPY_2 0x00000002 -#define CP_MEMCPY_2_SRC_HI__MASK 0xffffffff -#define CP_MEMCPY_2_SRC_HI__SHIFT 0 -static inline uint32_t CP_MEMCPY_2_SRC_HI(uint32_t val) -{ - return ((val) << CP_MEMCPY_2_SRC_HI__SHIFT) & CP_MEMCPY_2_SRC_HI__MASK; -} - -#define REG_CP_MEMCPY_3 0x00000003 -#define CP_MEMCPY_3_DST_LO__MASK 0xffffffff -#define CP_MEMCPY_3_DST_LO__SHIFT 0 -static inline uint32_t CP_MEMCPY_3_DST_LO(uint32_t val) -{ - return ((val) << CP_MEMCPY_3_DST_LO__SHIFT) & CP_MEMCPY_3_DST_LO__MASK; -} - -#define REG_CP_MEMCPY_4 0x00000004 -#define CP_MEMCPY_4_DST_HI__MASK 0xffffffff -#define CP_MEMCPY_4_DST_HI__SHIFT 0 -static inline uint32_t CP_MEMCPY_4_DST_HI(uint32_t val) -{ - return ((val) << CP_MEMCPY_4_DST_HI__SHIFT) & CP_MEMCPY_4_DST_HI__MASK; -} - -#define REG_CP_REG_TO_SCRATCH_0 0x00000000 -#define CP_REG_TO_SCRATCH_0_REG__MASK 0x0003ffff -#define CP_REG_TO_SCRATCH_0_REG__SHIFT 0 -static inline uint32_t CP_REG_TO_SCRATCH_0_REG(uint32_t val) -{ - return ((val) << CP_REG_TO_SCRATCH_0_REG__SHIFT) & CP_REG_TO_SCRATCH_0_REG__MASK; -} -#define CP_REG_TO_SCRATCH_0_SCRATCH__MASK 0x00700000 -#define CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT 20 -static inline uint32_t CP_REG_TO_SCRATCH_0_SCRATCH(uint32_t val) -{ - return ((val) << CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT) & CP_REG_TO_SCRATCH_0_SCRATCH__MASK; -} -#define CP_REG_TO_SCRATCH_0_CNT__MASK 0x07000000 -#define CP_REG_TO_SCRATCH_0_CNT__SHIFT 24 -static inline uint32_t CP_REG_TO_SCRATCH_0_CNT(uint32_t val) -{ - return ((val) << CP_REG_TO_SCRATCH_0_CNT__SHIFT) & CP_REG_TO_SCRATCH_0_CNT__MASK; -} - -#define REG_CP_SCRATCH_TO_REG_0 0x00000000 -#define CP_SCRATCH_TO_REG_0_REG__MASK 0x0003ffff -#define CP_SCRATCH_TO_REG_0_REG__SHIFT 0 -static inline uint32_t CP_SCRATCH_TO_REG_0_REG(uint32_t val) -{ - return ((val) << CP_SCRATCH_TO_REG_0_REG__SHIFT) & CP_SCRATCH_TO_REG_0_REG__MASK; -} -#define CP_SCRATCH_TO_REG_0_UNK18 
0x00040000 -#define CP_SCRATCH_TO_REG_0_SCRATCH__MASK 0x00700000 -#define CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT 20 -static inline uint32_t CP_SCRATCH_TO_REG_0_SCRATCH(uint32_t val) -{ - return ((val) << CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT) & CP_SCRATCH_TO_REG_0_SCRATCH__MASK; -} -#define CP_SCRATCH_TO_REG_0_CNT__MASK 0x07000000 -#define CP_SCRATCH_TO_REG_0_CNT__SHIFT 24 -static inline uint32_t CP_SCRATCH_TO_REG_0_CNT(uint32_t val) -{ - return ((val) << CP_SCRATCH_TO_REG_0_CNT__SHIFT) & CP_SCRATCH_TO_REG_0_CNT__MASK; -} - -#define REG_CP_SCRATCH_WRITE_0 0x00000000 -#define CP_SCRATCH_WRITE_0_SCRATCH__MASK 0x00700000 -#define CP_SCRATCH_WRITE_0_SCRATCH__SHIFT 20 -static inline uint32_t CP_SCRATCH_WRITE_0_SCRATCH(uint32_t val) -{ - return ((val) << CP_SCRATCH_WRITE_0_SCRATCH__SHIFT) & CP_SCRATCH_WRITE_0_SCRATCH__MASK; -} - -#define REG_CP_MEM_WRITE_0 0x00000000 -#define CP_MEM_WRITE_0_ADDR_LO__MASK 0xffffffff -#define CP_MEM_WRITE_0_ADDR_LO__SHIFT 0 -static inline uint32_t CP_MEM_WRITE_0_ADDR_LO(uint32_t val) -{ - return ((val) << CP_MEM_WRITE_0_ADDR_LO__SHIFT) & CP_MEM_WRITE_0_ADDR_LO__MASK; -} - -#define REG_CP_MEM_WRITE_1 0x00000001 -#define CP_MEM_WRITE_1_ADDR_HI__MASK 0xffffffff -#define CP_MEM_WRITE_1_ADDR_HI__SHIFT 0 -static inline uint32_t CP_MEM_WRITE_1_ADDR_HI(uint32_t val) -{ - return ((val) << CP_MEM_WRITE_1_ADDR_HI__SHIFT) & CP_MEM_WRITE_1_ADDR_HI__MASK; -} - -#define REG_CP_COND_WRITE_0 0x00000000 -#define CP_COND_WRITE_0_FUNCTION__MASK 0x00000007 -#define CP_COND_WRITE_0_FUNCTION__SHIFT 0 -static inline uint32_t CP_COND_WRITE_0_FUNCTION(enum cp_cond_function val) -{ - return ((val) << CP_COND_WRITE_0_FUNCTION__SHIFT) & CP_COND_WRITE_0_FUNCTION__MASK; -} -#define CP_COND_WRITE_0_POLL_MEMORY 0x00000010 -#define CP_COND_WRITE_0_WRITE_MEMORY 0x00000100 - -#define REG_CP_COND_WRITE_1 0x00000001 -#define CP_COND_WRITE_1_POLL_ADDR__MASK 0xffffffff -#define CP_COND_WRITE_1_POLL_ADDR__SHIFT 0 -static inline uint32_t CP_COND_WRITE_1_POLL_ADDR(uint32_t val) -{ - return ((val) << CP_COND_WRITE_1_POLL_ADDR__SHIFT) & CP_COND_WRITE_1_POLL_ADDR__MASK; -} - -#define REG_CP_COND_WRITE_2 0x00000002 -#define CP_COND_WRITE_2_REF__MASK 0xffffffff -#define CP_COND_WRITE_2_REF__SHIFT 0 -static inline uint32_t CP_COND_WRITE_2_REF(uint32_t val) -{ - return ((val) << CP_COND_WRITE_2_REF__SHIFT) & CP_COND_WRITE_2_REF__MASK; -} - -#define REG_CP_COND_WRITE_3 0x00000003 -#define CP_COND_WRITE_3_MASK__MASK 0xffffffff -#define CP_COND_WRITE_3_MASK__SHIFT 0 -static inline uint32_t CP_COND_WRITE_3_MASK(uint32_t val) -{ - return ((val) << CP_COND_WRITE_3_MASK__SHIFT) & CP_COND_WRITE_3_MASK__MASK; -} - -#define REG_CP_COND_WRITE_4 0x00000004 -#define CP_COND_WRITE_4_WRITE_ADDR__MASK 0xffffffff -#define CP_COND_WRITE_4_WRITE_ADDR__SHIFT 0 -static inline uint32_t CP_COND_WRITE_4_WRITE_ADDR(uint32_t val) -{ - return ((val) << CP_COND_WRITE_4_WRITE_ADDR__SHIFT) & CP_COND_WRITE_4_WRITE_ADDR__MASK; -} - -#define REG_CP_COND_WRITE_5 0x00000005 -#define CP_COND_WRITE_5_WRITE_DATA__MASK 0xffffffff -#define CP_COND_WRITE_5_WRITE_DATA__SHIFT 0 -static inline uint32_t CP_COND_WRITE_5_WRITE_DATA(uint32_t val) -{ - return ((val) << CP_COND_WRITE_5_WRITE_DATA__SHIFT) & CP_COND_WRITE_5_WRITE_DATA__MASK; -} - -#define REG_CP_COND_WRITE5_0 0x00000000 -#define CP_COND_WRITE5_0_FUNCTION__MASK 0x00000007 -#define CP_COND_WRITE5_0_FUNCTION__SHIFT 0 -static inline uint32_t CP_COND_WRITE5_0_FUNCTION(enum cp_cond_function val) -{ - return ((val) << CP_COND_WRITE5_0_FUNCTION__SHIFT) & CP_COND_WRITE5_0_FUNCTION__MASK; -} -#define 
CP_COND_WRITE5_0_SIGNED_COMPARE 0x00000008 -#define CP_COND_WRITE5_0_POLL__MASK 0x00000030 -#define CP_COND_WRITE5_0_POLL__SHIFT 4 -static inline uint32_t CP_COND_WRITE5_0_POLL(enum poll_memory_type val) -{ - return ((val) << CP_COND_WRITE5_0_POLL__SHIFT) & CP_COND_WRITE5_0_POLL__MASK; -} -#define CP_COND_WRITE5_0_WRITE_MEMORY 0x00000100 - -#define REG_CP_COND_WRITE5_1 0x00000001 -#define CP_COND_WRITE5_1_POLL_ADDR_LO__MASK 0xffffffff -#define CP_COND_WRITE5_1_POLL_ADDR_LO__SHIFT 0 -static inline uint32_t CP_COND_WRITE5_1_POLL_ADDR_LO(uint32_t val) -{ - return ((val) << CP_COND_WRITE5_1_POLL_ADDR_LO__SHIFT) & CP_COND_WRITE5_1_POLL_ADDR_LO__MASK; -} - -#define REG_CP_COND_WRITE5_2 0x00000002 -#define CP_COND_WRITE5_2_POLL_ADDR_HI__MASK 0xffffffff -#define CP_COND_WRITE5_2_POLL_ADDR_HI__SHIFT 0 -static inline uint32_t CP_COND_WRITE5_2_POLL_ADDR_HI(uint32_t val) -{ - return ((val) << CP_COND_WRITE5_2_POLL_ADDR_HI__SHIFT) & CP_COND_WRITE5_2_POLL_ADDR_HI__MASK; -} - -#define REG_CP_COND_WRITE5_3 0x00000003 -#define CP_COND_WRITE5_3_REF__MASK 0xffffffff -#define CP_COND_WRITE5_3_REF__SHIFT 0 -static inline uint32_t CP_COND_WRITE5_3_REF(uint32_t val) -{ - return ((val) << CP_COND_WRITE5_3_REF__SHIFT) & CP_COND_WRITE5_3_REF__MASK; -} - -#define REG_CP_COND_WRITE5_4 0x00000004 -#define CP_COND_WRITE5_4_MASK__MASK 0xffffffff -#define CP_COND_WRITE5_4_MASK__SHIFT 0 -static inline uint32_t CP_COND_WRITE5_4_MASK(uint32_t val) -{ - return ((val) << CP_COND_WRITE5_4_MASK__SHIFT) & CP_COND_WRITE5_4_MASK__MASK; -} - -#define REG_CP_COND_WRITE5_5 0x00000005 -#define CP_COND_WRITE5_5_WRITE_ADDR_LO__MASK 0xffffffff -#define CP_COND_WRITE5_5_WRITE_ADDR_LO__SHIFT 0 -static inline uint32_t CP_COND_WRITE5_5_WRITE_ADDR_LO(uint32_t val) -{ - return ((val) << CP_COND_WRITE5_5_WRITE_ADDR_LO__SHIFT) & CP_COND_WRITE5_5_WRITE_ADDR_LO__MASK; -} - -#define REG_CP_COND_WRITE5_6 0x00000006 -#define CP_COND_WRITE5_6_WRITE_ADDR_HI__MASK 0xffffffff -#define CP_COND_WRITE5_6_WRITE_ADDR_HI__SHIFT 0 -static inline uint32_t CP_COND_WRITE5_6_WRITE_ADDR_HI(uint32_t val) -{ - return ((val) << CP_COND_WRITE5_6_WRITE_ADDR_HI__SHIFT) & CP_COND_WRITE5_6_WRITE_ADDR_HI__MASK; -} - -#define REG_CP_COND_WRITE5_7 0x00000007 -#define CP_COND_WRITE5_7_WRITE_DATA__MASK 0xffffffff -#define CP_COND_WRITE5_7_WRITE_DATA__SHIFT 0 -static inline uint32_t CP_COND_WRITE5_7_WRITE_DATA(uint32_t val) -{ - return ((val) << CP_COND_WRITE5_7_WRITE_DATA__SHIFT) & CP_COND_WRITE5_7_WRITE_DATA__MASK; -} - -#define REG_CP_WAIT_MEM_GTE_0 0x00000000 -#define CP_WAIT_MEM_GTE_0_RESERVED__MASK 0xffffffff -#define CP_WAIT_MEM_GTE_0_RESERVED__SHIFT 0 -static inline uint32_t CP_WAIT_MEM_GTE_0_RESERVED(uint32_t val) -{ - return ((val) << CP_WAIT_MEM_GTE_0_RESERVED__SHIFT) & CP_WAIT_MEM_GTE_0_RESERVED__MASK; -} - -#define REG_CP_WAIT_MEM_GTE_1 0x00000001 -#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK 0xffffffff -#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT 0 -static inline uint32_t CP_WAIT_MEM_GTE_1_POLL_ADDR_LO(uint32_t val) -{ - return ((val) << CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK; -} - -#define REG_CP_WAIT_MEM_GTE_2 0x00000002 -#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK 0xffffffff -#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT 0 -static inline uint32_t CP_WAIT_MEM_GTE_2_POLL_ADDR_HI(uint32_t val) -{ - return ((val) << CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK; -} - -#define REG_CP_WAIT_MEM_GTE_3 0x00000003 -#define CP_WAIT_MEM_GTE_3_REF__MASK 0xffffffff -#define CP_WAIT_MEM_GTE_3_REF__SHIFT 0 
-static inline uint32_t CP_WAIT_MEM_GTE_3_REF(uint32_t val) -{ - return ((val) << CP_WAIT_MEM_GTE_3_REF__SHIFT) & CP_WAIT_MEM_GTE_3_REF__MASK; -} - -#define REG_CP_WAIT_REG_MEM_0 0x00000000 -#define CP_WAIT_REG_MEM_0_FUNCTION__MASK 0x00000007 -#define CP_WAIT_REG_MEM_0_FUNCTION__SHIFT 0 -static inline uint32_t CP_WAIT_REG_MEM_0_FUNCTION(enum cp_cond_function val) -{ - return ((val) << CP_WAIT_REG_MEM_0_FUNCTION__SHIFT) & CP_WAIT_REG_MEM_0_FUNCTION__MASK; -} -#define CP_WAIT_REG_MEM_0_SIGNED_COMPARE 0x00000008 -#define CP_WAIT_REG_MEM_0_POLL__MASK 0x00000030 -#define CP_WAIT_REG_MEM_0_POLL__SHIFT 4 -static inline uint32_t CP_WAIT_REG_MEM_0_POLL(enum poll_memory_type val) -{ - return ((val) << CP_WAIT_REG_MEM_0_POLL__SHIFT) & CP_WAIT_REG_MEM_0_POLL__MASK; -} -#define CP_WAIT_REG_MEM_0_WRITE_MEMORY 0x00000100 - -#define REG_CP_WAIT_REG_MEM_1 0x00000001 -#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK 0xffffffff -#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT 0 -static inline uint32_t CP_WAIT_REG_MEM_1_POLL_ADDR_LO(uint32_t val) -{ - return ((val) << CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK; -} - -#define REG_CP_WAIT_REG_MEM_2 0x00000002 -#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK 0xffffffff -#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT 0 -static inline uint32_t CP_WAIT_REG_MEM_2_POLL_ADDR_HI(uint32_t val) -{ - return ((val) << CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK; -} - -#define REG_CP_WAIT_REG_MEM_3 0x00000003 -#define CP_WAIT_REG_MEM_3_REF__MASK 0xffffffff -#define CP_WAIT_REG_MEM_3_REF__SHIFT 0 -static inline uint32_t CP_WAIT_REG_MEM_3_REF(uint32_t val) -{ - return ((val) << CP_WAIT_REG_MEM_3_REF__SHIFT) & CP_WAIT_REG_MEM_3_REF__MASK; -} - -#define REG_CP_WAIT_REG_MEM_4 0x00000004 -#define CP_WAIT_REG_MEM_4_MASK__MASK 0xffffffff -#define CP_WAIT_REG_MEM_4_MASK__SHIFT 0 -static inline uint32_t CP_WAIT_REG_MEM_4_MASK(uint32_t val) -{ - return ((val) << CP_WAIT_REG_MEM_4_MASK__SHIFT) & CP_WAIT_REG_MEM_4_MASK__MASK; -} - -#define REG_CP_WAIT_REG_MEM_5 0x00000005 -#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK 0xffffffff -#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT 0 -static inline uint32_t CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(uint32_t val) -{ - return ((val) << CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT) & CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK; -} - -#define REG_CP_WAIT_TWO_REGS_0 0x00000000 -#define CP_WAIT_TWO_REGS_0_REG0__MASK 0x0003ffff -#define CP_WAIT_TWO_REGS_0_REG0__SHIFT 0 -static inline uint32_t CP_WAIT_TWO_REGS_0_REG0(uint32_t val) -{ - return ((val) << CP_WAIT_TWO_REGS_0_REG0__SHIFT) & CP_WAIT_TWO_REGS_0_REG0__MASK; -} - -#define REG_CP_WAIT_TWO_REGS_1 0x00000001 -#define CP_WAIT_TWO_REGS_1_REG1__MASK 0x0003ffff -#define CP_WAIT_TWO_REGS_1_REG1__SHIFT 0 -static inline uint32_t CP_WAIT_TWO_REGS_1_REG1(uint32_t val) -{ - return ((val) << CP_WAIT_TWO_REGS_1_REG1__SHIFT) & CP_WAIT_TWO_REGS_1_REG1__MASK; -} - -#define REG_CP_WAIT_TWO_REGS_2 0x00000002 -#define CP_WAIT_TWO_REGS_2_REF__MASK 0xffffffff -#define CP_WAIT_TWO_REGS_2_REF__SHIFT 0 -static inline uint32_t CP_WAIT_TWO_REGS_2_REF(uint32_t val) -{ - return ((val) << CP_WAIT_TWO_REGS_2_REF__SHIFT) & CP_WAIT_TWO_REGS_2_REF__MASK; -} - -#define REG_CP_DISPATCH_COMPUTE_0 0x00000000 - -#define REG_CP_DISPATCH_COMPUTE_1 0x00000001 -#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff -#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0 -static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val) -{ - return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & 
CP_DISPATCH_COMPUTE_1_X__MASK; -} - -#define REG_CP_DISPATCH_COMPUTE_2 0x00000002 -#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff -#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0 -static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val) -{ - return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK; -} - -#define REG_CP_DISPATCH_COMPUTE_3 0x00000003 -#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff -#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0 -static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val) -{ - return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK; -} - -#define REG_CP_SET_RENDER_MODE_0 0x00000000 -#define CP_SET_RENDER_MODE_0_MODE__MASK 0x000001ff -#define CP_SET_RENDER_MODE_0_MODE__SHIFT 0 -static inline uint32_t CP_SET_RENDER_MODE_0_MODE(enum render_mode_cmd val) -{ - return ((val) << CP_SET_RENDER_MODE_0_MODE__SHIFT) & CP_SET_RENDER_MODE_0_MODE__MASK; -} - -#define REG_CP_SET_RENDER_MODE_1 0x00000001 -#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff -#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0 -static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val) -{ - return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK; -} - -#define REG_CP_SET_RENDER_MODE_2 0x00000002 -#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff -#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0 -static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val) -{ - return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK; -} - -#define REG_CP_SET_RENDER_MODE_3 0x00000003 -#define CP_SET_RENDER_MODE_3_VSC_ENABLE 0x00000008 -#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010 - -#define REG_CP_SET_RENDER_MODE_4 0x00000004 - -#define REG_CP_SET_RENDER_MODE_5 0x00000005 -#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff -#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0 -static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val) -{ - return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK; -} - -#define REG_CP_SET_RENDER_MODE_6 0x00000006 -#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff -#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0 -static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val) -{ - return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK; -} - -#define REG_CP_SET_RENDER_MODE_7 0x00000007 -#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff -#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0 -static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val) -{ - return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK; -} - -#define REG_CP_COMPUTE_CHECKPOINT_0 0x00000000 -#define CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__MASK 0xffffffff -#define CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__SHIFT 0 -static inline uint32_t CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO(uint32_t val) -{ - return ((val) << CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__SHIFT) & CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__MASK; -} - -#define REG_CP_COMPUTE_CHECKPOINT_1 0x00000001 -#define CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__MASK 0xffffffff -#define CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__SHIFT 0 -static inline uint32_t CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI(uint32_t val) -{ - return ((val) << CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__MASK; -} - -#define REG_CP_COMPUTE_CHECKPOINT_2 0x00000002 - -#define REG_CP_COMPUTE_CHECKPOINT_3 0x00000003 - 
-#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004 -#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK 0xffffffff -#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT 0 -static inline uint32_t CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN(uint32_t val) -{ - return ((val) << CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK; -} - -#define REG_CP_COMPUTE_CHECKPOINT_5 0x00000005 -#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK 0xffffffff -#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT 0 -static inline uint32_t CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO(uint32_t val) -{ - return ((val) << CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT) & CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK; -} - -#define REG_CP_COMPUTE_CHECKPOINT_6 0x00000006 -#define CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK 0xffffffff -#define CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT 0 -static inline uint32_t CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI(uint32_t val) -{ - return ((val) << CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK; -} - -#define REG_CP_COMPUTE_CHECKPOINT_7 0x00000007 - -#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000 - -#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001 -#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK 0xffffffff -#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT 0 -static inline uint32_t CP_PERFCOUNTER_ACTION_1_ADDR_0_LO(uint32_t val) -{ - return ((val) << CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT) & CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK; -} - -#define REG_CP_PERFCOUNTER_ACTION_2 0x00000002 -#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK 0xffffffff -#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT 0 -static inline uint32_t CP_PERFCOUNTER_ACTION_2_ADDR_0_HI(uint32_t val) -{ - return ((val) << CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT) & CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK; -} - -#define REG_CP_EVENT_WRITE_0 0x00000000 -#define CP_EVENT_WRITE_0_EVENT__MASK 0x000000ff -#define CP_EVENT_WRITE_0_EVENT__SHIFT 0 -static inline uint32_t CP_EVENT_WRITE_0_EVENT(enum vgt_event_type val) -{ - return ((val) << CP_EVENT_WRITE_0_EVENT__SHIFT) & CP_EVENT_WRITE_0_EVENT__MASK; -} -#define CP_EVENT_WRITE_0_TIMESTAMP 0x40000000 -#define CP_EVENT_WRITE_0_IRQ 0x80000000 - -#define REG_CP_EVENT_WRITE_1 0x00000001 -#define CP_EVENT_WRITE_1_ADDR_0_LO__MASK 0xffffffff -#define CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT 0 -static inline uint32_t CP_EVENT_WRITE_1_ADDR_0_LO(uint32_t val) -{ - return ((val) << CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT) & CP_EVENT_WRITE_1_ADDR_0_LO__MASK; -} - -#define REG_CP_EVENT_WRITE_2 0x00000002 -#define CP_EVENT_WRITE_2_ADDR_0_HI__MASK 0xffffffff -#define CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT 0 -static inline uint32_t CP_EVENT_WRITE_2_ADDR_0_HI(uint32_t val) -{ - return ((val) << CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT) & CP_EVENT_WRITE_2_ADDR_0_HI__MASK; -} - -#define REG_CP_EVENT_WRITE_3 0x00000003 - -#define REG_CP_EVENT_WRITE7_0 0x00000000 -#define CP_EVENT_WRITE7_0_EVENT__MASK 0x000000ff -#define CP_EVENT_WRITE7_0_EVENT__SHIFT 0 -static inline uint32_t CP_EVENT_WRITE7_0_EVENT(enum vgt_event_type val) -{ - return ((val) << CP_EVENT_WRITE7_0_EVENT__SHIFT) & CP_EVENT_WRITE7_0_EVENT__MASK; -} -#define CP_EVENT_WRITE7_0_WRITE_SAMPLE_COUNT 0x00001000 -#define CP_EVENT_WRITE7_0_SAMPLE_COUNT_END_OFFSET 0x00002000 -#define CP_EVENT_WRITE7_0_WRITE_SAMPLE_COUNT_DIFF 0x00004000 -#define CP_EVENT_WRITE7_0_INC_BV_COUNT 0x00010000 -#define CP_EVENT_WRITE7_0_INC_BR_COUNT 0x00020000 -#define CP_EVENT_WRITE7_0_CLEAR_RENDER_RESOURCE 0x00040000 -#define CP_EVENT_WRITE7_0_CLEAR_LRZ_RESOURCE 0x00080000 
-#define CP_EVENT_WRITE7_0_WRITE_SRC__MASK 0x00700000 -#define CP_EVENT_WRITE7_0_WRITE_SRC__SHIFT 20 -static inline uint32_t CP_EVENT_WRITE7_0_WRITE_SRC(enum event_write_src val) -{ - return ((val) << CP_EVENT_WRITE7_0_WRITE_SRC__SHIFT) & CP_EVENT_WRITE7_0_WRITE_SRC__MASK; -} -#define CP_EVENT_WRITE7_0_WRITE_DST__MASK 0x01000000 -#define CP_EVENT_WRITE7_0_WRITE_DST__SHIFT 24 -static inline uint32_t CP_EVENT_WRITE7_0_WRITE_DST(enum event_write_dst val) -{ - return ((val) << CP_EVENT_WRITE7_0_WRITE_DST__SHIFT) & CP_EVENT_WRITE7_0_WRITE_DST__MASK; -} -#define CP_EVENT_WRITE7_0_WRITE_ENABLED 0x08000000 - -#define REG_EV_DST_RAM_CP_EVENT_WRITE7_1 0x00000001 -#define EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO__MASK 0xffffffff -#define EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO__SHIFT 0 -static inline uint32_t EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO(uint32_t val) -{ - return ((val) << EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO__SHIFT) & EV_DST_RAM_CP_EVENT_WRITE7_1_ADDR_0_LO__MASK; -} - -#define REG_EV_DST_RAM_CP_EVENT_WRITE7_2 0x00000002 -#define EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI__MASK 0xffffffff -#define EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI__SHIFT 0 -static inline uint32_t EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI(uint32_t val) -{ - return ((val) << EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI__SHIFT) & EV_DST_RAM_CP_EVENT_WRITE7_2_ADDR_0_HI__MASK; -} - -#define REG_EV_DST_RAM_CP_EVENT_WRITE7_3 0x00000003 -#define EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK 0xffffffff -#define EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT 0 -static inline uint32_t EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0(uint32_t val) -{ - return ((val) << EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT) & EV_DST_RAM_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK; -} - -#define REG_EV_DST_RAM_CP_EVENT_WRITE7_4 0x00000004 -#define EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK 0xffffffff -#define EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT 0 -static inline uint32_t EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1(uint32_t val) -{ - return ((val) << EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT) & EV_DST_RAM_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK; -} - -#define REG_EV_DST_ONCHIP_CP_EVENT_WRITE7_1 0x00000001 -#define EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__MASK 0xffffffff -#define EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__SHIFT 0 -static inline uint32_t EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0(uint32_t val) -{ - return ((val) << EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__SHIFT) & EV_DST_ONCHIP_CP_EVENT_WRITE7_1_ONCHIP_ADDR_0__MASK; -} - -#define REG_EV_DST_ONCHIP_CP_EVENT_WRITE7_3 0x00000003 -#define EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK 0xffffffff -#define EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT 0 -static inline uint32_t EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0(uint32_t val) -{ - return ((val) << EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__SHIFT) & EV_DST_ONCHIP_CP_EVENT_WRITE7_3_PAYLOAD_0__MASK; -} - -#define REG_EV_DST_ONCHIP_CP_EVENT_WRITE7_4 0x00000004 -#define EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK 0xffffffff -#define EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT 0 -static inline uint32_t EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1(uint32_t val) -{ - return ((val) << EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__SHIFT) & EV_DST_ONCHIP_CP_EVENT_WRITE7_4_PAYLOAD_1__MASK; -} - -#define REG_CP_BLIT_0 0x00000000 -#define CP_BLIT_0_OP__MASK 0x0000000f -#define CP_BLIT_0_OP__SHIFT 0 -static inline uint32_t CP_BLIT_0_OP(enum cp_blit_cmd val) -{ - return ((val) << CP_BLIT_0_OP__SHIFT) & CP_BLIT_0_OP__MASK; -} - -#define 
REG_CP_BLIT_1 0x00000001 -#define CP_BLIT_1_SRC_X1__MASK 0x00003fff -#define CP_BLIT_1_SRC_X1__SHIFT 0 -static inline uint32_t CP_BLIT_1_SRC_X1(uint32_t val) -{ - return ((val) << CP_BLIT_1_SRC_X1__SHIFT) & CP_BLIT_1_SRC_X1__MASK; -} -#define CP_BLIT_1_SRC_Y1__MASK 0x3fff0000 -#define CP_BLIT_1_SRC_Y1__SHIFT 16 -static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val) -{ - return ((val) << CP_BLIT_1_SRC_Y1__SHIFT) & CP_BLIT_1_SRC_Y1__MASK; -} - -#define REG_CP_BLIT_2 0x00000002 -#define CP_BLIT_2_SRC_X2__MASK 0x00003fff -#define CP_BLIT_2_SRC_X2__SHIFT 0 -static inline uint32_t CP_BLIT_2_SRC_X2(uint32_t val) -{ - return ((val) << CP_BLIT_2_SRC_X2__SHIFT) & CP_BLIT_2_SRC_X2__MASK; -} -#define CP_BLIT_2_SRC_Y2__MASK 0x3fff0000 -#define CP_BLIT_2_SRC_Y2__SHIFT 16 -static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val) -{ - return ((val) << CP_BLIT_2_SRC_Y2__SHIFT) & CP_BLIT_2_SRC_Y2__MASK; -} - -#define REG_CP_BLIT_3 0x00000003 -#define CP_BLIT_3_DST_X1__MASK 0x00003fff -#define CP_BLIT_3_DST_X1__SHIFT 0 -static inline uint32_t CP_BLIT_3_DST_X1(uint32_t val) -{ - return ((val) << CP_BLIT_3_DST_X1__SHIFT) & CP_BLIT_3_DST_X1__MASK; -} -#define CP_BLIT_3_DST_Y1__MASK 0x3fff0000 -#define CP_BLIT_3_DST_Y1__SHIFT 16 -static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val) -{ - return ((val) << CP_BLIT_3_DST_Y1__SHIFT) & CP_BLIT_3_DST_Y1__MASK; -} - -#define REG_CP_BLIT_4 0x00000004 -#define CP_BLIT_4_DST_X2__MASK 0x00003fff -#define CP_BLIT_4_DST_X2__SHIFT 0 -static inline uint32_t CP_BLIT_4_DST_X2(uint32_t val) -{ - return ((val) << CP_BLIT_4_DST_X2__SHIFT) & CP_BLIT_4_DST_X2__MASK; -} -#define CP_BLIT_4_DST_Y2__MASK 0x3fff0000 -#define CP_BLIT_4_DST_Y2__SHIFT 16 -static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val) -{ - return ((val) << CP_BLIT_4_DST_Y2__SHIFT) & CP_BLIT_4_DST_Y2__MASK; -} - -#define REG_CP_EXEC_CS_0 0x00000000 - -#define REG_CP_EXEC_CS_1 0x00000001 -#define CP_EXEC_CS_1_NGROUPS_X__MASK 0xffffffff -#define CP_EXEC_CS_1_NGROUPS_X__SHIFT 0 -static inline uint32_t CP_EXEC_CS_1_NGROUPS_X(uint32_t val) -{ - return ((val) << CP_EXEC_CS_1_NGROUPS_X__SHIFT) & CP_EXEC_CS_1_NGROUPS_X__MASK; -} - -#define REG_CP_EXEC_CS_2 0x00000002 -#define CP_EXEC_CS_2_NGROUPS_Y__MASK 0xffffffff -#define CP_EXEC_CS_2_NGROUPS_Y__SHIFT 0 -static inline uint32_t CP_EXEC_CS_2_NGROUPS_Y(uint32_t val) -{ - return ((val) << CP_EXEC_CS_2_NGROUPS_Y__SHIFT) & CP_EXEC_CS_2_NGROUPS_Y__MASK; -} - -#define REG_CP_EXEC_CS_3 0x00000003 -#define CP_EXEC_CS_3_NGROUPS_Z__MASK 0xffffffff -#define CP_EXEC_CS_3_NGROUPS_Z__SHIFT 0 -static inline uint32_t CP_EXEC_CS_3_NGROUPS_Z(uint32_t val) -{ - return ((val) << CP_EXEC_CS_3_NGROUPS_Z__SHIFT) & CP_EXEC_CS_3_NGROUPS_Z__MASK; -} - -#define REG_A4XX_CP_EXEC_CS_INDIRECT_0 0x00000000 - -#define REG_A4XX_CP_EXEC_CS_INDIRECT_1 0x00000001 -#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK 0xffffffff -#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT 0 -static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_1_ADDR(uint32_t val) -{ - return ((val) << A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK; -} - -#define REG_A4XX_CP_EXEC_CS_INDIRECT_2 0x00000002 -#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK 0x00000ffc -#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT 2 -static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX(uint32_t val) -{ - return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK; -} -#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK 0x003ff000 -#define 
A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT 12 -static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY(uint32_t val) -{ - return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK; -} -#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK 0xffc00000 -#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT 22 -static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ(uint32_t val) -{ - return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK; -} - -#define REG_A5XX_CP_EXEC_CS_INDIRECT_1 0x00000001 -#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK 0xffffffff -#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT 0 -static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO(uint32_t val) -{ - return ((val) << A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK; -} - -#define REG_A5XX_CP_EXEC_CS_INDIRECT_2 0x00000002 -#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK 0xffffffff -#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT 0 -static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI(uint32_t val) -{ - return ((val) << A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK; -} - -#define REG_A5XX_CP_EXEC_CS_INDIRECT_3 0x00000003 -#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK 0x00000ffc -#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT 2 -static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(uint32_t val) -{ - return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK; -} -#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK 0x003ff000 -#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT 12 -static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(uint32_t val) -{ - return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK; -} -#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK 0xffc00000 -#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT 22 -static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(uint32_t val) -{ - return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK; -} - -#define REG_A6XX_CP_SET_MARKER_0 0x00000000 -#define A6XX_CP_SET_MARKER_0_MODE__MASK 0x000001ff -#define A6XX_CP_SET_MARKER_0_MODE__SHIFT 0 -static inline uint32_t A6XX_CP_SET_MARKER_0_MODE(enum a6xx_marker val) -{ - return ((val) << A6XX_CP_SET_MARKER_0_MODE__SHIFT) & A6XX_CP_SET_MARKER_0_MODE__MASK; -} -#define A6XX_CP_SET_MARKER_0_MARKER__MASK 0x0000000f -#define A6XX_CP_SET_MARKER_0_MARKER__SHIFT 0 -static inline uint32_t A6XX_CP_SET_MARKER_0_MARKER(enum a6xx_marker val) -{ - return ((val) << A6XX_CP_SET_MARKER_0_MARKER__SHIFT) & A6XX_CP_SET_MARKER_0_MARKER__MASK; -} - -#define REG_A6XX_CP_SET_PSEUDO_REG_(i0) (0x00000000 + 0x3*(i0)) - -static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; } -#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK 0x000007ff -#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT 0 -static inline uint32_t A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val) -{ - return ((val) << A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT) & A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK; -} - -static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__1(uint32_t i0) { return 0x00000001 + 0x3*i0; } -#define A6XX_CP_SET_PSEUDO_REG__1_LO__MASK 0xffffffff -#define A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT 0 -static inline uint32_t 
A6XX_CP_SET_PSEUDO_REG__1_LO(uint32_t val) -{ - return ((val) << A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT) & A6XX_CP_SET_PSEUDO_REG__1_LO__MASK; -} - -static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__2(uint32_t i0) { return 0x00000002 + 0x3*i0; } -#define A6XX_CP_SET_PSEUDO_REG__2_HI__MASK 0xffffffff -#define A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT 0 -static inline uint32_t A6XX_CP_SET_PSEUDO_REG__2_HI(uint32_t val) -{ - return ((val) << A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT) & A6XX_CP_SET_PSEUDO_REG__2_HI__MASK; -} - -#define REG_A6XX_CP_REG_TEST_0 0x00000000 -#define A6XX_CP_REG_TEST_0_REG__MASK 0x0003ffff -#define A6XX_CP_REG_TEST_0_REG__SHIFT 0 -static inline uint32_t A6XX_CP_REG_TEST_0_REG(uint32_t val) -{ - return ((val) << A6XX_CP_REG_TEST_0_REG__SHIFT) & A6XX_CP_REG_TEST_0_REG__MASK; -} -#define A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__MASK 0x0003ffff -#define A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__SHIFT 0 -static inline uint32_t A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET(uint32_t val) -{ - return ((val) << A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__SHIFT) & A6XX_CP_REG_TEST_0_SCRATCH_MEM_OFFSET__MASK; -} -#define A6XX_CP_REG_TEST_0_SOURCE__MASK 0x00040000 -#define A6XX_CP_REG_TEST_0_SOURCE__SHIFT 18 -static inline uint32_t A6XX_CP_REG_TEST_0_SOURCE(enum source_type val) -{ - return ((val) << A6XX_CP_REG_TEST_0_SOURCE__SHIFT) & A6XX_CP_REG_TEST_0_SOURCE__MASK; -} -#define A6XX_CP_REG_TEST_0_BIT__MASK 0x01f00000 -#define A6XX_CP_REG_TEST_0_BIT__SHIFT 20 -static inline uint32_t A6XX_CP_REG_TEST_0_BIT(uint32_t val) -{ - return ((val) << A6XX_CP_REG_TEST_0_BIT__SHIFT) & A6XX_CP_REG_TEST_0_BIT__MASK; -} -#define A6XX_CP_REG_TEST_0_SKIP_WAIT_FOR_ME 0x02000000 -#define A6XX_CP_REG_TEST_0_PRED_BIT__MASK 0x7c000000 -#define A6XX_CP_REG_TEST_0_PRED_BIT__SHIFT 26 -static inline uint32_t A6XX_CP_REG_TEST_0_PRED_BIT(uint32_t val) -{ - return ((val) << A6XX_CP_REG_TEST_0_PRED_BIT__SHIFT) & A6XX_CP_REG_TEST_0_PRED_BIT__MASK; -} -#define A6XX_CP_REG_TEST_0_PRED_UPDATE 0x80000000 - -#define REG_A6XX_CP_REG_TEST_PRED_MASK 0x00000001 - -#define REG_A6XX_CP_REG_TEST_PRED_VAL 0x00000002 - -#define REG_CP_COND_REG_EXEC_0 0x00000000 -#define CP_COND_REG_EXEC_0_REG0__MASK 0x0003ffff -#define CP_COND_REG_EXEC_0_REG0__SHIFT 0 -static inline uint32_t CP_COND_REG_EXEC_0_REG0(uint32_t val) -{ - return ((val) << CP_COND_REG_EXEC_0_REG0__SHIFT) & CP_COND_REG_EXEC_0_REG0__MASK; -} -#define CP_COND_REG_EXEC_0_PRED_BIT__MASK 0x007c0000 -#define CP_COND_REG_EXEC_0_PRED_BIT__SHIFT 18 -static inline uint32_t CP_COND_REG_EXEC_0_PRED_BIT(uint32_t val) -{ - return ((val) << CP_COND_REG_EXEC_0_PRED_BIT__SHIFT) & CP_COND_REG_EXEC_0_PRED_BIT__MASK; -} -#define CP_COND_REG_EXEC_0_SKIP_WAIT_FOR_ME 0x00800000 -#define CP_COND_REG_EXEC_0_ONCHIP_MEM 0x01000000 -#define CP_COND_REG_EXEC_0_BINNING 0x02000000 -#define CP_COND_REG_EXEC_0_GMEM 0x04000000 -#define CP_COND_REG_EXEC_0_SYSMEM 0x08000000 -#define CP_COND_REG_EXEC_0_BV 0x02000000 -#define CP_COND_REG_EXEC_0_BR 0x04000000 -#define CP_COND_REG_EXEC_0_LPAC 0x08000000 -#define CP_COND_REG_EXEC_0_MODE__MASK 0xf0000000 -#define CP_COND_REG_EXEC_0_MODE__SHIFT 28 -static inline uint32_t CP_COND_REG_EXEC_0_MODE(enum compare_mode val) -{ - return ((val) << CP_COND_REG_EXEC_0_MODE__SHIFT) & CP_COND_REG_EXEC_0_MODE__MASK; -} - -#define REG_PRED_TEST_CP_COND_REG_EXEC_1 0x00000001 -#define PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__MASK 0x00ffffff -#define PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__SHIFT 0 -static inline uint32_t PRED_TEST_CP_COND_REG_EXEC_1_DWORDS(uint32_t val) -{ - return ((val) << 
PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__SHIFT) & PRED_TEST_CP_COND_REG_EXEC_1_DWORDS__MASK; -} - -#define REG_REG_COMPARE_CP_COND_REG_EXEC_1 0x00000001 -#define REG_COMPARE_CP_COND_REG_EXEC_1_REG1__MASK 0x0003ffff -#define REG_COMPARE_CP_COND_REG_EXEC_1_REG1__SHIFT 0 -static inline uint32_t REG_COMPARE_CP_COND_REG_EXEC_1_REG1(uint32_t val) -{ - return ((val) << REG_COMPARE_CP_COND_REG_EXEC_1_REG1__SHIFT) & REG_COMPARE_CP_COND_REG_EXEC_1_REG1__MASK; -} -#define REG_COMPARE_CP_COND_REG_EXEC_1_ONCHIP_MEM 0x01000000 - -#define REG_RENDER_MODE_CP_COND_REG_EXEC_1 0x00000001 -#define RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK 0x00ffffff -#define RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT 0 -static inline uint32_t RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS(uint32_t val) -{ - return ((val) << RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT) & RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK; -} - -#define REG_REG_COMPARE_IMM_CP_COND_REG_EXEC_1 0x00000001 -#define REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__MASK 0xffffffff -#define REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__SHIFT 0 -static inline uint32_t REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM(uint32_t val) -{ - return ((val) << REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__SHIFT) & REG_COMPARE_IMM_CP_COND_REG_EXEC_1_IMM__MASK; -} - -#define REG_THREAD_MODE_CP_COND_REG_EXEC_1 0x00000001 -#define THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK 0x00ffffff -#define THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT 0 -static inline uint32_t THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS(uint32_t val) -{ - return ((val) << THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__SHIFT) & THREAD_MODE_CP_COND_REG_EXEC_1_DWORDS__MASK; -} - -#define REG_CP_COND_REG_EXEC_2 0x00000002 -#define CP_COND_REG_EXEC_2_DWORDS__MASK 0x00ffffff -#define CP_COND_REG_EXEC_2_DWORDS__SHIFT 0 -static inline uint32_t CP_COND_REG_EXEC_2_DWORDS(uint32_t val) -{ - return ((val) << CP_COND_REG_EXEC_2_DWORDS__SHIFT) & CP_COND_REG_EXEC_2_DWORDS__MASK; -} - -#define REG_CP_COND_EXEC_0 0x00000000 -#define CP_COND_EXEC_0_ADDR0_LO__MASK 0xffffffff -#define CP_COND_EXEC_0_ADDR0_LO__SHIFT 0 -static inline uint32_t CP_COND_EXEC_0_ADDR0_LO(uint32_t val) -{ - return ((val) << CP_COND_EXEC_0_ADDR0_LO__SHIFT) & CP_COND_EXEC_0_ADDR0_LO__MASK; -} - -#define REG_CP_COND_EXEC_1 0x00000001 -#define CP_COND_EXEC_1_ADDR0_HI__MASK 0xffffffff -#define CP_COND_EXEC_1_ADDR0_HI__SHIFT 0 -static inline uint32_t CP_COND_EXEC_1_ADDR0_HI(uint32_t val) -{ - return ((val) << CP_COND_EXEC_1_ADDR0_HI__SHIFT) & CP_COND_EXEC_1_ADDR0_HI__MASK; -} - -#define REG_CP_COND_EXEC_2 0x00000002 -#define CP_COND_EXEC_2_ADDR1_LO__MASK 0xffffffff -#define CP_COND_EXEC_2_ADDR1_LO__SHIFT 0 -static inline uint32_t CP_COND_EXEC_2_ADDR1_LO(uint32_t val) -{ - return ((val) << CP_COND_EXEC_2_ADDR1_LO__SHIFT) & CP_COND_EXEC_2_ADDR1_LO__MASK; -} - -#define REG_CP_COND_EXEC_3 0x00000003 -#define CP_COND_EXEC_3_ADDR1_HI__MASK 0xffffffff -#define CP_COND_EXEC_3_ADDR1_HI__SHIFT 0 -static inline uint32_t CP_COND_EXEC_3_ADDR1_HI(uint32_t val) -{ - return ((val) << CP_COND_EXEC_3_ADDR1_HI__SHIFT) & CP_COND_EXEC_3_ADDR1_HI__MASK; -} - -#define REG_CP_COND_EXEC_4 0x00000004 -#define CP_COND_EXEC_4_REF__MASK 0xffffffff -#define CP_COND_EXEC_4_REF__SHIFT 0 -static inline uint32_t CP_COND_EXEC_4_REF(uint32_t val) -{ - return ((val) << CP_COND_EXEC_4_REF__SHIFT) & CP_COND_EXEC_4_REF__MASK; -} - -#define REG_CP_COND_EXEC_5 0x00000005 -#define CP_COND_EXEC_5_DWORDS__MASK 0xffffffff -#define CP_COND_EXEC_5_DWORDS__SHIFT 0 -static inline uint32_t CP_COND_EXEC_5_DWORDS(uint32_t val) -{ - return ((val) << 
CP_COND_EXEC_5_DWORDS__SHIFT) & CP_COND_EXEC_5_DWORDS__MASK; -} - -#define REG_CP_SET_CTXSWITCH_IB_0 0x00000000 -#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK 0xffffffff -#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT 0 -static inline uint32_t CP_SET_CTXSWITCH_IB_0_ADDR_LO(uint32_t val) -{ - return ((val) << CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT) & CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK; -} - -#define REG_CP_SET_CTXSWITCH_IB_1 0x00000001 -#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK 0xffffffff -#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT 0 -static inline uint32_t CP_SET_CTXSWITCH_IB_1_ADDR_HI(uint32_t val) -{ - return ((val) << CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT) & CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK; -} - -#define REG_CP_SET_CTXSWITCH_IB_2 0x00000002 -#define CP_SET_CTXSWITCH_IB_2_DWORDS__MASK 0x000fffff -#define CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT 0 -static inline uint32_t CP_SET_CTXSWITCH_IB_2_DWORDS(uint32_t val) -{ - return ((val) << CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT) & CP_SET_CTXSWITCH_IB_2_DWORDS__MASK; -} -#define CP_SET_CTXSWITCH_IB_2_TYPE__MASK 0x00300000 -#define CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT 20 -static inline uint32_t CP_SET_CTXSWITCH_IB_2_TYPE(enum ctxswitch_ib val) -{ - return ((val) << CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT) & CP_SET_CTXSWITCH_IB_2_TYPE__MASK; -} - -#define REG_CP_REG_WRITE_0 0x00000000 -#define CP_REG_WRITE_0_TRACKER__MASK 0x0000000f -#define CP_REG_WRITE_0_TRACKER__SHIFT 0 -static inline uint32_t CP_REG_WRITE_0_TRACKER(enum reg_tracker val) -{ - return ((val) << CP_REG_WRITE_0_TRACKER__SHIFT) & CP_REG_WRITE_0_TRACKER__MASK; -} - -#define REG_CP_REG_WRITE_1 0x00000001 - -#define REG_CP_REG_WRITE_2 0x00000002 - -#define REG_CP_SMMU_TABLE_UPDATE_0 0x00000000 -#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK 0xffffffff -#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT 0 -static inline uint32_t CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(uint32_t val) -{ - return ((val) << CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT) & CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK; -} - -#define REG_CP_SMMU_TABLE_UPDATE_1 0x00000001 -#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK 0x0000ffff -#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT 0 -static inline uint32_t CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(uint32_t val) -{ - return ((val) << CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT) & CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK; -} -#define CP_SMMU_TABLE_UPDATE_1_ASID__MASK 0xffff0000 -#define CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT 16 -static inline uint32_t CP_SMMU_TABLE_UPDATE_1_ASID(uint32_t val) -{ - return ((val) << CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT) & CP_SMMU_TABLE_UPDATE_1_ASID__MASK; -} - -#define REG_CP_SMMU_TABLE_UPDATE_2 0x00000002 -#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK 0xffffffff -#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT 0 -static inline uint32_t CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(uint32_t val) -{ - return ((val) << CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT) & CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK; -} - -#define REG_CP_SMMU_TABLE_UPDATE_3 0x00000003 -#define CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK 0xffffffff -#define CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT 0 -static inline uint32_t CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(uint32_t val) -{ - return ((val) << CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT) & CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK; -} - -#define REG_CP_START_BIN_BIN_COUNT 0x00000000 - -#define REG_CP_START_BIN_PREFIX_ADDR 0x00000001 - -#define REG_CP_START_BIN_PREFIX_DWORDS 0x00000003 - -#define REG_CP_START_BIN_BODY_DWORDS 0x00000004 - -#define REG_CP_WAIT_TIMESTAMP_0 0x00000000 -#define 
CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__MASK 0x00000003 -#define CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__SHIFT 0 -static inline uint32_t CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC(enum ts_wait_value_src val) -{ - return ((val) << CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__SHIFT) & CP_WAIT_TIMESTAMP_0_WAIT_VALUE_SRC__MASK; -} -#define CP_WAIT_TIMESTAMP_0_WAIT_DST__MASK 0x00000010 -#define CP_WAIT_TIMESTAMP_0_WAIT_DST__SHIFT 4 -static inline uint32_t CP_WAIT_TIMESTAMP_0_WAIT_DST(enum ts_wait_type val) -{ - return ((val) << CP_WAIT_TIMESTAMP_0_WAIT_DST__SHIFT) & CP_WAIT_TIMESTAMP_0_WAIT_DST__MASK; -} - -#define REG_TS_WAIT_RAM_CP_WAIT_TIMESTAMP_ADDR 0x00000001 - -#define REG_TS_WAIT_ONCHIP_CP_WAIT_TIMESTAMP_ONCHIP_ADDR_0 0x00000001 - -#define REG_CP_WAIT_TIMESTAMP_SRC_0 0x00000003 - -#define REG_CP_WAIT_TIMESTAMP_SRC_1 0x00000004 - -#define REG_CP_BV_BR_COUNT_OPS_0 0x00000000 -#define CP_BV_BR_COUNT_OPS_0_OP__MASK 0x0000000f -#define CP_BV_BR_COUNT_OPS_0_OP__SHIFT 0 -static inline uint32_t CP_BV_BR_COUNT_OPS_0_OP(enum pipe_count_op val) -{ - return ((val) << CP_BV_BR_COUNT_OPS_0_OP__SHIFT) & CP_BV_BR_COUNT_OPS_0_OP__MASK; -} - -#define REG_CP_BV_BR_COUNT_OPS_1 0x00000001 -#define CP_BV_BR_COUNT_OPS_1_BR_OFFSET__MASK 0x0000ffff -#define CP_BV_BR_COUNT_OPS_1_BR_OFFSET__SHIFT 0 -static inline uint32_t CP_BV_BR_COUNT_OPS_1_BR_OFFSET(uint32_t val) -{ - return ((val) << CP_BV_BR_COUNT_OPS_1_BR_OFFSET__SHIFT) & CP_BV_BR_COUNT_OPS_1_BR_OFFSET__MASK; -} - -#define REG_CP_MODIFY_TIMESTAMP_0 0x00000000 -#define CP_MODIFY_TIMESTAMP_0_ADD__MASK 0x000000ff -#define CP_MODIFY_TIMESTAMP_0_ADD__SHIFT 0 -static inline uint32_t CP_MODIFY_TIMESTAMP_0_ADD(uint32_t val) -{ - return ((val) << CP_MODIFY_TIMESTAMP_0_ADD__SHIFT) & CP_MODIFY_TIMESTAMP_0_ADD__MASK; -} -#define CP_MODIFY_TIMESTAMP_0_OP__MASK 0xf0000000 -#define CP_MODIFY_TIMESTAMP_0_OP__SHIFT 28 -static inline uint32_t CP_MODIFY_TIMESTAMP_0_OP(enum timestamp_op val) -{ - return ((val) << CP_MODIFY_TIMESTAMP_0_OP__SHIFT) & CP_MODIFY_TIMESTAMP_0_OP__MASK; -} - -#define REG_CP_MEM_TO_SCRATCH_MEM_0 0x00000000 -#define CP_MEM_TO_SCRATCH_MEM_0_CNT__MASK 0x0000003f -#define CP_MEM_TO_SCRATCH_MEM_0_CNT__SHIFT 0 -static inline uint32_t CP_MEM_TO_SCRATCH_MEM_0_CNT(uint32_t val) -{ - return ((val) << CP_MEM_TO_SCRATCH_MEM_0_CNT__SHIFT) & CP_MEM_TO_SCRATCH_MEM_0_CNT__MASK; -} - -#define REG_CP_MEM_TO_SCRATCH_MEM_1 0x00000001 -#define CP_MEM_TO_SCRATCH_MEM_1_OFFSET__MASK 0x0000003f -#define CP_MEM_TO_SCRATCH_MEM_1_OFFSET__SHIFT 0 -static inline uint32_t CP_MEM_TO_SCRATCH_MEM_1_OFFSET(uint32_t val) -{ - return ((val) << CP_MEM_TO_SCRATCH_MEM_1_OFFSET__SHIFT) & CP_MEM_TO_SCRATCH_MEM_1_OFFSET__MASK; -} - -#define REG_CP_MEM_TO_SCRATCH_MEM_2 0x00000002 -#define CP_MEM_TO_SCRATCH_MEM_2_SRC__MASK 0xffffffff -#define CP_MEM_TO_SCRATCH_MEM_2_SRC__SHIFT 0 -static inline uint32_t CP_MEM_TO_SCRATCH_MEM_2_SRC(uint32_t val) -{ - return ((val) << CP_MEM_TO_SCRATCH_MEM_2_SRC__SHIFT) & CP_MEM_TO_SCRATCH_MEM_2_SRC__MASK; -} - -#define REG_CP_MEM_TO_SCRATCH_MEM_3 0x00000003 -#define CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__MASK 0xffffffff -#define CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__SHIFT 0 -static inline uint32_t CP_MEM_TO_SCRATCH_MEM_3_SRC_HI(uint32_t val) -{ - return ((val) << CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__SHIFT) & CP_MEM_TO_SCRATCH_MEM_3_SRC_HI__MASK; -} - -#define REG_CP_THREAD_CONTROL_0 0x00000000 -#define CP_THREAD_CONTROL_0_THREAD__MASK 0x00000003 -#define CP_THREAD_CONTROL_0_THREAD__SHIFT 0 -static inline uint32_t CP_THREAD_CONTROL_0_THREAD(enum cp_thread val) -{ - return ((val) << 
CP_THREAD_CONTROL_0_THREAD__SHIFT) & CP_THREAD_CONTROL_0_THREAD__MASK; -} -#define CP_THREAD_CONTROL_0_CONCURRENT_BIN_DISABLE 0x08000000 -#define CP_THREAD_CONTROL_0_SYNC_THREADS 0x80000000 - -#define REG_CP_FIXED_STRIDE_DRAW_TABLE_IB_BASE 0x00000000 - -#define REG_CP_FIXED_STRIDE_DRAW_TABLE_2 0x00000002 -#define CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__MASK 0x00000fff -#define CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__SHIFT 0 -static inline uint32_t CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE(uint32_t val) -{ - return ((val) << CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__SHIFT) & CP_FIXED_STRIDE_DRAW_TABLE_2_IB_SIZE__MASK; -} -#define CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__MASK 0xfff00000 -#define CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__SHIFT 20 -static inline uint32_t CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE(uint32_t val) -{ - return ((val) << CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__SHIFT) & CP_FIXED_STRIDE_DRAW_TABLE_2_STRIDE__MASK; -} - -#define REG_CP_FIXED_STRIDE_DRAW_TABLE_3 0x00000003 -#define CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__MASK 0xffffffff -#define CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__SHIFT 0 -static inline uint32_t CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT(uint32_t val) -{ - return ((val) << CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__SHIFT) & CP_FIXED_STRIDE_DRAW_TABLE_3_COUNT__MASK; -} - -#define REG_CP_RESET_CONTEXT_STATE_0 0x00000000 -#define CP_RESET_CONTEXT_STATE_0_CLEAR_ON_CHIP_TS 0x00000001 -#define CP_RESET_CONTEXT_STATE_0_CLEAR_RESOURCE_TABLE 0x00000002 -#define CP_RESET_CONTEXT_STATE_0_CLEAR_GLOBAL_LOCAL_TS 0x00000004 - -#ifdef __cplusplus -#endif - -#endif /* ADRENO_PM4_XML */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 88c2e51ab1..9f21647828 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -320,7 +320,7 @@ static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc, } static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, - struct dpu_plane_state *pstate, struct dpu_format *format) + struct dpu_plane_state *pstate, const struct msm_format *format) { struct dpu_hw_mixer *lm = mixer->hw_lm; uint32_t blend_op; @@ -363,7 +363,7 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, fg_alpha, bg_alpha, blend_op); DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n", - &format->base.pixel_format, format->alpha_enable, blend_op); + &format->pixel_format, format->alpha_enable, blend_op); } static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) @@ -395,7 +395,7 @@ static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc, struct dpu_crtc_mixer *mixer, u32 num_mixers, enum dpu_stage stage, - struct dpu_format *format, + const struct msm_format *format, uint64_t modifier, struct dpu_sw_pipe *pipe, unsigned int stage_idx, @@ -412,7 +412,7 @@ static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc, trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane), state, to_dpu_plane_state(state), stage_idx, - format->base.pixel_format, + format->pixel_format, modifier); DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d multirect_idx %d\n", @@ -440,7 +440,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, struct drm_plane_state *state; struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); struct dpu_plane_state *pstate = NULL; - struct dpu_format *format; + const struct msm_format *format; struct dpu_hw_ctl *ctl = mixer->lm_ctl; uint32_t lm_idx; @@ -459,7 +459,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, 
pstate = to_dpu_plane_state(state); fb = state->fb; - format = to_dpu_format(msm_framebuffer_format(pstate->base.fb)); + format = msm_framebuffer_format(pstate->base.fb); if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable) bg_alpha_enable = true; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 9a14d2232e..697ad4a640 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -428,7 +428,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, return -EWOULDBLOCK; } - if (irq_idx < 0) { + if (irq_idx == 0) { DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n", DRMID(phys_enc->parent), func); return 0; @@ -675,7 +675,7 @@ static int dpu_encoder_virt_atomic_check( if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) { fb = conn_state->writeback_job->fb; - if (fb && DPU_FORMAT_IS_YUV(to_dpu_format(msm_framebuffer_format(fb)))) + if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) topology.needs_cdm = true; } else if (disp_info->intf_type == INTF_DP) { if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode)) @@ -1200,6 +1200,8 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]); phys->cached_mode = crtc_state->adjusted_mode; + if (phys->ops.atomic_mode_set) + phys->ops.atomic_mode_set(phys, crtc_state, conn_state); } } @@ -1741,8 +1743,7 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) phys = dpu_enc->phys_encs[i]; ctl = phys->hw_ctl; - if (ctl->ops.clear_pending_flush) - ctl->ops.clear_pending_flush(ctl); + ctl->ops.clear_pending_flush(ctl); /* update only for command mode primary ctl */ if ((phys == dpu_enc->cur_master) && @@ -2184,7 +2185,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) } void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, - const struct dpu_format *dpu_fmt, + const struct msm_format *dpu_fmt, u32 output_type) { struct dpu_hw_cdm *hw_cdm; @@ -2202,9 +2203,9 @@ void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, if (!hw_cdm) return; - if (!DPU_FORMAT_IS_YUV(dpu_fmt)) { - DPU_DEBUG("[enc:%d] cdm_disable fmt:%x\n", DRMID(phys_enc->parent), - dpu_fmt->base.pixel_format); + if (!MSM_FORMAT_IS_YUV(dpu_fmt)) { + DPU_DEBUG("[enc:%d] cdm_disable fmt:%p4cc\n", DRMID(phys_enc->parent), + &dpu_fmt->pixel_format); if (hw_cdm->ops.bind_pingpong_blk) hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE); @@ -2217,25 +2218,25 @@ void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, cdm_cfg->output_height = phys_enc->cached_mode.vdisplay; cdm_cfg->output_fmt = dpu_fmt; cdm_cfg->output_type = output_type; - cdm_cfg->output_bit_depth = DPU_FORMAT_IS_DX(dpu_fmt) ? + cdm_cfg->output_bit_depth = MSM_FORMAT_IS_DX(dpu_fmt) ? 
CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT; cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l; /* enable 10 bit logic */ switch (cdm_cfg->output_fmt->chroma_sample) { - case DPU_CHROMA_RGB: + case CHROMA_FULL: cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; break; - case DPU_CHROMA_H2V1: + case CHROMA_H2V1: cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; break; - case DPU_CHROMA_420: + case CHROMA_420: cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE; break; - case DPU_CHROMA_H1V2: + case CHROMA_H1V2: default: DPU_ERROR("[enc:%d] unsupported chroma sampling type\n", DRMID(phys_enc->parent)); @@ -2244,9 +2245,9 @@ void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, break; } - DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n", + DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%p4cc,%d,%d,%d,%d]\n", DRMID(phys_enc->parent), cdm_cfg->output_width, - cdm_cfg->output_height, cdm_cfg->output_fmt->base.pixel_format, + cdm_cfg->output_height, &cdm_cfg->output_fmt->pixel_format, cdm_cfg->output_type, cdm_cfg->output_bit_depth, cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h index 98d1b64a43..30470cd15a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h @@ -69,6 +69,8 @@ struct dpu_encoder_phys; * @is_master: Whether this phys_enc is the current master * encoder. Can be switched at enable time. Based * on split_role and current mode (CMD/VID). + * @atomic_mode_set: DRM Call. Set a DRM mode. + * This likely caches the mode, for use at enable. * @enable: DRM Call. Enable a DRM mode. * @disable: DRM Call. Disable mode. 
* @control_vblank_irq Register/Deregister for VBLANK IRQ @@ -93,6 +95,9 @@ struct dpu_encoder_phys; struct dpu_encoder_phys_ops { void (*prepare_commit)(struct dpu_encoder_phys *encoder); bool (*is_master)(struct dpu_encoder_phys *encoder); + void (*atomic_mode_set)(struct dpu_encoder_phys *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); void (*enable)(struct dpu_encoder_phys *encoder); void (*disable)(struct dpu_encoder_phys *encoder); int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable); @@ -393,7 +398,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc); * @output_type: HDMI/WB */ void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc, - const struct dpu_format *dpu_fmt, + const struct msm_format *dpu_fmt, u32 output_type); /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 489be1c0c7..95cd39b496 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -142,6 +142,23 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg) dpu_encoder_underrun_callback(phys_enc->parent, phys_enc); } +static void dpu_encoder_phys_cmd_atomic_mode_set( + struct dpu_encoder_phys *phys_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start; + + phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done; + + if (phys_enc->has_intf_te) + phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr; + else + phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr; + + phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun; +} + static int _dpu_encoder_phys_cmd_handle_ppdone_timeout( struct dpu_encoder_phys *phys_enc) { @@ -280,14 +297,6 @@ static void dpu_encoder_phys_cmd_irq_enable(struct dpu_encoder_phys *phys_enc) phys_enc->hw_pp->idx - PINGPONG_0, phys_enc->vblank_refcount); - phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start; - phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done; - - if (phys_enc->has_intf_te) - phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr; - else - phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr; - dpu_core_irq_register_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_PINGPONG], dpu_encoder_phys_cmd_pp_tx_done_irq, @@ -318,10 +327,6 @@ static void dpu_encoder_phys_cmd_irq_disable(struct dpu_encoder_phys *phys_enc) dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_UNDERRUN]); dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false); dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_PINGPONG]); - - phys_enc->irq[INTR_IDX_CTL_START] = 0; - phys_enc->irq[INTR_IDX_PINGPONG] = 0; - phys_enc->irq[INTR_IDX_RDPTR] = 0; } static void dpu_encoder_phys_cmd_tearcheck_config( @@ -698,6 +703,7 @@ static void dpu_encoder_phys_cmd_init_ops( struct dpu_encoder_phys_ops *ops) { ops->is_master = dpu_encoder_phys_cmd_is_master; + ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set; ops->enable = dpu_encoder_phys_cmd_enable; ops->disable = dpu_encoder_phys_cmd_disable; ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq; @@ -736,8 +742,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev, 
dpu_encoder_phys_cmd_init_ops(&phys_enc->ops); phys_enc->intf_mode = INTF_MODE_CMD; - phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun; - cmd_enc->stream_sel = 0; if (!phys_enc->hw_intf) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index d9e7dbf049..636a97432d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -235,7 +235,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine( { struct drm_display_mode mode; struct dpu_hw_intf_timing_params timing_params = { 0 }; - const struct dpu_format *fmt = NULL; + const struct msm_format *fmt = NULL; u32 fmt_fourcc; unsigned long lock_flags; struct dpu_hw_intf_cfg intf_cfg = { 0 }; @@ -274,7 +274,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine( drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params); - fmt = dpu_get_dpu_format(fmt_fourcc); + fmt = mdp_get_format(&phys_enc->dpu_kms->base, fmt_fourcc, 0); DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc); if (phys_enc->hw_cdm) @@ -356,6 +356,16 @@ static bool dpu_encoder_phys_vid_needs_single_flush( return phys_enc->split_role != ENC_ROLE_SOLO; } +static void dpu_encoder_phys_vid_atomic_mode_set( + struct dpu_encoder_phys *phys_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync; + + phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun; +} + static int dpu_encoder_phys_vid_control_vblank_irq( struct dpu_encoder_phys *phys_enc, bool enable) @@ -409,12 +419,12 @@ end: static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) { struct dpu_hw_ctl *ctl; - const struct dpu_format *fmt; + const struct msm_format *fmt; u32 fmt_fourcc; ctl = phys_enc->hw_ctl; fmt_fourcc = dpu_encoder_get_drm_fmt(phys_enc); - fmt = dpu_get_dpu_format(fmt_fourcc); + fmt = mdp_get_format(&phys_enc->dpu_kms->base, fmt_fourcc, 0); DPU_DEBUG_VIDENC(phys_enc, "\n"); @@ -699,6 +709,7 @@ static int dpu_encoder_phys_vid_get_frame_count( static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops) { ops->is_master = dpu_encoder_phys_vid_is_master; + ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set; ops->enable = dpu_encoder_phys_vid_enable; ops->disable = dpu_encoder_phys_vid_disable; ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq; @@ -737,8 +748,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev, dpu_encoder_phys_vid_init_ops(&phys_enc->ops); phys_enc->intf_mode = INTF_MODE_VIDEO; - phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync; - phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun; DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->hw_intf->idx); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c index 1924a2b28e..882c717859 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -322,11 +322,11 @@ static void dpu_encoder_phys_wb_setup( struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); struct drm_writeback_job *wb_job; const struct msm_format *format; - const struct dpu_format *dpu_fmt; + const struct msm_format *dpu_fmt; wb_job = wb_enc->wb_job; format = msm_framebuffer_format(wb_enc->wb_job->fb); - dpu_fmt = 
dpu_get_dpu_format_ext(format->pixel_format, wb_job->fb->modifier); + dpu_fmt = mdp_get_format(&phys_enc->dpu_kms->base, format->pixel_format, wb_job->fb->modifier); DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n", hw_wb->idx - WB_0, mode.name, @@ -404,6 +404,15 @@ static void dpu_encoder_phys_wb_irq_disable(struct dpu_encoder_phys *phys) dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]); } +static void dpu_encoder_phys_wb_atomic_mode_set( + struct dpu_encoder_phys *phys_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + + phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done; +} + static void _dpu_encoder_phys_wb_handle_wbdone_timeout( struct dpu_encoder_phys *phys_enc) { @@ -529,8 +538,7 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc) } /* reset h/w before final flush */ - if (phys_enc->hw_ctl->ops.clear_pending_flush) - phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl); + phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl); /* * New CTL reset sequence from 5.0 MDP onwards. @@ -576,11 +584,11 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc format = msm_framebuffer_format(job->fb); - wb_cfg->dest.format = dpu_get_dpu_format_ext( - format->pixel_format, job->fb->modifier); + wb_cfg->dest.format = mdp_get_format(&phys_enc->dpu_kms->base, + format->pixel_format, job->fb->modifier); if (!wb_cfg->dest.format) { /* this error should be detected during atomic_check */ - DPU_ERROR("failed to get format %x\n", format->pixel_format); + DPU_ERROR("failed to get format %p4cc\n", &format->pixel_format); return; } @@ -594,7 +602,7 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc wb_cfg->dest.height = job->fb->height; wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes; - if ((wb_cfg->dest.format->fetch_planes == DPU_PLANE_PLANAR) && + if ((wb_cfg->dest.format->fetch_type == MDP_PLANE_PLANAR) && (wb_cfg->dest.format->element[0] == C1_B_Cb)) swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]); @@ -640,6 +648,7 @@ static bool dpu_encoder_phys_wb_is_valid_for_commit(struct dpu_encoder_phys *phy static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops) { ops->is_master = dpu_encoder_phys_wb_is_master; + ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set; ops->enable = dpu_encoder_phys_wb_enable; ops->disable = dpu_encoder_phys_wb_disable; ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done; @@ -685,7 +694,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev, dpu_encoder_phys_wb_init_ops(&phys_enc->ops); phys_enc->intf_mode = INTF_MODE_WB_LINE; - phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done; atomic_set(&wb_enc->wbirq_refcount, 0); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c index e366ab1342..6b1e9a617d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c @@ -11,178 +11,11 @@ #include "dpu_kms.h" #include "dpu_formats.h" -#define DPU_UBWC_META_MACRO_W_H 16 -#define DPU_UBWC_META_BLOCK_SIZE 256 #define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096 -#define DPU_TILE_HEIGHT_DEFAULT 1 -#define DPU_TILE_HEIGHT_TILED 4 -#define DPU_TILE_HEIGHT_UBWC 4 -#define DPU_TILE_HEIGHT_NV12 8 - #define DPU_MAX_IMG_WIDTH 0x3FFF #define DPU_MAX_IMG_HEIGHT 0x3FFF -/* - * DPU supported format packing, bpp, and other format - * 
information. - * DPU currently only supports interleaved RGB formats - * UBWC support for a pixel format is indicated by the flag, - * there is additional meta data plane for such formats - */ - -#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \ -bp, flg, fm, np) \ -{ \ - .base.pixel_format = DRM_FORMAT_ ## fmt, \ - .fetch_planes = DPU_PLANE_INTERLEAVED, \ - .alpha_enable = alpha, \ - .element = { (e0), (e1), (e2), (e3) }, \ - .bits = { g, b, r, a }, \ - .chroma_sample = DPU_CHROMA_RGB, \ - .unpack_align_msb = 0, \ - .unpack_tight = 1, \ - .unpack_count = uc, \ - .bpp = bp, \ - .fetch_mode = fm, \ - .flag = {(flg)}, \ - .num_planes = np, \ - .tile_height = DPU_TILE_HEIGHT_DEFAULT \ -} - -#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \ -alpha, bp, flg, fm, np, th) \ -{ \ - .base.pixel_format = DRM_FORMAT_ ## fmt, \ - .fetch_planes = DPU_PLANE_INTERLEAVED, \ - .alpha_enable = alpha, \ - .element = { (e0), (e1), (e2), (e3) }, \ - .bits = { g, b, r, a }, \ - .chroma_sample = DPU_CHROMA_RGB, \ - .unpack_align_msb = 0, \ - .unpack_tight = 1, \ - .unpack_count = uc, \ - .bpp = bp, \ - .fetch_mode = fm, \ - .flag = {(flg)}, \ - .num_planes = np, \ - .tile_height = th \ -} - - -#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \ -alpha, chroma, count, bp, flg, fm, np) \ -{ \ - .base.pixel_format = DRM_FORMAT_ ## fmt, \ - .fetch_planes = DPU_PLANE_INTERLEAVED, \ - .alpha_enable = alpha, \ - .element = { (e0), (e1), (e2), (e3)}, \ - .bits = { g, b, r, a }, \ - .chroma_sample = chroma, \ - .unpack_align_msb = 0, \ - .unpack_tight = 1, \ - .unpack_count = count, \ - .bpp = bp, \ - .fetch_mode = fm, \ - .flag = {(flg)}, \ - .num_planes = np, \ - .tile_height = DPU_TILE_HEIGHT_DEFAULT \ -} - -#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \ -{ \ - .base.pixel_format = DRM_FORMAT_ ## fmt, \ - .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \ - .alpha_enable = false, \ - .element = { (e0), (e1), 0, 0 }, \ - .bits = { g, b, r, a }, \ - .chroma_sample = chroma, \ - .unpack_align_msb = 0, \ - .unpack_tight = 1, \ - .unpack_count = 2, \ - .bpp = 2, \ - .fetch_mode = fm, \ - .flag = {(flg)}, \ - .num_planes = np, \ - .tile_height = DPU_TILE_HEIGHT_DEFAULT \ -} - -#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \ -flg, fm, np, th) \ -{ \ - .base.pixel_format = DRM_FORMAT_ ## fmt, \ - .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \ - .alpha_enable = false, \ - .element = { (e0), (e1), 0, 0 }, \ - .bits = { g, b, r, a }, \ - .chroma_sample = chroma, \ - .unpack_align_msb = 0, \ - .unpack_tight = 1, \ - .unpack_count = 2, \ - .bpp = 2, \ - .fetch_mode = fm, \ - .flag = {(flg)}, \ - .num_planes = np, \ - .tile_height = th \ -} - -#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\ -{ \ - .base.pixel_format = DRM_FORMAT_ ## fmt, \ - .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \ - .alpha_enable = false, \ - .element = { (e0), (e1), 0, 0 }, \ - .bits = { g, b, r, a }, \ - .chroma_sample = chroma, \ - .unpack_align_msb = 1, \ - .unpack_tight = 0, \ - .unpack_count = 2, \ - .bpp = 2, \ - .fetch_mode = fm, \ - .flag = {(flg)}, \ - .num_planes = np, \ - .tile_height = DPU_TILE_HEIGHT_DEFAULT \ -} - -#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \ -flg, fm, np, th) \ -{ \ - .base.pixel_format = DRM_FORMAT_ ## fmt, \ - .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \ - .alpha_enable = false, \ - .element = { (e0), (e1), 0, 0 }, \ - .bits = { g, b, r, a }, \ - .chroma_sample = chroma, \ - .unpack_align_msb 
= 1, \ - .unpack_tight = 0, \ - .unpack_count = 2, \ - .bpp = 2, \ - .fetch_mode = fm, \ - .flag = {(flg)}, \ - .num_planes = np, \ - .tile_height = th \ -} - - -#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \ -flg, fm, np) \ -{ \ - .base.pixel_format = DRM_FORMAT_ ## fmt, \ - .fetch_planes = DPU_PLANE_PLANAR, \ - .alpha_enable = alpha, \ - .element = { (e0), (e1), (e2), 0 }, \ - .bits = { g, b, r, a }, \ - .chroma_sample = chroma, \ - .unpack_align_msb = 0, \ - .unpack_tight = 1, \ - .unpack_count = 1, \ - .bpp = bp, \ - .fetch_mode = fm, \ - .flag = {(flg)}, \ - .num_planes = np, \ - .tile_height = DPU_TILE_HEIGHT_DEFAULT \ -} - /* * struct dpu_media_color_map - maps drm format to media format * @format: DRM base pixel format @@ -193,380 +26,11 @@ struct dpu_media_color_map { uint32_t color; }; -static const struct dpu_format dpu_format_map[] = { - INTERLEAVED_RGB_FMT(ARGB8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, - true, 4, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(ABGR8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - true, 4, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(XBGR8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - false, 4, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(RGBA8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, - true, 4, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(BGRA8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, - true, 4, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(BGRX8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, - false, 4, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(XRGB8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, - false, 4, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(RGBX8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, - false, 4, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(RGB888, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, - false, 3, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(BGR888, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, - false, 3, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(RGB565, - 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, - false, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(BGR565, - 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, - false, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(ARGB1555, - COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, - true, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(ABGR1555, - COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - true, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(RGBA5551, - COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, - C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, - true, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(BGRA5551, - COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, - C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, - true, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(XRGB1555, - COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, - C1_B_Cb, 
C0_G_Y, C2_R_Cr, C3_ALPHA, 4, - false, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(XBGR1555, - COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - false, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(RGBX5551, - COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, - C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, - false, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(BGRX5551, - COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, - C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, - false, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(ARGB4444, - COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, - true, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(ABGR4444, - COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - true, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(RGBA4444, - COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, - C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, - true, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(BGRA4444, - COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, - C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, - true, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(XRGB4444, - COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, - false, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(XBGR4444, - COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - false, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(RGBX4444, - COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, - C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, - false, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(BGRX4444, - COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, - C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, - false, 2, 0, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(BGRA1010102, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, - true, 4, DPU_FORMAT_FLAG_DX, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(RGBA1010102, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, - true, 4, DPU_FORMAT_FLAG_DX, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(ABGR2101010, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - true, 4, DPU_FORMAT_FLAG_DX, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(ARGB2101010, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, - true, 4, DPU_FORMAT_FLAG_DX, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(XRGB2101010, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, - false, 4, DPU_FORMAT_FLAG_DX, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(BGRX1010102, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, - false, 4, DPU_FORMAT_FLAG_DX, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(XBGR2101010, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - false, 4, DPU_FORMAT_FLAG_DX, - DPU_FETCH_LINEAR, 1), - - INTERLEAVED_RGB_FMT(RGBX1010102, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, - false, 4, DPU_FORMAT_FLAG_DX, - DPU_FETCH_LINEAR, 1), - - PSEUDO_YUV_FMT(NV12, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C2_R_Cr, - DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV, - DPU_FETCH_LINEAR, 2), - 
- PSEUDO_YUV_FMT(NV21, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C1_B_Cb, - DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV, - DPU_FETCH_LINEAR, 2), - - PSEUDO_YUV_FMT(NV16, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C2_R_Cr, - DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV, - DPU_FETCH_LINEAR, 2), - - PSEUDO_YUV_FMT(NV61, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C1_B_Cb, - DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV, - DPU_FETCH_LINEAR, 2), - - PSEUDO_YUV_FMT_LOOSE(P010, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C2_R_Cr, - DPU_CHROMA_420, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_YUV, - DPU_FETCH_LINEAR, 2), - - INTERLEAVED_YUV_FMT(VYUY, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y, - false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV, - DPU_FETCH_LINEAR, 2), - - INTERLEAVED_YUV_FMT(UYVY, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y, - false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV, - DPU_FETCH_LINEAR, 2), - - INTERLEAVED_YUV_FMT(YUYV, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr, - false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV, - DPU_FETCH_LINEAR, 2), - - INTERLEAVED_YUV_FMT(YVYU, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb, - false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV, - DPU_FETCH_LINEAR, 2), - - PLANAR_YUV_FMT(YUV420, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C1_B_Cb, C0_G_Y, - false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV, - DPU_FETCH_LINEAR, 3), - - PLANAR_YUV_FMT(YVU420, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C2_R_Cr, C0_G_Y, - false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV, - DPU_FETCH_LINEAR, 3), -}; - -/* - * UBWC formats table: - * This table holds the UBWC formats supported. - * If a compression ratio needs to be used for this or any other format, - * the data will be passed by user-space. - */ -static const struct dpu_format dpu_format_map_ubwc[] = { - INTERLEAVED_RGB_FMT_TILED(BGR565, - 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, - false, 2, DPU_FORMAT_FLAG_COMPRESSED, - DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), - - INTERLEAVED_RGB_FMT_TILED(ABGR8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - true, 4, DPU_FORMAT_FLAG_COMPRESSED, - DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), - - /* ARGB8888 and ABGR8888 purposely have the same color - * ordering. The hardware only supports ABGR8888 UBWC - * natively. 
- */ - INTERLEAVED_RGB_FMT_TILED(ARGB8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - true, 4, DPU_FORMAT_FLAG_COMPRESSED, - DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), - - INTERLEAVED_RGB_FMT_TILED(XBGR8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - false, 4, DPU_FORMAT_FLAG_COMPRESSED, - DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), - - INTERLEAVED_RGB_FMT_TILED(XRGB8888, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - false, 4, DPU_FORMAT_FLAG_COMPRESSED, - DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), - - INTERLEAVED_RGB_FMT_TILED(ABGR2101010, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED, - DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), - - INTERLEAVED_RGB_FMT_TILED(XBGR2101010, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED, - DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), - - INTERLEAVED_RGB_FMT_TILED(XRGB2101010, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED, - DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), - - /* XRGB2101010 and ARGB2101010 purposely have the same color - * ordering. The hardware only supports ARGB2101010 UBWC - * natively. - */ - INTERLEAVED_RGB_FMT_TILED(ARGB2101010, - COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, - true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED, - DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), - - PSEUDO_YUV_FMT_TILED(NV12, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C2_R_Cr, - DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV | - DPU_FORMAT_FLAG_COMPRESSED, - DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12), - - PSEUDO_YUV_FMT_TILED(P010, - 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, - C1_B_Cb, C2_R_Cr, - DPU_CHROMA_420, DPU_FORMAT_FLAG_DX | - DPU_FORMAT_FLAG_YUV | - DPU_FORMAT_FLAG_COMPRESSED, - DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_UBWC), -}; - /* _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we support * Note: Not using the drm_format_*_subsampling since we have formats */ static void _dpu_get_v_h_subsample_rate( - enum dpu_chroma_samp_type chroma_sample, + enum mdp_chroma_samp_type chroma_sample, uint32_t *v_sample, uint32_t *h_sample) { @@ -574,15 +38,15 @@ static void _dpu_get_v_h_subsample_rate( return; switch (chroma_sample) { - case DPU_CHROMA_H2V1: + case CHROMA_H2V1: *v_sample = 1; *h_sample = 2; break; - case DPU_CHROMA_H1V2: + case CHROMA_H1V2: *v_sample = 2; *h_sample = 1; break; - case DPU_CHROMA_420: + case CHROMA_420: *v_sample = 2; *h_sample = 2; break; @@ -593,7 +57,7 @@ static void _dpu_get_v_h_subsample_rate( } } -static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt) +static int _dpu_format_get_media_color_ubwc(const struct msm_format *fmt) { static const struct dpu_media_color_map dpu_media_ubwc_map[] = { {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC}, @@ -609,10 +73,10 @@ static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt) int color_fmt = -1; int i; - if (fmt->base.pixel_format == DRM_FORMAT_NV12 || - fmt->base.pixel_format == DRM_FORMAT_P010) { - if (DPU_FORMAT_IS_DX(fmt)) { - if (fmt->unpack_tight) + if (fmt->pixel_format == DRM_FORMAT_NV12 || + fmt->pixel_format == DRM_FORMAT_P010) { + if 
(MSM_FORMAT_IS_DX(fmt)) { + if (fmt->flags & MSM_FORMAT_FLAG_UNPACK_TIGHT) color_fmt = COLOR_FMT_NV12_BPP10_UBWC; else color_fmt = COLOR_FMT_P010_UBWC; @@ -622,7 +86,7 @@ static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt) } for (i = 0; i < ARRAY_SIZE(dpu_media_ubwc_map); ++i) - if (fmt->base.pixel_format == dpu_media_ubwc_map[i].format) { + if (fmt->pixel_format == dpu_media_ubwc_map[i].format) { color_fmt = dpu_media_ubwc_map[i].color; break; } @@ -630,14 +94,14 @@ static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt) } static int _dpu_format_get_plane_sizes_ubwc( - const struct dpu_format *fmt, + const struct msm_format *fmt, const uint32_t width, const uint32_t height, struct dpu_hw_fmt_layout *layout) { int i; int color; - bool meta = DPU_FORMAT_IS_UBWC(fmt); + bool meta = MSM_FORMAT_IS_UBWC(fmt); memset(layout, 0, sizeof(struct dpu_hw_fmt_layout)); layout->format = fmt; @@ -647,12 +111,12 @@ static int _dpu_format_get_plane_sizes_ubwc( color = _dpu_format_get_media_color_ubwc(fmt); if (color < 0) { - DRM_ERROR("UBWC format not supported for fmt: %4.4s\n", - (char *)&fmt->base.pixel_format); + DRM_ERROR("UBWC format not supported for fmt: %p4cc\n", + &fmt->pixel_format); return -EINVAL; } - if (DPU_FORMAT_IS_YUV(layout->format)) { + if (MSM_FORMAT_IS_YUV(layout->format)) { uint32_t y_sclines, uv_sclines; uint32_t y_meta_scanlines = 0; uint32_t uv_meta_scanlines = 0; @@ -709,7 +173,7 @@ done: } static int _dpu_format_get_plane_sizes_linear( - const struct dpu_format *fmt, + const struct msm_format *fmt, const uint32_t width, const uint32_t height, struct dpu_hw_fmt_layout *layout, @@ -724,7 +188,7 @@ static int _dpu_format_get_plane_sizes_linear( layout->num_planes = fmt->num_planes; /* Due to memset above, only need to set planes of interest */ - if (fmt->fetch_planes == DPU_PLANE_INTERLEAVED) { + if (fmt->fetch_type == MDP_PLANE_INTERLEAVED) { layout->num_planes = 1; layout->plane_size[0] = width * height * layout->format->bpp; layout->plane_pitch[0] = width * layout->format->bpp; @@ -742,8 +206,8 @@ static int _dpu_format_get_plane_sizes_linear( return -EINVAL; } - if ((fmt->base.pixel_format == DRM_FORMAT_NV12) && - (DPU_FORMAT_IS_DX(fmt))) + if ((fmt->pixel_format == DRM_FORMAT_NV12) && + (MSM_FORMAT_IS_DX(fmt))) bpp = 2; layout->plane_pitch[0] = width * bpp; layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample; @@ -751,7 +215,7 @@ static int _dpu_format_get_plane_sizes_linear( layout->plane_size[1] = layout->plane_pitch[1] * (height / v_subsample); - if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) { + if (fmt->fetch_type == MDP_PLANE_PSEUDO_PLANAR) { layout->num_planes = 2; layout->plane_size[1] *= 2; layout->plane_pitch[1] *= 2; @@ -781,7 +245,7 @@ static int _dpu_format_get_plane_sizes_linear( } static int dpu_format_get_plane_sizes( - const struct dpu_format *fmt, + const struct msm_format *fmt, const uint32_t w, const uint32_t h, struct dpu_hw_fmt_layout *layout, @@ -797,7 +261,7 @@ static int dpu_format_get_plane_sizes( return -ERANGE; } - if (DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt)) + if (MSM_FORMAT_IS_UBWC(fmt) || MSM_FORMAT_IS_TILE(fmt)) return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout); return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches); @@ -823,10 +287,10 @@ static int _dpu_format_populate_addrs_ubwc( return -EFAULT; } - meta = DPU_FORMAT_IS_UBWC(layout->format); + meta = MSM_FORMAT_IS_UBWC(layout->format); /* Per-format logic for verifying active planes */ - if 
(DPU_FORMAT_IS_YUV(layout->format)) { + if (MSM_FORMAT_IS_YUV(layout->format)) { /************************************************/ /* UBWC ** */ /* buffer ** DPU PLANE */ @@ -942,7 +406,7 @@ int dpu_format_populate_layout( return -ERANGE; } - layout->format = to_dpu_format(msm_framebuffer_format(fb)); + layout->format = msm_framebuffer_format(fb); /* Populate the plane sizes etc via get_format */ ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height, @@ -951,8 +415,8 @@ int dpu_format_populate_layout( return ret; /* Populate the addresses given the fb */ - if (DPU_FORMAT_IS_UBWC(layout->format) || - DPU_FORMAT_IS_TILE(layout->format)) + if (MSM_FORMAT_IS_UBWC(layout->format) || + MSM_FORMAT_IS_TILE(layout->format)) ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout); else ret = _dpu_format_populate_addrs_linear(aspace, fb, layout); @@ -962,23 +426,21 @@ int dpu_format_populate_layout( int dpu_format_check_modified_format( const struct msm_kms *kms, - const struct msm_format *msm_fmt, + const struct msm_format *fmt, const struct drm_mode_fb_cmd2 *cmd, struct drm_gem_object **bos) { const struct drm_format_info *info; - const struct dpu_format *fmt; struct dpu_hw_fmt_layout layout; uint32_t bos_total_size = 0; int ret, i; - if (!msm_fmt || !cmd || !bos) { + if (!fmt || !cmd || !bos) { DRM_ERROR("invalid arguments\n"); return -EINVAL; } - fmt = to_dpu_format(msm_fmt); - info = drm_format_info(fmt->base.pixel_format); + info = drm_format_info(fmt->pixel_format); if (!info) return -EINVAL; @@ -1004,65 +466,3 @@ int dpu_format_check_modified_format( return 0; } - -const struct dpu_format *dpu_get_dpu_format_ext( - const uint32_t format, - const uint64_t modifier) -{ - uint32_t i = 0; - const struct dpu_format *fmt = NULL; - const struct dpu_format *map = NULL; - ssize_t map_size = 0; - - /* - * Currently only support exactly zero or one modifier. - * All planes use the same modifier. - */ - DRM_DEBUG_ATOMIC("plane format modifier 0x%llX\n", modifier); - - switch (modifier) { - case 0: - map = dpu_format_map; - map_size = ARRAY_SIZE(dpu_format_map); - break; - case DRM_FORMAT_MOD_QCOM_COMPRESSED: - map = dpu_format_map_ubwc; - map_size = ARRAY_SIZE(dpu_format_map_ubwc); - DRM_DEBUG_ATOMIC("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n", - (char *)&format); - break; - default: - DPU_ERROR("unsupported format modifier %llX\n", modifier); - return NULL; - } - - for (i = 0; i < map_size; i++) { - if (format == map[i].base.pixel_format) { - fmt = &map[i]; - break; - } - } - - if (fmt == NULL) - DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n", - (char *)&format, modifier); - else - DRM_DEBUG_ATOMIC("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n", - (char *)&format, modifier, - DPU_FORMAT_IS_UBWC(fmt), - DPU_FORMAT_IS_YUV(fmt)); - - return fmt; -} - -const struct msm_format *dpu_get_msm_format( - struct msm_kms *kms, - const uint32_t format, - const uint64_t modifiers) -{ - const struct dpu_format *fmt = dpu_get_dpu_format_ext(format, - modifiers); - if (fmt) - return &fmt->base; - return NULL; -} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h index 84b8b3289f..210d0ed5f0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h @@ -9,17 +9,6 @@ #include "msm_gem.h" #include "dpu_hw_mdss.h" -/** - * dpu_get_dpu_format_ext() - Returns dpu format structure pointer. 
- * @format: DRM FourCC Code - * @modifiers: format modifier array from client, one per plane - */ -const struct dpu_format *dpu_get_dpu_format_ext( - const uint32_t format, - const uint64_t modifier); - -#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0) - /** * dpu_find_format - validate if the pixel format is supported * @format: dpu format @@ -42,23 +31,11 @@ static inline bool dpu_find_format(u32 format, const u32 *supported_formats, return false; } -/** - * dpu_get_msm_format - get an dpu_format by its msm_format base - * callback function registers with the msm_kms layer - * @kms: kms driver - * @format: DRM FourCC Code - * @modifiers: data layout modifier - */ -const struct msm_format *dpu_get_msm_format( - struct msm_kms *kms, - const uint32_t format, - const uint64_t modifiers); - /** * dpu_format_check_modified_format - validate format and buffers for * dpu non-standard, i.e. modified format * @kms: kms driver - * @msm_fmt: pointer to the msm_fmt base pointer of an dpu_format + * @msm_fmt: pointer to the msm_fmt base pointer of an msm_format * @cmd: fb_cmd2 structure user request * @bos: gem buffer object list * diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index f2b6eac760..9b72977fea 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -220,12 +220,9 @@ static const u32 wb2_formats_rgb[] = { DRM_FORMAT_RGBA4444, DRM_FORMAT_RGBX4444, DRM_FORMAT_XRGB4444, - DRM_FORMAT_BGR565, DRM_FORMAT_BGR888, - DRM_FORMAT_ABGR8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_BGRX8888, - DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR1555, DRM_FORMAT_BGRA5551, DRM_FORMAT_XBGR1555, @@ -254,12 +251,9 @@ static const u32 wb2_formats_rgb_yuv[] = { DRM_FORMAT_RGBA4444, DRM_FORMAT_RGBX4444, DRM_FORMAT_XRGB4444, - DRM_FORMAT_BGR565, DRM_FORMAT_BGR888, - DRM_FORMAT_ABGR8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_BGRX8888, - DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR1555, DRM_FORMAT_BGRA5551, DRM_FORMAT_XBGR1555, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c index 9016b3ade6..55d2768a6d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c @@ -170,7 +170,7 @@ static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg * static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *cdm) { struct dpu_hw_blk_reg_map *c = &ctx->hw; - const struct dpu_format *fmt; + const struct msm_format *fmt; u32 opmode = 0; u32 csc = 0; @@ -179,14 +179,14 @@ static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *cdm) fmt = cdm->output_fmt; - if (!DPU_FORMAT_IS_YUV(fmt)) + if (!MSM_FORMAT_IS_YUV(fmt)) return -EINVAL; dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, cdm->csc_cfg, true); dpu_hw_cdm_setup_cdwn(ctx, cdm); if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) { - if (fmt->chroma_sample == DPU_CHROMA_H1V2) + if (fmt->chroma_sample == CHROMA_H1V2) return -EINVAL; /*unsupported format */ opmode = CDM_HDMI_PACK_OP_MODE_EN; opmode |= (fmt->chroma_sample << 1); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h index 348424df87..ec71c9886d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h @@ -19,7 +19,7 @@ struct dpu_hw_cdm; * @output_bit_depth: output bit-depth of CDM block * @h_cdwn_type: downsample type used for horizontal pixels * @v_cdwn_type: downsample 
type used for vertical pixels - * @output_fmt: handle to dpu_format of CDM block + * @output_fmt: handle to msm_format of CDM block * @csc_cfg: handle to CSC matrix programmed for CDM block * @output_type: interface to which CDM is paired (HDMI/WB) * @pp_id: ping-pong block to which CDM is bound to @@ -30,7 +30,7 @@ struct dpu_hw_cdm_cfg { u32 output_bit_depth; u32 h_cdwn_type; u32 v_cdwn_type; - const struct dpu_format *output_fmt; + const struct msm_format *output_fmt; const struct dpu_csc_cfg *csc_cfg; u32 output_type; int pp_id; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h index ef56280bea..4401fdc0f3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h @@ -83,7 +83,8 @@ struct dpu_hw_ctl_ops { /** * Clear the value of the cached pending_flush_mask - * No effect on hardware + * No effect on hardware. + * Required to be implemented. * @ctx : ctl path ctx pointer */ void (*clear_pending_flush)(struct dpu_hw_ctl *ctx); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c index 965692ef78..225c1c7768 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c @@ -96,11 +96,11 @@ #define INTF_CFG2_DCE_DATA_COMPRESS BIT(12) -static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx, +static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf, const struct dpu_hw_intf_timing_params *p, - const struct dpu_format *fmt) + const struct msm_format *fmt) { - struct dpu_hw_blk_reg_map *c = &ctx->hw; + struct dpu_hw_blk_reg_map *c = &intf->hw; u32 hsync_period, vsync_period; u32 display_v_start, display_v_end; u32 hsync_start_x, hsync_end_x; @@ -118,7 +118,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx, /* read interface_cfg */ intf_cfg = DPU_REG_READ(c, INTF_CONFIG); - if (ctx->cap->type == INTF_DP) + if (intf->cap->type == INTF_DP) dp_intf = true; hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width + @@ -194,16 +194,16 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx, (p->vsync_polarity << 1) | /* VSYNC Polarity */ (p->hsync_polarity << 0); /* HSYNC Polarity */ - if (!DPU_FORMAT_IS_YUV(fmt)) - panel_format = (fmt->bits[C0_G_Y] | - (fmt->bits[C1_B_Cb] << 2) | - (fmt->bits[C2_R_Cr] << 4) | + if (!MSM_FORMAT_IS_YUV(fmt)) + panel_format = (fmt->bpc_g_y | + (fmt->bpc_b_cb << 2) | + (fmt->bpc_r_cr << 4) | (0x21 << 8)); else /* Interface treats all the pixel data in RGB888 format */ - panel_format = (COLOR_8BIT | - (COLOR_8BIT << 2) | - (COLOR_8BIT << 4) | + panel_format = (BPC8 | + (BPC8 << 2) | + (BPC8 << 4) | (0x21 << 8)); DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl); @@ -223,7 +223,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx, DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3); DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg); DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format); - if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) { + if (intf->cap->features & BIT(DPU_DATA_HCTL_EN)) { /* * DATA_HCTL_EN controls data timing which can be different from * video timing. 
It is recommended to enable it for all cases, except @@ -518,10 +518,10 @@ static void dpu_hw_intf_disable_autorefresh(struct dpu_hw_intf *intf, } -static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *ctx, +static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *intf, struct dpu_hw_intf_cmd_mode_cfg *cmd_mode_cfg) { - u32 intf_cfg2 = DPU_REG_READ(&ctx->hw, INTF_CONFIG2); + u32 intf_cfg2 = DPU_REG_READ(&intf->hw, INTF_CONFIG2); if (cmd_mode_cfg->data_compress) intf_cfg2 |= INTF_CFG2_DCE_DATA_COMPRESS; @@ -529,7 +529,7 @@ static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *ctx, if (cmd_mode_cfg->wide_bus_en) intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN; - DPU_REG_WRITE(&ctx->hw, INTF_CONFIG2, intf_cfg2); + DPU_REG_WRITE(&intf->hw, INTF_CONFIG2, intf_cfg2); } struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h index 6f4c87244f..f9015c67a5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h @@ -81,7 +81,7 @@ struct dpu_hw_intf_cmd_mode_cfg { struct dpu_hw_intf_ops { void (*setup_timing_gen)(struct dpu_hw_intf *intf, const struct dpu_hw_intf_timing_params *p, - const struct dpu_format *fmt); + const struct msm_format *fmt); void (*setup_prg_fetch)(struct dpu_hw_intf *intf, const struct dpu_hw_intf_prog_fetch *fetch); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h index 5df5459040..66759623fc 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h @@ -10,6 +10,8 @@ #include "msm_drv.h" +#include "disp/mdp_format.h" + #define DPU_DBG_NAME "dpu" #define DPU_NONE 0 @@ -35,28 +37,6 @@ #define DPU_MAX_DE_CURVES 3 #endif -enum dpu_format_flags { - DPU_FORMAT_FLAG_YUV_BIT, - DPU_FORMAT_FLAG_DX_BIT, - DPU_FORMAT_FLAG_COMPRESSED_BIT, - DPU_FORMAT_FLAG_BIT_MAX, -}; - -#define DPU_FORMAT_FLAG_YUV BIT(DPU_FORMAT_FLAG_YUV_BIT) -#define DPU_FORMAT_FLAG_DX BIT(DPU_FORMAT_FLAG_DX_BIT) -#define DPU_FORMAT_FLAG_COMPRESSED BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT) -#define DPU_FORMAT_IS_YUV(X) \ - (test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag)) -#define DPU_FORMAT_IS_DX(X) \ - (test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag)) -#define DPU_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == DPU_FETCH_LINEAR) -#define DPU_FORMAT_IS_TILE(X) \ - (((X)->fetch_mode == DPU_FETCH_UBWC) && \ - !test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag)) -#define DPU_FORMAT_IS_UBWC(X) \ - (((X)->fetch_mode == DPU_FETCH_UBWC) && \ - test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag)) - #define DPU_BLEND_FG_ALPHA_FG_CONST (0 << 0) #define DPU_BLEND_FG_ALPHA_BG_CONST (1 << 0) #define DPU_BLEND_FG_ALPHA_FG_PIXEL (2 << 0) @@ -290,67 +270,6 @@ enum dpu_vbif { VBIF_MAX, }; -/** - * DPU HW,Component order color map - */ -enum { - C0_G_Y = 0, - C1_B_Cb = 1, - C2_R_Cr = 2, - C3_ALPHA = 3 -}; - -/** - * enum dpu_plane_type - defines how the color component pixel packing - * @DPU_PLANE_INTERLEAVED : Color components in single plane - * @DPU_PLANE_PLANAR : Color component in separate planes - * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane - */ -enum dpu_plane_type { - DPU_PLANE_INTERLEAVED, - DPU_PLANE_PLANAR, - DPU_PLANE_PSEUDO_PLANAR, -}; - -/** - * enum dpu_chroma_samp_type - chroma sub-samplng type - * @DPU_CHROMA_RGB : No chroma subsampling - * @DPU_CHROMA_H2V1 : Chroma pixels are horizontally subsampled - * @DPU_CHROMA_H1V2 : Chroma 
pixels are vertically subsampled - * @DPU_CHROMA_420 : 420 subsampling - */ -enum dpu_chroma_samp_type { - DPU_CHROMA_RGB, - DPU_CHROMA_H2V1, - DPU_CHROMA_H1V2, - DPU_CHROMA_420 -}; - -/** - * dpu_fetch_type - Defines How DPU HW fetches data - * @DPU_FETCH_LINEAR : fetch is line by line - * @DPU_FETCH_TILE : fetches data in Z order from a tile - * @DPU_FETCH_UBWC : fetch and decompress data - */ -enum dpu_fetch_type { - DPU_FETCH_LINEAR, - DPU_FETCH_TILE, - DPU_FETCH_UBWC -}; - -/** - * Value of enum chosen to fit the number of bits - * expected by the HW programming. - */ -enum { - COLOR_ALPHA_1BIT = 0, - COLOR_ALPHA_4BIT = 1, - COLOR_4BIT = 0, - COLOR_5BIT = 1, /* No 5-bit Alpha */ - COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */ - COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */ -}; - /** * enum dpu_3d_blend_mode * Desribes how the 3d data is blended @@ -370,43 +289,6 @@ enum dpu_3d_blend_mode { BLEND_3D_MAX }; -/** struct dpu_format - defines the format configuration which - * allows DPU HW to correctly fetch and decode the format - * @base: base msm_format structure containing fourcc code - * @fetch_planes: how the color components are packed in pixel format - * @element: element color ordering - * @bits: element bit widths - * @chroma_sample: chroma sub-samplng type - * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB - * @unpack_tight: 0 for loose, 1 for tight - * @unpack_count: 0 = 1 component, 1 = 2 component - * @bpp: bytes per pixel - * @alpha_enable: whether the format has an alpha channel - * @num_planes: number of planes (including meta data planes) - * @fetch_mode: linear, tiled, or ubwc hw fetch behavior - * @flag: usage bit flags - * @tile_width: format tile width - * @tile_height: format tile height - */ -struct dpu_format { - struct msm_format base; - enum dpu_plane_type fetch_planes; - u8 element[DPU_MAX_PLANES]; - u8 bits[DPU_MAX_PLANES]; - enum dpu_chroma_samp_type chroma_sample; - u8 unpack_align_msb; - u8 unpack_tight; - u8 unpack_count; - u8 bpp; - u8 alpha_enable; - u8 num_planes; - enum dpu_fetch_type fetch_mode; - DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX); - u16 tile_width; - u16 tile_height; -}; -#define to_dpu_format(x) container_of(x, struct dpu_format, base) - /** * struct dpu_hw_fmt_layout - format information of the source pixel data * @format: pixel format parameters @@ -419,7 +301,7 @@ struct dpu_format { * @plane_pitch: pitch of each plane */ struct dpu_hw_fmt_layout { - const struct dpu_format *format; + const struct msm_format *format; uint32_t num_planes; uint32_t width; uint32_t height; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c index 0bf8a83e8d..2c720f1fc1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -2,6 +2,8 @@ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
*/ +#include + #include "dpu_hwio.h" #include "dpu_hw_catalog.h" #include "dpu_hw_lm.h" @@ -206,7 +208,7 @@ static void _sspp_setup_csc10_opmode(struct dpu_hw_sspp *ctx, * Setup source pixel format, flip, */ static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe, - const struct dpu_format *fmt, u32 flags) + const struct msm_format *fmt, u32 flags) { struct dpu_hw_sspp *ctx = pipe->sspp; struct dpu_hw_blk_reg_map *c; @@ -241,20 +243,20 @@ static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe, chroma_samp = fmt->chroma_sample; if (flags & DPU_SSPP_SOURCE_ROTATED_90) { - if (chroma_samp == DPU_CHROMA_H2V1) - chroma_samp = DPU_CHROMA_H1V2; - else if (chroma_samp == DPU_CHROMA_H1V2) - chroma_samp = DPU_CHROMA_H2V1; + if (chroma_samp == CHROMA_H2V1) + chroma_samp = CHROMA_H1V2; + else if (chroma_samp == CHROMA_H1V2) + chroma_samp = CHROMA_H2V1; } - src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) | - (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) | - (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0); + src_format = (chroma_samp << 23) | (fmt->fetch_type << 19) | + (fmt->bpc_a << 6) | (fmt->bpc_r_cr << 4) | + (fmt->bpc_b_cb << 2) | (fmt->bpc_g_y << 0); if (flags & DPU_SSPP_ROT_90) src_format |= BIT(11); /* ROT90 */ - if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED) + if (fmt->alpha_enable && fmt->fetch_type == MDP_PLANE_INTERLEAVED) src_format |= BIT(8); /* SRCC3_EN */ if (flags & DPU_SSPP_SOLID_FILL) @@ -263,12 +265,12 @@ static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe, unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) | (fmt->element[1] << 8) | (fmt->element[0] << 0); src_format |= ((fmt->unpack_count - 1) << 12) | - (fmt->unpack_tight << 17) | - (fmt->unpack_align_msb << 18) | + ((fmt->flags & MSM_FORMAT_FLAG_UNPACK_TIGHT ? 1 : 0) << 17) | + ((fmt->flags & MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB ? 1 : 0) << 18) | ((fmt->bpp - 1) << 9); - if (fmt->fetch_mode != DPU_FETCH_LINEAR) { - if (DPU_FORMAT_IS_UBWC(fmt)) + if (fmt->fetch_mode != MDP_FETCH_LINEAR) { + if (MSM_FORMAT_IS_UBWC(fmt)) opmode |= MDSS_MDP_OP_BWC_EN; src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */ DPU_REG_WRITE(c, SSPP_FETCH_CONFIG, @@ -295,7 +297,7 @@ static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe, break; case UBWC_4_0: DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL, - DPU_FORMAT_IS_YUV(fmt) ? 0 : BIT(30)); + MSM_FORMAT_IS_YUV(fmt) ? 
0 : BIT(30)); break; } } @@ -303,20 +305,20 @@ static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe, opmode |= MDSS_MDP_OP_PE_OVERRIDE; /* if this is YUV pixel format, enable CSC */ - if (DPU_FORMAT_IS_YUV(fmt)) + if (MSM_FORMAT_IS_YUV(fmt)) src_format |= BIT(15); - if (DPU_FORMAT_IS_DX(fmt)) + if (MSM_FORMAT_IS_DX(fmt)) src_format |= BIT(14); /* update scaler opmode, if appropriate */ if (test_bit(DPU_SSPP_CSC, &ctx->cap->features)) _sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT, - DPU_FORMAT_IS_YUV(fmt)); + MSM_FORMAT_IS_YUV(fmt)); else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) _sspp_setup_csc10_opmode(ctx, VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT, - DPU_FORMAT_IS_YUV(fmt)); + MSM_FORMAT_IS_YUV(fmt)); DPU_REG_WRITE(c, format_off, src_format); DPU_REG_WRITE(c, unpack_pat_off, unpack); @@ -385,7 +387,7 @@ static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_sspp *ctx, static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_sspp *ctx, struct dpu_hw_scaler3_cfg *scaler3_cfg, - const struct dpu_format *format) + const struct msm_format *format) { if (!ctx || !scaler3_cfg) return; @@ -556,7 +558,7 @@ static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_sspp *ctx, } static void dpu_hw_sspp_setup_cdp(struct dpu_sw_pipe *pipe, - const struct dpu_format *fmt, + const struct msm_format *fmt, bool enable) { struct dpu_hw_sspp *ctx = pipe->sspp; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h index b7dc52312c..4a910b8086 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h @@ -183,7 +183,7 @@ struct dpu_hw_sspp_ops { * @flags: Extra flags for format config */ void (*setup_format)(struct dpu_sw_pipe *pipe, - const struct dpu_format *fmt, u32 flags); + const struct msm_format *fmt, u32 flags); /** * setup_rects - setup pipe ROI rectangles @@ -279,7 +279,7 @@ struct dpu_hw_sspp_ops { */ void (*setup_scaler)(struct dpu_hw_sspp *ctx, struct dpu_hw_scaler3_cfg *scaler3_cfg, - const struct dpu_format *format); + const struct msm_format *format); /** * setup_cdp - setup client driven prefetch @@ -288,7 +288,7 @@ struct dpu_hw_sspp_ops { * @enable: whether the CDP should be enabled for this pipe */ void (*setup_cdp)(struct dpu_sw_pipe *pipe, - const struct dpu_format *fmt, + const struct msm_format *fmt, bool enable); }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c index dd47582731..486be346d4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c @@ -282,7 +282,7 @@ static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c, void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c, struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 scaler_offset, u32 scaler_version, - const struct dpu_format *format) + const struct msm_format *format) { u32 op_mode = 0; u32 phase_init, preload, src_y_rgb, src_uv, dst; @@ -293,7 +293,7 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c, op_mode |= BIT(0); op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16; - if (format && DPU_FORMAT_IS_YUV(format)) { + if (format && MSM_FORMAT_IS_YUV(format)) { op_mode |= BIT(12); op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24; } @@ -367,7 +367,7 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c, DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst); end: - if (format && !DPU_FORMAT_IS_DX(format)) + if (format && !MSM_FORMAT_IS_DX(format)) op_mode |= BIT(14); if (format 
&& format->alpha_enable) { @@ -522,16 +522,16 @@ int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c, #define CDP_PRELOAD_AHEAD_64 BIT(3) void dpu_setup_cdp(struct dpu_hw_blk_reg_map *c, u32 offset, - const struct dpu_format *fmt, bool enable) + const struct msm_format *fmt, bool enable) { u32 cdp_cntl = CDP_PRELOAD_AHEAD_64; if (enable) cdp_cntl |= CDP_ENABLE; - if (DPU_FORMAT_IS_UBWC(fmt)) + if (MSM_FORMAT_IS_UBWC(fmt)) cdp_cntl |= CDP_UBWC_META_ENABLE; - if (DPU_FORMAT_IS_UBWC(fmt) || - DPU_FORMAT_IS_TILE(fmt)) + if (MSM_FORMAT_IS_UBWC(fmt) || + MSM_FORMAT_IS_TILE(fmt)) cdp_cntl |= CDP_TILE_AMORTIZE_ENABLE; DPU_REG_WRITE(c, offset, cdp_cntl); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h index 64ded69fa9..67b08e9933 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h @@ -344,14 +344,14 @@ void *dpu_hw_util_get_dir(void); void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c, struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 scaler_offset, u32 scaler_version, - const struct dpu_format *format); + const struct msm_format *format); void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c, u32 csc_reg_off, const struct dpu_csc_cfg *data, bool csc10); void dpu_setup_cdp(struct dpu_hw_blk_reg_map *c, u32 offset, - const struct dpu_format *fmt, bool enable); + const struct msm_format *fmt, bool enable); u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl, u32 total_fl); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c index e75995f7fc..93ff01c889 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c @@ -67,7 +67,7 @@ static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *data) { struct dpu_hw_blk_reg_map *c = &ctx->hw; - const struct dpu_format *fmt = data->dest.format; + const struct msm_format *fmt = data->dest.format; u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp; u32 write_config = 0; u32 opmode = 0; @@ -76,20 +76,20 @@ static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx, chroma_samp = fmt->chroma_sample; dst_format = (chroma_samp << 23) | - (fmt->fetch_planes << 19) | - (fmt->bits[C3_ALPHA] << 6) | - (fmt->bits[C2_R_Cr] << 4) | - (fmt->bits[C1_B_Cb] << 2) | - (fmt->bits[C0_G_Y] << 0); + (fmt->fetch_type << 19) | + (fmt->bpc_a << 6) | + (fmt->bpc_r_cr << 4) | + (fmt->bpc_b_cb << 2) | + (fmt->bpc_g_y << 0); - if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) { + if (fmt->bpc_a || fmt->alpha_enable) { dst_format |= BIT(8); /* DSTC3_EN */ if (!fmt->alpha_enable || !(ctx->caps->features & BIT(DPU_WB_PIPE_ALPHA))) dst_format |= BIT(14); /* DST_ALPHA_X */ } - if (DPU_FORMAT_IS_YUV(fmt)) + if (MSM_FORMAT_IS_YUV(fmt)) dst_format |= BIT(15); pattern = (fmt->element[3] << 24) | @@ -97,8 +97,8 @@ static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx, (fmt->element[1] << 8) | (fmt->element[0] << 0); - dst_format |= (fmt->unpack_align_msb << 18) | - (fmt->unpack_tight << 17) | + dst_format |= ((fmt->flags & MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB ? 1 : 0) << 18) | + ((fmt->flags & MSM_FORMAT_FLAG_UNPACK_TIGHT ? 
1 : 0) << 17) | ((fmt->unpack_count - 1) << 12) | ((fmt->bpp - 1) << 9);
@@ -149,7 +149,7 @@ static void dpu_hw_wb_setup_qos_lut(struct dpu_hw_wb *ctx,
 }
 
 static void dpu_hw_wb_setup_cdp(struct dpu_hw_wb *ctx,
-				const struct dpu_format *fmt,
+				const struct msm_format *fmt,
 				bool enable)
 {
 	if (!ctx)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
index e671796ea3..37497473e1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
@@ -46,7 +46,7 @@ struct dpu_hw_wb_ops {
 			struct dpu_hw_qos_cfg *cfg);
 
 	void (*setup_cdp)(struct dpu_hw_wb *ctx,
-			  const struct dpu_format *fmt,
+			  const struct msm_format *fmt,
 			  bool enable);
 
 	bool (*setup_clk_force_ctrl)(struct dpu_hw_wb *ctx,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index a1f5d7c4ab..1955848b1b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -348,9 +348,18 @@ static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
 	kfree(dpu_state);
 }
 
+static void dpu_kms_global_print_state(struct drm_printer *p,
+				       const struct drm_private_state *state)
+{
+	const struct dpu_global_state *global_state = to_dpu_global_state(state);
+
+	dpu_rm_print_state(p, global_state);
+}
+
 static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
 	.atomic_duplicate_state = dpu_kms_global_duplicate_state,
 	.atomic_destroy_state = dpu_kms_global_destroy_state,
+	.atomic_print_state = dpu_kms_global_print_state,
 };
 
 static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
@@ -364,6 +373,9 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
 	drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
 				    &state->base,
 				    &dpu_kms_global_state_funcs);
+
+	state->rm = &dpu_kms->rm;
+
 	return 0;
 }
 
@@ -970,7 +982,6 @@ static const struct msm_kms_funcs kms_funcs = {
 	.enable_vblank   = dpu_kms_enable_vblank,
 	.disable_vblank  = dpu_kms_disable_vblank,
 	.check_modified_format = dpu_format_check_modified_format,
-	.get_format      = dpu_get_msm_format,
 	.destroy         = dpu_kms_destroy,
 	.snapshot        = dpu_kms_mdp_snapshot,
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index b5db3fc76c..e2adc937ea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -130,6 +130,8 @@ struct vsync_info {
 struct dpu_global_state {
 	struct drm_private_state base;
 
+	struct dpu_rm *rm;
+
 	uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
 	uint32_t mixer_to_enc_id[LM_MAX - LM_0];
 	uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index ff975ad511..1c3a265745 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -113,7 +113,7 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
  * Prefill BW Equation: line src bytes * line_time
  */
 static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
-	const struct dpu_format *fmt,
+	const struct msm_format *fmt,
 	const struct drm_display_mode *mode,
 	struct dpu_sw_pipe_cfg *pipe_cfg)
 {
@@ -195,7 +195,7 @@ static u64 _dpu_plane_calc_clk(const struct drm_display_mode *mode,
 static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
 		struct dpu_sw_pipe *pipe,
 		enum dpu_qos_lut_usage lut_usage,
-		const struct dpu_format *fmt, u32 src_width)
+		const struct msm_format *fmt, u32 src_width)
 {
 	struct dpu_plane *pdpu;
 	u32 fixed_buff_size;
@@ -214,8 +214,8 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
 
 	/* FIXME: in multirect case account for the src_width of all the planes */
 
-	if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
-		if (fmt->chroma_sample == DPU_CHROMA_420) {
+	if (fmt->fetch_type == MDP_PLANE_PSEUDO_PLANAR) {
+		if (fmt->chroma_sample == CHROMA_420) {
 			/* NV12 */
 			total_fl = (fixed_buff_size / 2) /
 				((src_width + 32) * fmt->bpp);
@@ -234,9 +234,9 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
 		}
 	}
 
-	DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s w:%u fl:%u\n",
+	DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %p4cc w:%u fl:%u\n",
 			pipe->sspp->idx - SSPP_VIG0,
-			(char *)&fmt->base.pixel_format,
+			&fmt->pixel_format,
 			src_width, total_fl);
 
 	return total_fl;
@@ -251,7 +251,7 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
  */
 static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
 		struct dpu_sw_pipe *pipe,
-		const struct dpu_format *fmt, struct dpu_sw_pipe_cfg *pipe_cfg)
+		const struct msm_format *fmt, struct dpu_sw_pipe_cfg *pipe_cfg)
 {
 	struct dpu_plane *pdpu = to_dpu_plane(plane);
 	struct dpu_hw_qos_cfg cfg;
@@ -260,7 +260,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
 	if (!pdpu->is_rt_pipe) {
 		lut_usage = DPU_QOS_LUT_USAGE_NRT;
 	} else {
-		if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
+		if (fmt && MSM_FORMAT_IS_LINEAR(fmt))
 			lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
 		else
 			lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
@@ -284,26 +284,26 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
 			pdpu->is_rt_pipe);
 
 	trace_dpu_perf_set_qos_luts(pipe->sspp->idx - SSPP_VIG0,
-			(fmt) ? fmt->base.pixel_format : 0,
+			(fmt) ? fmt->pixel_format : 0,
 			pdpu->is_rt_pipe, total_fl, cfg.creq_lut, lut_usage);
 
-	DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
+	DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %p4cc rt:%d fl:%u lut:0x%llx\n",
 			pdpu->pipe - SSPP_VIG0,
-			fmt ? (char *)&fmt->base.pixel_format : NULL,
+			fmt ? &fmt->pixel_format : NULL,
 			pdpu->is_rt_pipe, total_fl, cfg.creq_lut);
 
 	trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
-			(fmt) ? fmt->base.pixel_format : 0,
+			(fmt) ? fmt->pixel_format : 0,
 			(fmt) ? fmt->fetch_mode : 0,
 			cfg.danger_lut,
 			cfg.safe_lut);
 
-	DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
-			pdpu->pipe - SSPP_VIG0,
-			fmt ? (char *)&fmt->base.pixel_format : NULL,
-			fmt ? fmt->fetch_mode : -1,
-			cfg.danger_lut,
-			cfg.safe_lut);
+	DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %p4cc mode:%d luts[0x%x, 0x%x]\n",
+			pdpu->pipe - SSPP_VIG0,
+			fmt ? &fmt->pixel_format : NULL,
+			fmt ? fmt->fetch_mode : -1,
+			cfg.danger_lut,
+			cfg.safe_lut);
 
 	pipe->sspp->ops.setup_qos_lut(pipe->sspp, &cfg);
 }
@@ -425,7 +425,7 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane,
 static void _dpu_plane_setup_scaler3(struct dpu_hw_sspp *pipe_hw,
 		uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
 		struct dpu_hw_scaler3_cfg *scale_cfg,
-		const struct dpu_format *fmt,
+		const struct msm_format *fmt,
 		uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v,
 		unsigned int rotation)
 {
@@ -477,7 +477,7 @@ static void _dpu_plane_setup_scaler3(struct dpu_hw_sspp *pipe_hw,
 			scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
 		}
 	}
-	if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+	if (!(MSM_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
 		&& (src_w == dst_w))
 		return;
 
@@ -510,11 +510,11 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
 }
 
 static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe,
-		const struct dpu_format *fmt)
+		const struct msm_format *fmt)
 {
 	const struct dpu_csc_cfg *csc_ptr;
 
-	if (!DPU_FORMAT_IS_YUV(fmt))
+	if (!MSM_FORMAT_IS_YUV(fmt))
 		return NULL;
 
 	if (BIT(DPU_SSPP_CSC_10BIT) & pipe->sspp->cap->features)
@@ -526,12 +526,12 @@ static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe,
 }
 
 static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe,
-		const struct dpu_format *fmt, bool color_fill,
+		const struct msm_format *fmt, bool color_fill,
 		struct dpu_sw_pipe_cfg *pipe_cfg,
 		unsigned int rotation)
 {
 	struct dpu_hw_sspp *pipe_hw = pipe->sspp;
-	const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
+	const struct drm_format_info *info = drm_format_info(fmt->pixel_format);
 	struct dpu_hw_scaler3_cfg scaler3_cfg;
 	struct dpu_hw_pixel_ext pixel_ext;
 	u32 src_width = drm_rect_width(&pipe_cfg->src_rect);
@@ -577,7 +577,7 @@ static void _dpu_plane_color_fill_pipe(struct dpu_plane_state *pstate,
 		struct dpu_sw_pipe *pipe,
 		struct drm_rect *dst_rect,
 		u32 fill_color,
-		const struct dpu_format *fmt)
+		const struct msm_format *fmt)
 {
 	struct dpu_sw_pipe_cfg pipe_cfg;
 
@@ -615,8 +615,9 @@ static void _dpu_plane_color_fill_pipe(struct dpu_plane_state *pstate,
 static void _dpu_plane_color_fill(struct dpu_plane *pdpu,
 		uint32_t color, uint32_t alpha)
 {
-	const struct dpu_format *fmt;
+	const struct msm_format *fmt;
 	const struct drm_plane *plane = &pdpu->base;
+	struct msm_drm_private *priv = plane->dev->dev_private;
 	struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
 	u32 fill_color = (color & 0xFFFFFF) | ((alpha & 0xFF) << 24);
 
@@ -626,7 +627,7 @@ static void _dpu_plane_color_fill(struct dpu_plane *pdpu,
 	 * select fill format to match user property expectation,
 	 * h/w only supports RGB variants
 	 */
-	fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+	fmt = mdp_get_format(priv->kms, DRM_FORMAT_ABGR8888, 0);
 	/* should not happen ever */
 	if (!fmt)
 		return;
@@ -704,7 +705,7 @@ static void dpu_plane_cleanup_fb(struct drm_plane *plane,
 
 static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
 		const struct dpu_sspp_sub_blks *sblk,
-		struct drm_rect src, const struct dpu_format *fmt)
+		struct drm_rect src, const struct msm_format *fmt)
 {
 	size_t num_formats;
 	const u32 *supported_formats;
@@ -723,8 +724,8 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
 	supported_formats = sblk->rotation_cfg->rot_format_list;
 	num_formats = sblk->rotation_cfg->rot_num_formats;
 
-	if (!DPU_FORMAT_IS_UBWC(fmt) ||
-	    !dpu_find_format(fmt->base.pixel_format, supported_formats, num_formats))
+	if (!MSM_FORMAT_IS_UBWC(fmt) ||
+	    !dpu_find_format(fmt->pixel_format, supported_formats, num_formats))
 		return -EINVAL;
 
 	return 0;
@@ -733,15 +734,15 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
 static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
 		struct dpu_sw_pipe *pipe,
 		struct dpu_sw_pipe_cfg *pipe_cfg,
-		const struct dpu_format *fmt,
+		const struct msm_format *fmt,
 		const struct drm_display_mode *mode)
 {
 	uint32_t min_src_size;
 	struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
 
-	min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+	min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 2 : 1;
 
-	if (DPU_FORMAT_IS_YUV(fmt) &&
+	if (MSM_FORMAT_IS_YUV(fmt) &&
 	    (!pipe->sspp->cap->sblk->scaler_blk.len ||
 	     !pipe->sspp->cap->sblk->csc_blk.len)) {
 		DPU_DEBUG_PLANE(pdpu,
@@ -758,7 +759,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
 	}
 
 	/* valid yuv image */
-	if (DPU_FORMAT_IS_YUV(fmt) &&
+	if (MSM_FORMAT_IS_YUV(fmt) &&
 	    (pipe_cfg->src_rect.x1 & 0x1 ||
 	     pipe_cfg->src_rect.y1 & 0x1 ||
 	     drm_rect_width(&pipe_cfg->src_rect) & 0x1 ||
@@ -798,7 +799,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
 	struct dpu_sw_pipe *pipe = &pstate->pipe;
 	struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
 	const struct drm_crtc_state *crtc_state = NULL;
-	const struct dpu_format *fmt;
+	const struct msm_format *fmt;
 	struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
 	struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
 	struct drm_rect fb_rect = { 0 };
@@ -858,7 +859,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
 		return -E2BIG;
 	}
 
-	fmt = to_dpu_format(msm_framebuffer_format(new_plane_state->fb));
+	fmt = msm_framebuffer_format(new_plane_state->fb);
 
 	max_linewidth = pdpu->catalog->caps->max_linewidth;
 
@@ -870,7 +871,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
 		 * full width is more than max_linewidth, thus each rect is
 		 * wider than allowed.
 		 */
-		if (DPU_FORMAT_IS_UBWC(fmt) &&
+		if (MSM_FORMAT_IS_UBWC(fmt) &&
 		    drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
 			DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n",
 					DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
@@ -887,7 +888,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
 			   drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect) ||
 			   (!test_bit(DPU_SSPP_SMART_DMA_V1, &pipe->sspp->cap->features) &&
 			    !test_bit(DPU_SSPP_SMART_DMA_V2, &pipe->sspp->cap->features)) ||
-			   DPU_FORMAT_IS_YUV(fmt)) {
+			   MSM_FORMAT_IS_YUV(fmt)) {
 			DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, can't use split source\n",
 					DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
 			return -E2BIG;
@@ -945,8 +946,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
 
 static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe)
 {
-	const struct dpu_format *format =
-		to_dpu_format(msm_framebuffer_format(pdpu->base.state->fb));
+	const struct msm_format *format =
+		msm_framebuffer_format(pdpu->base.state->fb);
 	const struct dpu_csc_cfg *csc_ptr;
 
 	if (!pipe->sspp || !pipe->sspp->ops.setup_csc)
@@ -1017,7 +1018,7 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error)
 static void dpu_plane_sspp_update_pipe(struct drm_plane *plane,
 		struct dpu_sw_pipe *pipe,
 		struct dpu_sw_pipe_cfg *pipe_cfg,
-		const struct dpu_format *fmt,
+		const struct msm_format *fmt,
 		int frame_rate,
 		struct dpu_hw_fmt_layout *layout)
 {
@@ -1095,8 +1096,8 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
 	struct drm_crtc *crtc = state->crtc;
 	struct drm_framebuffer *fb = state->fb;
 	bool is_rt_pipe;
-	const struct dpu_format *fmt =
-		to_dpu_format(msm_framebuffer_format(fb));
+	const struct msm_format *fmt =
+		msm_framebuffer_format(fb);
 	struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
 	struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
 	struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
@@ -1118,9 +1119,9 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
 	pdpu->is_rt_pipe = is_rt_pipe;
 
 	DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
-			", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
+			", %p4cc ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
 			crtc->base.id, DRM_RECT_ARG(&state->dst),
-			(char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt));
+			&fmt->pixel_format, MSM_FORMAT_IS_UBWC(fmt));
 
 	dpu_plane_sspp_update_pipe(plane, pipe, pipe_cfg, fmt,
 				   drm_mode_vrefresh(&crtc->mode),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index cb5ce3c62a..44938ba7a2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -758,3 +758,59 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
 
 	return num_blks;
 }
+
+static void dpu_rm_print_state_helper(struct drm_printer *p,
+				      struct dpu_hw_blk *blk,
+				      uint32_t mapping)
+{
+	if (!blk)
+		drm_puts(p, "- ");
+	else if (!mapping)
+		drm_puts(p, "# ");
+	else
+		drm_printf(p, "%d ", mapping);
+}
+
+
+void dpu_rm_print_state(struct drm_printer *p,
+			const struct dpu_global_state *global_state)
+{
+	const struct dpu_rm *rm = global_state->rm;
+	int i;
+
+	drm_puts(p, "resource mapping:\n");
+	drm_puts(p, "\tpingpong=");
+	for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_enc_id); i++)
+		dpu_rm_print_state_helper(p, rm->pingpong_blks[i],
+					  global_state->pingpong_to_enc_id[i]);
+	drm_puts(p, "\n");
+
+	drm_puts(p, "\tmixer=");
+	for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_enc_id); i++)
+		dpu_rm_print_state_helper(p, rm->mixer_blks[i],
+					  global_state->mixer_to_enc_id[i]);
+	drm_puts(p, "\n");
+
+	drm_puts(p, "\tctl=");
+	for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_enc_id); i++)
+		dpu_rm_print_state_helper(p, rm->ctl_blks[i],
+					  global_state->ctl_to_enc_id[i]);
+	drm_puts(p, "\n");
+
+	drm_puts(p, "\tdspp=");
+	for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_enc_id); i++)
+		dpu_rm_print_state_helper(p, rm->dspp_blks[i],
+					  global_state->dspp_to_enc_id[i]);
+	drm_puts(p, "\n");
+
+	drm_puts(p, "\tdsc=");
+	for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_enc_id); i++)
+		dpu_rm_print_state_helper(p, rm->dsc_blks[i],
+					  global_state->dsc_to_enc_id[i]);
+	drm_puts(p, "\n");
+
+	drm_puts(p, "\tcdm=");
+	dpu_rm_print_state_helper(p, rm->cdm_blk,
+				  global_state->cdm_to_enc_id);
+	drm_puts(p, "\n");
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index e3f83ebc65..e63db8ace6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -89,6 +89,14 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
 	struct dpu_global_state *global_state, uint32_t enc_id,
 	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
 
+/**
+ * dpu_rm_print_state - output the RM private state
+ * @p: DRM printer
+ * @global_state: global state
+ */
+void dpu_rm_print_state(struct drm_printer *p,
+			const struct dpu_global_state *global_state);
+
 /**
  * dpu_rm_get_intf - Return a struct dpu_hw_intf instance given it's index.
  * @rm: DPU Resource Manager handle
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index bd92fb2979..0fdd41162e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -113,7 +113,7 @@ TRACE_EVENT(tracing_mark_write,
 	),
 	TP_fast_assign(
 			__entry->pid = pid;
-			__assign_str(trace_name, name);
+			__assign_str(trace_name);
 			__entry->trace_begin = trace_begin;
 	),
 	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
@@ -130,7 +130,7 @@ TRACE_EVENT(dpu_trace_counter,
 	),
 	TP_fast_assign(
 			__entry->pid = current->tgid;
-			__assign_str(counter_name, name);
+			__assign_str(counter_name);
 			__entry->value = value;
 	),
 	TP_printk("%d|%s|%d", __entry->pid,
@@ -379,7 +379,7 @@ TRACE_EVENT(dpu_enc_rc,
 		__entry->sw_event = sw_event;
 		__entry->idle_pc_supported = idle_pc_supported;
 		__entry->rc_state = rc_state;
-		__assign_str(stage_str, stage);
+		__assign_str(stage_str);
 	),
 	TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d",
 		  __get_str(stage_str), __entry->drm_id, __entry->sw_event,
@@ -401,7 +401,7 @@ TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
 	TP_fast_assign(
 		__entry->drm_id = drm_id;
 		__entry->event = event;
-		__assign_str(intf_mode_str, intf_mode);
+		__assign_str(intf_mode_str);
 		__entry->intf_idx = intf_idx;
 		__entry->wb_idx = wb_idx;
 	),
@@ -446,7 +446,7 @@ TRACE_EVENT(dpu_enc_trigger_flush,
 	),
 	TP_fast_assign(
 		__entry->drm_id = drm_id;
-		__assign_str(intf_mode_str, intf_mode);
+		__assign_str(intf_mode_str);
 		__entry->intf_idx = intf_idx;
 		__entry->wb_idx = wb_idx;
 		__entry->pending_kickoff_cnt = pending_kickoff_cnt;
@@ -946,7 +946,7 @@ TRACE_EVENT(dpu_core_perf_update_clk,
 		__field( u64, clk_rate )
 	),
 	TP_fast_assign(
-		__assign_str(dev_name, dev->unique);
+		__assign_str(dev_name);
 		__entry->stop_req = stop_req;
 		__entry->clk_rate = clk_rate;
 	),
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
deleted file mode 100644
index cc8fde4508..0000000000
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
+++ /dev/null
@@ -1,1181 +0,0 @@
-#ifndef MDP4_XML
-#define MDP4_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
--
/home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) - -Copyright (C) 2013-2022 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ - - -enum mdp4_pipe { - VG1 = 0, - VG2 = 1, - RGB1 = 2, - RGB2 = 3, - RGB3 = 4, - VG3 = 5, - VG4 = 6, -}; - -enum mdp4_mixer { - MIXER0 = 0, - MIXER1 = 1, - MIXER2 = 2, -}; - -enum mdp4_intf { - INTF_LCDC_DTV = 0, - INTF_DSI_VIDEO = 1, - INTF_DSI_CMD = 2, - INTF_EBI2_TV = 3, -}; - -enum mdp4_cursor_format { - CURSOR_ARGB = 1, - CURSOR_XRGB = 2, -}; - -enum mdp4_frame_format { - FRAME_LINEAR = 0, - FRAME_TILE_ARGB_4X4 = 1, - FRAME_TILE_YCBCR_420 = 2, -}; - -enum mdp4_scale_unit { - SCALE_FIR = 0, - SCALE_MN_PHASE = 1, - SCALE_PIXEL_RPT = 2, -}; - -enum mdp4_dma { - DMA_P = 0, - DMA_S = 1, - DMA_E = 2, -}; - -#define MDP4_IRQ_OVERLAY0_DONE 0x00000001 -#define MDP4_IRQ_OVERLAY1_DONE 0x00000002 -#define MDP4_IRQ_DMA_S_DONE 0x00000004 -#define MDP4_IRQ_DMA_E_DONE 0x00000008 -#define MDP4_IRQ_DMA_P_DONE 0x00000010 -#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020 -#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040 -#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080 -#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100 -#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200 -#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400 -#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800 -#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000 -#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000 -#define MDP4_IRQ_OVERLAY2_DONE 0x40000000 -#define REG_MDP4_VERSION 0x00000000 -#define MDP4_VERSION_MINOR__MASK 0x00ff0000 -#define MDP4_VERSION_MINOR__SHIFT 16 -static inline uint32_t MDP4_VERSION_MINOR(uint32_t val) -{ - return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK; -} -#define MDP4_VERSION_MAJOR__MASK 0xff000000 -#define MDP4_VERSION_MAJOR__SHIFT 24 -static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val) -{ - return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK; -} - -#define REG_MDP4_OVLP0_KICK 0x00000004 - -#define REG_MDP4_OVLP1_KICK 0x00000008 - -#define REG_MDP4_OVLP2_KICK 0x000000d0 - -#define REG_MDP4_DMA_P_KICK 0x0000000c - 
-#define REG_MDP4_DMA_S_KICK 0x00000010 - -#define REG_MDP4_DMA_E_KICK 0x00000014 - -#define REG_MDP4_DISP_STATUS 0x00000018 - -#define REG_MDP4_DISP_INTF_SEL 0x00000038 -#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003 -#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0 -static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val) -{ - return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK; -} -#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c -#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2 -static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val) -{ - return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK; -} -#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030 -#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4 -static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val) -{ - return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK; -} -#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040 -#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080 - -#define REG_MDP4_RESET_STATUS 0x0000003c - -#define REG_MDP4_READ_CNFG 0x0000004c - -#define REG_MDP4_INTR_ENABLE 0x00000050 - -#define REG_MDP4_INTR_STATUS 0x00000054 - -#define REG_MDP4_INTR_CLEAR 0x00000058 - -#define REG_MDP4_EBI2_LCD0 0x00000060 - -#define REG_MDP4_EBI2_LCD1 0x00000064 - -#define REG_MDP4_PORTMAP_MODE 0x00000070 - -#define REG_MDP4_CS_CONTROLLER0 0x000000c0 - -#define REG_MDP4_CS_CONTROLLER1 0x000000c4 - -#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 
0x00800000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000 -#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28 -static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK; -} -#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000 - -#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc - -#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100 -#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007 -#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008 -#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070 -#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080 -#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700 -#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800 -#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000 -#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28 -static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK; -} -#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000 - -#define 
REG_MDP4_VG2_SRC_FORMAT 0x00030050 - -#define REG_MDP4_VG2_CONST_COLOR 0x00031008 - -#define REG_MDP4_OVERLAY_FLUSH 0x00018000 -#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001 -#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002 -#define MDP4_OVERLAY_FLUSH_VG1 0x00000004 -#define MDP4_OVERLAY_FLUSH_VG2 0x00000008 -#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010 -#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020 - -static inline uint32_t __offset_OVLP(uint32_t idx) -{ - switch (idx) { - case 0: return 0x00010000; - case 1: return 0x00018000; - case 2: return 0x00088000; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); } -#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK; -} -#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff -#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); } - -static inline uint32_t __offset_STAGE(uint32_t idx) -{ - switch (idx) { - case 0: return 0x00000104; - case 1: return 0x00000124; - case 2: return 0x00000144; - case 3: return 0x00000160; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } -#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003 -#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0 -static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val) -{ - return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK; -} -#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004 -#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008 -#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030 -#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4 -static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val) -{ - return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK; -} -#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040 -#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080 -#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100 -#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200 - -static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 
0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); } - -static inline uint32_t __offset_STAGE_CO3(uint32_t idx) -{ - switch (idx) { - case 0: return 0x00001004; - case 1: return 0x00001404; - case 2: return 0x00001804; - case 3: return 0x00001b84; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); } - -static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); } -#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001 - -static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); } - -static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); } - - -static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; } - -#define REG_MDP4_DMA_P_OP_MODE 0x00090070 - -static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; } - -static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; } - -#define REG_MDP4_DMA_S_OP_MODE 0x000a0028 - -static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; } - -static inline uint32_t __offset_DMA(enum mdp4_dma idx) -{ - switch (idx) { - case DMA_P: return 0x00090000; - case DMA_S: return 0x000a0000; - case DMA_E: return 
0x000b0000; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } -#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003 -#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0 -static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val) -{ - return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK; -} -#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c -#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2 -static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val) -{ - return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK; -} -#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030 -#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4 -static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val) -{ - return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK; -} -#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080 -#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00 -#define MDP4_DMA_CONFIG_PACK__SHIFT 8 -static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val) -{ - return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK; -} -#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000 -#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000 - -static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); } -#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK; -} -#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff -#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); } -#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK; -} -#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff -#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); } -#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f -#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK; -} -#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000 -#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK; -} - -static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); } - -static inline uint32_t 
REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); } -#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff -#define MDP4_DMA_CURSOR_POS_X__SHIFT 0 -static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val) -{ - return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK; -} -#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000 -#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16 -static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val) -{ - return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK; -} - -static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); } -#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001 -#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006 -#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1 -static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val) -{ - return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK; -} -#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008 - -static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); } - -static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); } - - -static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; } -#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK; -} -#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff -#define 
MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; } -#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000 -#define MDP4_PIPE_SRC_XY_Y__SHIFT 16 -static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK; -} -#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff -#define MDP4_PIPE_SRC_XY_X__SHIFT 0 -static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; } -#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK; -} -#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff -#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; } -#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000 -#define MDP4_PIPE_DST_XY_Y__SHIFT 16 -static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val) -{ - return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK; -} -#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff -#define MDP4_PIPE_DST_XY_X__SHIFT 0 -static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val) -{ - return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_SRCP3_BASE(enum mdp4_pipe i0) { return 0x0002001c + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; } -#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff -#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0 -static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK; -} -#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000 -#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16 -static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; } -#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff -#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0 -static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK; -} -#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000 -#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16 -static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK; -} - -static inline 
uint32_t REG_MDP4_PIPE_SSTILE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; } -#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK; -} -#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK 0x0000ffff -#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; } -#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 -#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c -#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 -#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 -#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100 -#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600 -#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000 -#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000 -#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 -#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 -#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK 0x00180000 -#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT 19 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT) & MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000 -#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x0c000000 -#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 26 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; -} -#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK 0x60000000 -#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT 29 -static inline uint32_t MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(enum mdp4_frame_format val) -{ - return ((val) << MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT) & 
MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; } -#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff -#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 -static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK; -} -#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00 -#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8 -static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK; -} -#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000 -#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16 -static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK; -} -#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000 -#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24 -static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val) -{ - return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK; -} - -static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; } -#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001 -#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002 -#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK 0x0000000c -#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT 2 -static inline uint32_t MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(enum mdp4_scale_unit val) -{ - return ((val) << MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK; -} -#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK 0x00000030 -#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT 4 -static inline uint32_t MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(enum mdp4_scale_unit val) -{ - return ((val) << MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK; -} -#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200 -#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400 -#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800 -#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000 -#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000 -#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000 -#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000 -#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000 -#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000 - -static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; } - -static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; } - - -static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) 
{ return 0x00024580 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } - -static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } - -#define REG_MDP4_LCDC 0x000c0000 - -#define REG_MDP4_LCDC_ENABLE 0x000c0000 - -#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004 -#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff -#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0 -static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val) -{ - return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK; -} -#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000 -#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16 -static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val) -{ - return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK; -} - -#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008 - -#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c - -#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010 -#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff -#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0 -static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val) -{ - return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK; -} -#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000 -#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16 -static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val) -{ - return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK; -} - -#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014 - -#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018 - -#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c -#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff -#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0 -static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val) -{ - return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK; -} -#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000 -#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16 -static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val) -{ - return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK; -} -#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 - -#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020 - -#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024 - -#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028 - -#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c -#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff -#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0 -static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val) -{ - return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK; -} -#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 - -#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030 - -#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034 - -#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038 -#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001 -#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002 -#define 
MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004 - -#define REG_MDP4_LCDC_LVDS_INTF_CTL 0x000c2000 -#define MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL 0x00000004 -#define MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT 0x00000008 -#define MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP 0x00000010 -#define MDP4_LCDC_LVDS_INTF_CTL_CH1_RES_BIT 0x00000020 -#define MDP4_LCDC_LVDS_INTF_CTL_CH2_RES_BIT 0x00000040 -#define MDP4_LCDC_LVDS_INTF_CTL_ENABLE 0x00000080 -#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN 0x00000100 -#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN 0x00000200 -#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN 0x00000400 -#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN 0x00000800 -#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN 0x00001000 -#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN 0x00002000 -#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN 0x00004000 -#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN 0x00008000 -#define MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN 0x00010000 -#define MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN 0x00020000 - -static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL(uint32_t i0) { return 0x000c2014 + 0x8*i0; } - -static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(uint32_t i0) { return 0x000c2014 + 0x8*i0; } -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK 0x000000ff -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT 0 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK; -} -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK 0x0000ff00 -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT 8 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK; -} -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK 0x00ff0000 -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT 16 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK; -} -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK 0xff000000 -#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT 24 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK; -} - -static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(uint32_t i0) { return 0x000c2018 + 0x8*i0; } -#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK 0x000000ff -#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT 0 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK; -} -#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK 0x0000ff00 -#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT 8 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK; -} -#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK 0x00ff0000 -#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT 16 -static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(uint32_t val) -{ - return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK; -} - -#define REG_MDP4_LCDC_LVDS_PHY_RESET 0x000c2034 - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_0 0x000c3000 - -#define 
REG_MDP4_LVDS_PHY_PLL_CTRL_1 0x000c3004 - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_2 0x000c3008 - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_3 0x000c300c - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_5 0x000c3014 - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_6 0x000c3018 - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_7 0x000c301c - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_8 0x000c3020 - -#define REG_MDP4_LVDS_PHY_PLL_CTRL_9 0x000c3024 - -#define REG_MDP4_LVDS_PHY_PLL_LOCKED 0x000c3080 - -#define REG_MDP4_LVDS_PHY_CFG2 0x000c3108 - -#define REG_MDP4_LVDS_PHY_CFG0 0x000c3100 -#define MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE 0x00000010 -#define MDP4_LVDS_PHY_CFG0_CHANNEL0 0x00000040 -#define MDP4_LVDS_PHY_CFG0_CHANNEL1 0x00000080 - -#define REG_MDP4_DTV 0x000d0000 - -#define REG_MDP4_DTV_ENABLE 0x000d0000 - -#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004 -#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff -#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0 -static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val) -{ - return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK; -} -#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000 -#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16 -static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val) -{ - return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK; -} - -#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008 - -#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c - -#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018 -#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff -#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0 -static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val) -{ - return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK; -} -#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000 -#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16 -static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val) -{ - return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK; -} - -#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c - -#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020 - -#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c -#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff -#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0 -static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val) -{ - return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK; -} -#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000 -#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16 -static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val) -{ - return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK; -} -#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 - -#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030 - -#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038 - -#define REG_MDP4_DTV_BORDER_CLR 0x000d0040 - -#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044 -#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff -#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0 -static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val) -{ - return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK; -} -#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 - -#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048 - -#define REG_MDP4_DTV_TEST_CNTL 0x000d004c - -#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050 -#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001 -#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002 -#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004 - -#define 
REG_MDP4_DSI 0x000e0000 - -#define REG_MDP4_DSI_ENABLE 0x000e0000 - -#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004 -#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff -#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0 -static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val) -{ - return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK; -} -#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000 -#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16 -static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val) -{ - return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK; -} - -#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008 - -#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c - -#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010 -#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff -#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0 -static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val) -{ - return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK; -} -#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000 -#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16 -static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val) -{ - return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK; -} - -#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014 - -#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018 - -#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c -#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff -#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0 -static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val) -{ - return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK; -} -#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000 -#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16 -static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val) -{ - return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK; -} -#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 - -#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020 - -#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024 - -#define REG_MDP4_DSI_BORDER_CLR 0x000e0028 - -#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c -#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff -#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0 -static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val) -{ - return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK; -} -#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 - -#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030 - -#define REG_MDP4_DSI_TEST_CNTL 0x000e0034 - -#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038 -#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001 -#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002 -#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004 - - -#endif /* MDP4_XML */ diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c index 75f93e3462..b8610aa806 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -182,8 +182,8 @@ static void blend_setup(struct drm_crtc *crtc) enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); int idx = idxs[pipe_id]; if (idx > 0) { - const struct mdp_format *format = - to_mdp_format(msm_framebuffer_format(plane->state->fb)); + const struct msm_format *format = + msm_framebuffer_format(plane->state->fb); alpha[idx-1] = format->alpha_enable; } } diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c 
b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index 4ba1cb74ad..6e4e74f9d6 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -151,7 +151,6 @@ static const struct mdp_kms_funcs kms_funcs = { .flush_commit = mdp4_flush_commit, .wait_flush = mdp4_wait_flush, .complete_commit = mdp4_complete_commit, - .get_format = mdp_get_format, .round_pixclk = mdp4_round_pixclk, .destroy = mdp4_destroy, }, diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h index 01179e764a..94b1ba9278 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h @@ -44,12 +44,12 @@ struct mdp4_kms { static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data) { - msm_writel(data, mdp4_kms->mmio + reg); + writel(data, mdp4_kms->mmio + reg); } static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg) { - return msm_readl(mdp4_kms->mmio + reg); + return readl(mdp4_kms->mmio + reg); } static inline uint32_t pipe2flush(enum mdp4_pipe pipe) diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c index b689b618da..3fefb20880 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c @@ -20,12 +20,6 @@ struct mdp4_plane { const char *name; enum mdp4_pipe pipe; - - uint32_t caps; - uint32_t nformats; - uint32_t formats[32]; - - bool enabled; }; #define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base) @@ -59,15 +53,6 @@ static struct mdp4_kms *get_kms(struct drm_plane *plane) return to_mdp4_kms(to_mdp_kms(priv->kms)); } -static void mdp4_plane_destroy(struct drm_plane *plane) -{ - struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); - - drm_plane_cleanup(plane); - - kfree(mdp4_plane); -} - /* helper to install properties which are common to planes and crtcs */ static void mdp4_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj) @@ -85,7 +70,6 @@ static int mdp4_plane_set_property(struct drm_plane *plane, static const struct drm_plane_funcs mdp4_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = mdp4_plane_destroy, .set_property = mdp4_plane_set_property, .reset = drm_atomic_helper_plane_reset, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, @@ -218,7 +202,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); struct mdp4_kms *mdp4_kms = get_kms(plane); enum mdp4_pipe pipe = mdp4_plane->pipe; - const struct mdp_format *format; + const struct msm_format *format; uint32_t op_mode = 0; uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; @@ -241,7 +225,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, fb->base.id, src_x, src_y, src_w, src_h, crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); - format = to_mdp_format(msm_framebuffer_format(fb)); + format = msm_framebuffer_format(fb); if (src_w > (crtc_w * DOWN_SCALE_MAX)) { DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n"); @@ -267,7 +251,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, uint32_t sel_unit = SCALE_FIR; op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN; - if (MDP_FORMAT_IS_YUV(format)) { + if (MSM_FORMAT_IS_YUV(format)) { if (crtc_w > src_w) sel_unit = SCALE_PIXEL_RPT; else if (crtc_w <= (src_w / 4)) @@ -283,7 +267,7 @@ static int mdp4_plane_mode_set(struct drm_plane 
*plane, uint32_t sel_unit = SCALE_FIR; op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN; - if (MDP_FORMAT_IS_YUV(format)) { + if (MSM_FORMAT_IS_YUV(format)) { if (crtc_h > src_h) sel_unit = SCALE_PIXEL_RPT; @@ -316,24 +300,25 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | - MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | - MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) | - MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) | + MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r_cr) | + MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g_y) | + MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b_cb) | COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) | - MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | + MDP4_PIPE_SRC_FORMAT_CPP(format->bpp - 1) | MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) | MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) | MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(frame_type) | - COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT)); + COND(format->flags & MSM_FORMAT_FLAG_UNPACK_TIGHT, + MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT)); mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe), - MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | - MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) | - MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | - MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); + MDP4_PIPE_SRC_UNPACK_ELEM0(format->element[0]) | + MDP4_PIPE_SRC_UNPACK_ELEM1(format->element[1]) | + MDP4_PIPE_SRC_UNPACK_ELEM2(format->element[2]) | + MDP4_PIPE_SRC_UNPACK_ELEM3(format->element[3])); - if (MDP_FORMAT_IS_YUV(format)) { + if (MSM_FORMAT_IS_YUV(format)) { struct csc_cfg *csc = mdp_get_default_csc_cfg(CSC_YUV2RGB); op_mode |= MDP4_PIPE_OP_MODE_SRC_YCBCR; @@ -371,37 +356,81 @@ static const uint64_t supported_format_modifiers[] = { DRM_FORMAT_MOD_INVALID }; +static const uint32_t mdp4_rgb_formats[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, +}; + +static const uint32_t mdp4_rgb_yuv_formats[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, + DRM_FORMAT_NV16, + DRM_FORMAT_NV61, + DRM_FORMAT_VYUY, + DRM_FORMAT_UYVY, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_YUV420, + DRM_FORMAT_YVU420, +}; + /* initialize plane */ struct drm_plane *mdp4_plane_init(struct drm_device *dev, enum mdp4_pipe pipe_id, bool private_plane) { struct drm_plane *plane = NULL; struct mdp4_plane *mdp4_plane; - int ret; enum drm_plane_type type; + uint32_t pipe_caps; + const uint32_t *formats; + size_t nformats; - mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL); - if (!mdp4_plane) { - ret = -ENOMEM; - goto fail; + type = private_plane ? 
DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; + + pipe_caps = mdp4_pipe_caps(pipe_id); + if (pipe_supports_yuv(pipe_caps)) { + formats = mdp4_rgb_yuv_formats; + nformats = ARRAY_SIZE(mdp4_rgb_yuv_formats); + } else { + formats = mdp4_rgb_formats; + nformats = ARRAY_SIZE(mdp4_rgb_formats); } + mdp4_plane = drmm_universal_plane_alloc(dev, struct mdp4_plane, base, + 0xff, &mdp4_plane_funcs, + formats, nformats, + supported_format_modifiers, + type, NULL); + if (IS_ERR(mdp4_plane)) + return ERR_CAST(mdp4_plane); + plane = &mdp4_plane->base; mdp4_plane->pipe = pipe_id; mdp4_plane->name = pipe_names[pipe_id]; - mdp4_plane->caps = mdp4_pipe_caps(pipe_id); - - mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats, - ARRAY_SIZE(mdp4_plane->formats), - !pipe_supports_yuv(mdp4_plane->caps)); - - type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; - ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs, - mdp4_plane->formats, mdp4_plane->nformats, - supported_format_modifiers, type, NULL); - if (ret) - goto fail; drm_plane_helper_add(plane, &mdp4_plane_helper_funcs); @@ -410,10 +439,4 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev, drm_plane_enable_fb_damage_clips(plane); return plane; - -fail: - if (plane) - mdp4_plane_destroy(plane); - - return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h deleted file mode 100644 index 270e11c904..0000000000 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h +++ /dev/null @@ -1,1979 +0,0 @@ -#ifndef MDP5_XML -#define MDP5_XML - -/* Autogenerated file, DO NOT EDIT manually! - -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) -- 
/home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) - -Copyright (C) 2013-2022 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ - - -enum mdp5_intf_type { - INTF_DISABLED = 0, - INTF_DSI = 1, - INTF_HDMI = 3, - INTF_LCDC = 5, - INTF_eDP = 9, - INTF_VIRTUAL = 100, - INTF_WB = 101, -}; - -enum mdp5_intfnum { - NO_INTF = 0, - INTF0 = 1, - INTF1 = 2, - INTF2 = 3, - INTF3 = 4, -}; - -enum mdp5_pipe { - SSPP_NONE = 0, - SSPP_VIG0 = 1, - SSPP_VIG1 = 2, - SSPP_VIG2 = 3, - SSPP_RGB0 = 4, - SSPP_RGB1 = 5, - SSPP_RGB2 = 6, - SSPP_DMA0 = 7, - SSPP_DMA1 = 8, - SSPP_VIG3 = 9, - SSPP_RGB3 = 10, - SSPP_CURSOR0 = 11, - SSPP_CURSOR1 = 12, -}; - -enum mdp5_format { - DUMMY = 0, -}; - -enum mdp5_ctl_mode { - MODE_NONE = 0, - MODE_WB_0_BLOCK = 1, - MODE_WB_1_BLOCK = 2, - MODE_WB_0_LINE = 3, - MODE_WB_1_LINE = 4, - MODE_WB_2_LINE = 5, -}; - -enum mdp5_pack_3d { - PACK_3D_FRAME_INT = 0, - PACK_3D_H_ROW_INT = 1, - PACK_3D_V_ROW_INT = 2, - PACK_3D_COL_INT = 3, -}; - -enum mdp5_scale_filter { - SCALE_FILTER_NEAREST = 0, - SCALE_FILTER_BIL = 1, - SCALE_FILTER_PCMN = 2, - SCALE_FILTER_CA = 3, -}; - -enum mdp5_pipe_bwc { - BWC_LOSSLESS = 0, - BWC_Q_HIGH = 1, - BWC_Q_MED = 2, -}; - -enum mdp5_cursor_format { - CURSOR_FMT_ARGB8888 = 0, - CURSOR_FMT_ARGB1555 = 2, - CURSOR_FMT_ARGB4444 = 4, -}; - -enum mdp5_cursor_alpha { - CURSOR_ALPHA_CONST = 0, - CURSOR_ALPHA_PER_PIXEL = 2, -}; - -enum mdp5_igc_type { - IGC_VIG = 0, - IGC_RGB = 1, - IGC_DMA = 2, - IGC_DSPP = 3, -}; - -enum mdp5_data_format { - DATA_FORMAT_RGB = 0, - DATA_FORMAT_YUV = 1, -}; - -enum mdp5_block_size { - BLOCK_SIZE_64 = 0, - BLOCK_SIZE_128 = 1, -}; - -enum mdp5_rotate_mode { - ROTATE_0 = 0, - ROTATE_90 = 1, -}; - -enum mdp5_chroma_downsample_method { - DS_MTHD_NO_PIXEL_DROP = 0, - DS_MTHD_PIXEL_DROP = 1, -}; - -#define MDP5_IRQ_WB_0_DONE 0x00000001 -#define MDP5_IRQ_WB_1_DONE 0x00000002 -#define MDP5_IRQ_WB_2_DONE 0x00000010 -#define MDP5_IRQ_PING_PONG_0_DONE 0x00000100 -#define MDP5_IRQ_PING_PONG_1_DONE 0x00000200 -#define MDP5_IRQ_PING_PONG_2_DONE 0x00000400 -#define 
MDP5_IRQ_PING_PONG_3_DONE 0x00000800 -#define MDP5_IRQ_PING_PONG_0_RD_PTR 0x00001000 -#define MDP5_IRQ_PING_PONG_1_RD_PTR 0x00002000 -#define MDP5_IRQ_PING_PONG_2_RD_PTR 0x00004000 -#define MDP5_IRQ_PING_PONG_3_RD_PTR 0x00008000 -#define MDP5_IRQ_PING_PONG_0_WR_PTR 0x00010000 -#define MDP5_IRQ_PING_PONG_1_WR_PTR 0x00020000 -#define MDP5_IRQ_PING_PONG_2_WR_PTR 0x00040000 -#define MDP5_IRQ_PING_PONG_3_WR_PTR 0x00080000 -#define MDP5_IRQ_PING_PONG_0_AUTO_REF 0x00100000 -#define MDP5_IRQ_PING_PONG_1_AUTO_REF 0x00200000 -#define MDP5_IRQ_PING_PONG_2_AUTO_REF 0x00400000 -#define MDP5_IRQ_PING_PONG_3_AUTO_REF 0x00800000 -#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000 -#define MDP5_IRQ_INTF0_VSYNC 0x02000000 -#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000 -#define MDP5_IRQ_INTF1_VSYNC 0x08000000 -#define MDP5_IRQ_INTF2_UNDER_RUN 0x10000000 -#define MDP5_IRQ_INTF2_VSYNC 0x20000000 -#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000 -#define MDP5_IRQ_INTF3_VSYNC 0x80000000 -#define REG_MDSS_HW_VERSION 0x00000000 -#define MDSS_HW_VERSION_STEP__MASK 0x0000ffff -#define MDSS_HW_VERSION_STEP__SHIFT 0 -static inline uint32_t MDSS_HW_VERSION_STEP(uint32_t val) -{ - return ((val) << MDSS_HW_VERSION_STEP__SHIFT) & MDSS_HW_VERSION_STEP__MASK; -} -#define MDSS_HW_VERSION_MINOR__MASK 0x0fff0000 -#define MDSS_HW_VERSION_MINOR__SHIFT 16 -static inline uint32_t MDSS_HW_VERSION_MINOR(uint32_t val) -{ - return ((val) << MDSS_HW_VERSION_MINOR__SHIFT) & MDSS_HW_VERSION_MINOR__MASK; -} -#define MDSS_HW_VERSION_MAJOR__MASK 0xf0000000 -#define MDSS_HW_VERSION_MAJOR__SHIFT 28 -static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val) -{ - return ((val) << MDSS_HW_VERSION_MAJOR__SHIFT) & MDSS_HW_VERSION_MAJOR__MASK; -} - -#define REG_MDSS_HW_INTR_STATUS 0x00000010 -#define MDSS_HW_INTR_STATUS_INTR_MDP 0x00000001 -#define MDSS_HW_INTR_STATUS_INTR_DSI0 0x00000010 -#define MDSS_HW_INTR_STATUS_INTR_DSI1 0x00000020 -#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100 -#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000 - -#define REG_MDP5_HW_VERSION 0x00000000 -#define MDP5_HW_VERSION_STEP__MASK 0x0000ffff -#define MDP5_HW_VERSION_STEP__SHIFT 0 -static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val) -{ - return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK; -} -#define MDP5_HW_VERSION_MINOR__MASK 0x0fff0000 -#define MDP5_HW_VERSION_MINOR__SHIFT 16 -static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val) -{ - return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK; -} -#define MDP5_HW_VERSION_MAJOR__MASK 0xf0000000 -#define MDP5_HW_VERSION_MAJOR__SHIFT 28 -static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val) -{ - return ((val) << MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK; -} - -#define REG_MDP5_DISP_INTF_SEL 0x00000004 -#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff -#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0 -static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val) -{ - return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK; -} -#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00 -#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8 -static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val) -{ - return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK; -} -#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000 -#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16 -static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val) -{ - return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & 
MDP5_DISP_INTF_SEL_INTF2__MASK; -} -#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000 -#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24 -static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val) -{ - return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK; -} - -#define REG_MDP5_INTR_EN 0x00000010 - -#define REG_MDP5_INTR_STATUS 0x00000014 - -#define REG_MDP5_INTR_CLEAR 0x00000018 - -#define REG_MDP5_HIST_INTR_EN 0x0000001c - -#define REG_MDP5_HIST_INTR_STATUS 0x00000020 - -#define REG_MDP5_HIST_INTR_CLEAR 0x00000024 - -#define REG_MDP5_SPARE_0 0x00000028 -#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001 - -static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; } - -static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; } -#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff -#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0 -static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val) -{ - return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; -} -#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00 -#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8 -static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val) -{ - return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; -} -#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000 -#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16 -static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val) -{ - return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; -} - -static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; } - -static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; } -#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff -#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0 -static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val) -{ - return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK; -} -#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00 -#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8 -static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val) -{ - return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK; -} -#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000 -#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16 -static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val) -{ - return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK; -} - -static inline uint32_t __offset_IGC(enum mdp5_igc_type idx) -{ - switch (idx) { - case IGC_VIG: return 0x00000200; - case IGC_RGB: return 0x00000210; - case IGC_DMA: return 0x00000220; - case IGC_DSPP: return 0x00000300; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); } - -static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } -#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff -#define MDP5_IGC_LUT_REG_VAL__SHIFT 0 -static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val) -{ - return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK; -} -#define MDP5_IGC_LUT_REG_INDEX_UPDATE 
0x02000000 -#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000 -#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 -#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 - -#define REG_MDP5_SPLIT_DPL_EN 0x000002f4 - -#define REG_MDP5_SPLIT_DPL_UPPER 0x000002f8 -#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002 -#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004 -#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010 -#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100 - -#define REG_MDP5_SPLIT_DPL_LOWER 0x000003f0 -#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002 -#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004 -#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010 -#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100 - -static inline uint32_t __offset_CTL(uint32_t idx) -{ - switch (idx) { - case 0: return (mdp5_cfg->ctl.base[0]); - case 1: return (mdp5_cfg->ctl.base[1]); - case 2: return (mdp5_cfg->ctl.base[2]); - case 3: return (mdp5_cfg->ctl.base[3]); - case 4: return (mdp5_cfg->ctl.base[4]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000000 + __offset_CTL(i0); } - -static inline uint32_t __offset_LAYER(uint32_t idx) -{ - switch (idx) { - case 0: return 0x00000000; - case 1: return 0x00000004; - case 2: return 0x00000008; - case 3: return 0x0000000c; - case 4: return 0x00000010; - case 5: return 0x00000024; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); } - -static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); } -#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007 -#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK; -} -#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038 -#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK; -} -#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0 -#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK; -} -#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00 -#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK; -} -#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000 -#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK; -} -#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000 -#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK; -} -#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000 -#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18 -static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK; -} -#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000 -#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21 -static 
inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK; -} -#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000 -#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000 -#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000 -#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26 -static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK; -} -#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000 -#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29 -static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val) -{ - return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK; -} - -static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000014 + __offset_CTL(i0); } -#define MDP5_CTL_OP_MODE__MASK 0x0000000f -#define MDP5_CTL_OP_MODE__SHIFT 0 -static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val) -{ - return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK; -} -#define MDP5_CTL_OP_INTF_NUM__MASK 0x00000070 -#define MDP5_CTL_OP_INTF_NUM__SHIFT 4 -static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val) -{ - return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK; -} -#define MDP5_CTL_OP_CMD_MODE 0x00020000 -#define MDP5_CTL_OP_PACK_3D_ENABLE 0x00080000 -#define MDP5_CTL_OP_PACK_3D__MASK 0x00300000 -#define MDP5_CTL_OP_PACK_3D__SHIFT 20 -static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val) -{ - return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK; -} - -static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __offset_CTL(i0); } -#define MDP5_CTL_FLUSH_VIG0 0x00000001 -#define MDP5_CTL_FLUSH_VIG1 0x00000002 -#define MDP5_CTL_FLUSH_VIG2 0x00000004 -#define MDP5_CTL_FLUSH_RGB0 0x00000008 -#define MDP5_CTL_FLUSH_RGB1 0x00000010 -#define MDP5_CTL_FLUSH_RGB2 0x00000020 -#define MDP5_CTL_FLUSH_LM0 0x00000040 -#define MDP5_CTL_FLUSH_LM1 0x00000080 -#define MDP5_CTL_FLUSH_LM2 0x00000100 -#define MDP5_CTL_FLUSH_LM3 0x00000200 -#define MDP5_CTL_FLUSH_LM4 0x00000400 -#define MDP5_CTL_FLUSH_DMA0 0x00000800 -#define MDP5_CTL_FLUSH_DMA1 0x00001000 -#define MDP5_CTL_FLUSH_DSPP0 0x00002000 -#define MDP5_CTL_FLUSH_DSPP1 0x00004000 -#define MDP5_CTL_FLUSH_DSPP2 0x00008000 -#define MDP5_CTL_FLUSH_WB 0x00010000 -#define MDP5_CTL_FLUSH_CTL 0x00020000 -#define MDP5_CTL_FLUSH_VIG3 0x00040000 -#define MDP5_CTL_FLUSH_RGB3 0x00080000 -#define MDP5_CTL_FLUSH_LM5 0x00100000 -#define MDP5_CTL_FLUSH_DSPP3 0x00200000 -#define MDP5_CTL_FLUSH_CURSOR_0 0x00400000 -#define MDP5_CTL_FLUSH_CURSOR_1 0x00800000 -#define MDP5_CTL_FLUSH_CHROMADOWN_0 0x04000000 -#define MDP5_CTL_FLUSH_TIMING_3 0x10000000 -#define MDP5_CTL_FLUSH_TIMING_2 0x20000000 -#define MDP5_CTL_FLUSH_TIMING_1 0x40000000 -#define MDP5_CTL_FLUSH_TIMING_0 0x80000000 - -static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); } - -static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); } - -static inline uint32_t __offset_LAYER_EXT(uint32_t idx) -{ - switch (idx) { - case 0: return 0x00000040; - case 1: return 0x00000044; - case 2: return 0x00000048; - case 3: return 0x0000004c; - case 4: return 0x00000050; - case 5: return 0x00000054; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); } - -static inline 
uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); } -#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3 0x00000001 -#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3 0x00000004 -#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3 0x00000010 -#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3 0x00000040 -#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3 0x00000100 -#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3 0x00000400 -#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3 0x00001000 -#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3 0x00004000 -#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3 0x00010000 -#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3 0x00040000 -#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK 0x00f00000 -#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT 20 -static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK; -} -#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK 0x3c000000 -#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT 26 -static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val) -{ - return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK; -} - -static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) -{ - switch (idx) { - case SSPP_NONE: return (INVALID_IDX(idx)); - case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]); - case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]); - case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]); - case SSPP_RGB0: return (mdp5_cfg->pipe_rgb.base[0]); - case SSPP_RGB1: return (mdp5_cfg->pipe_rgb.base[1]); - case SSPP_RGB2: return (mdp5_cfg->pipe_rgb.base[2]); - case SSPP_DMA0: return (mdp5_cfg->pipe_dma.base[0]); - case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]); - case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]); - case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]); - case SSPP_CURSOR0: return (mdp5_cfg->pipe_cursor.base[0]); - case SSPP_CURSOR1: return (mdp5_cfg->pipe_cursor.base[1]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_OP_MODE(enum mdp5_pipe i0) { return 0x00000200 + __offset_PIPE(i0); } -#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00080000 -#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 19 -static inline uint32_t MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(enum mdp5_data_format val) -{ - return ((val) << MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK; -} -#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00040000 -#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 18 -static inline uint32_t MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(enum mdp5_data_format val) -{ - return ((val) << MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK; -} -#define MDP5_PIPE_OP_MODE_CSC_1_EN 0x00020000 - -static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(enum mdp5_pipe i0) { return 0x00000320 + __offset_PIPE(i0); } -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff -#define 
MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK; -} -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000 -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT 16 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(enum mdp5_pipe i0) { return 0x00000324 + __offset_PIPE(i0); } -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK; -} -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000 -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT 16 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(enum mdp5_pipe i0) { return 0x00000328 + __offset_PIPE(i0); } -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK; -} -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000 -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT 16 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(enum mdp5_pipe i0) { return 0x0000032c + __offset_PIPE(i0); } -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK; -} -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000 -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT 16 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(enum mdp5_pipe i0) { return 0x00000330 + __offset_PIPE(i0); } -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff -#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP_REG(enum mdp5_pipe 
i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; } -#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK 0x000000ff -#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK; -} -#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK 0x0000ff00 -#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT 8 -static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; } -#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK 0x000000ff -#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK; -} -#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK 0x0000ff00 -#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT 8 -static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; } -#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK 0x000001ff -#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; } -#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK 0x000001ff -#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT 0 -static inline uint32_t MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(uint32_t val) -{ - return ((val) << MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK; -} -#define MDP5_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff -#define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00000004 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16 
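/*
 * Illustrative aside, not upstream code: the generated headers removed in
 * this hunk all follow one field-packing convention -- each register field
 * gets a FOO__MASK/FOO__SHIFT pair plus an inline FOO(val) helper that
 * shifts the value into position and clamps it with the mask. A minimal,
 * self-contained sketch of how a caller composes MDP5_PIPE_SRC_IMG_SIZE
 * (width in bits [15:0], height in bits [31:16], per the masks nearby);
 * the helper name pack_src_img_size() is made up for this sketch.
 */
#include <stdint.h>

static inline uint32_t pack_src_img_size(uint32_t width, uint32_t height)
{
	/*
	 * Equivalent to MDP5_PIPE_SRC_IMG_SIZE_WIDTH(width) |
	 * MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(height) using the generated helpers.
	 */
	return ((width << 0) & 0x0000ffff) | ((height << 16) & 0xffff0000);
}

/* e.g. pack_src_img_size(1920, 1080) == 0x04380780 */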
-static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK; -} -#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK 0x0000ffff -#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00000008 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000 -#define MDP5_PIPE_SRC_XY_Y__SHIFT 16 -static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK; -} -#define MDP5_PIPE_SRC_XY_X__MASK 0x0000ffff -#define MDP5_PIPE_SRC_XY_X__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000000c + __offset_PIPE(i0); } -#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK; -} -#define MDP5_PIPE_OUT_SIZE_WIDTH__MASK 0x0000ffff -#define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00000010 + __offset_PIPE(i0); } -#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000 -#define MDP5_PIPE_OUT_XY_Y__SHIFT 16 -static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val) -{ - return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK; -} -#define MDP5_PIPE_OUT_XY_X__MASK 0x0000ffff -#define MDP5_PIPE_OUT_XY_X__SHIFT 0 -static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val) -{ - return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00000014 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00000018 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000001c + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00000020 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00000024 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff -#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK; -} -#define MDP5_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000 -#define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT 16 -static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00000028 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff -#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & 
MDP5_PIPE_SRC_STRIDE_B_P2__MASK; -} -#define MDP5_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000 -#define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT 16 -static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000002c + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00000030 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 -#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c -#define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 -#define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 -#define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100 -#define MDP5_PIPE_SRC_FORMAT_CPP__MASK 0x00000600 -#define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT 9 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_ROT90 0x00000800 -#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00003000 -#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 12 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 -#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 -#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK 0x00180000 -#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT 19 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK; -} -#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000 -#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val) -{ - return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00000034 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff -#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 -static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK; -} -#define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00 -#define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT 8 -static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & 
MDP5_PIPE_SRC_UNPACK_ELEM1__MASK; -} -#define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000 -#define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT 16 -static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK; -} -#define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000 -#define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT 24 -static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val) -{ - return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00000038 + __offset_PIPE(i0); } -#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001 -#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006 -#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1 -static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val) -{ - return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK; -} -#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR 0x00002000 -#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD 0x00004000 -#define MDP5_PIPE_SRC_OP_MODE_IGC_EN 0x00010000 -#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0 0x00020000 -#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000 -#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000 -#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000 -#define MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE 0x80000000 - -static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00000048 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000004c + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00000050 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00000054 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00000058 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00000070 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000000a4 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000000a8 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000000ac + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000000b0 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000000b4 + __offset_PIPE(i0); } -#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff -#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0 -static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val) -{ - return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK; -} -#define MDP5_PIPE_DECIMATION_HORZ__MASK 0x0000ff00 -#define MDP5_PIPE_DECIMATION_HORZ__SHIFT 8 -static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val) -{ - return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK; -} - -static inline uint32_t __offset_SW_PIX_EXT(enum mdp_component_type idx) -{ - switch (idx) { - case COMP_0: return 0x00000100; - case COMP_1_2: return 0x00000110; - case COMP_3: return 0x00000120; - default: 
return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } - -static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_LR(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } -#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK 0x000000ff -#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT 0 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK 0x0000ff00 -#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT 8 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(int32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK 0x00ff0000 -#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT 16 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK 0xff000000 -#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT 24 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(int32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_TB(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000004 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } -#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK 0x000000ff -#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT 0 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK 0x0000ff00 -#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT 8 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(int32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK 0x00ff0000 -#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT 16 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK 0xff000000 -#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT 24 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(int32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000008 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); } -#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK 0x0000ffff -#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT 0 -static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(uint32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK; -} -#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK 0xffff0000 -#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT 16 -static inline uint32_t 
MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(uint32_t val) -{ - return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); } -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK 0x00000300 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT 8 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val) -{ - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK; -} -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK 0x00000c00 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT 10 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val) -{ - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK; -} -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK 0x00003000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT 12 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val) -{ - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK; -} -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK 0x0000c000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT 14 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val) -{ - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK; -} -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK 0x00030000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT 16 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val) -{ - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK; -} -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK 0x000c0000 -#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT 18 -static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val) -{ - return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK; -} - -static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000218 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x0000021c + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); } - -static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); } - -static inline uint32_t __offset_LM(uint32_t idx) -{ - switch (idx) { - case 0: return (mdp5_cfg->lm.base[0]); - case 1: return (mdp5_cfg->lm.base[1]); - case 2: return (mdp5_cfg->lm.base[2]); - case 3: return (mdp5_cfg->lm.base[3]); - case 
4: return (mdp5_cfg->lm.base[4]); - case 5: return (mdp5_cfg->lm.base[5]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00000000 + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000000 + __offset_LM(i0); } -#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002 -#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004 -#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008 -#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010 -#define MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA 0x00000020 -#define MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA 0x00000040 -#define MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA 0x00000080 -#define MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT 0x80000000 - -static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); } -#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000 -#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16 -static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val) -{ - return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK; -} -#define MDP5_LM_OUT_SIZE_WIDTH__MASK 0x0000ffff -#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT 0 -static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val) -{ - return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK; -} - -static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00000008 + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); } - -static inline uint32_t __offset_BLEND(uint32_t idx) -{ - switch (idx) { - case 0: return 0x00000020; - case 1: return 0x00000050; - case 2: return 0x00000080; - case 3: return 0x000000b0; - case 4: return 0x00000230; - case 5: return 0x00000260; - case 6: return 0x00000290; - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); } -#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003 -#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0 -static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val) -{ - return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK; -} -#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA 0x00000004 -#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA 0x00000008 -#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA 0x00000010 -#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN 0x00000020 -#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK 0x00000300 -#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT 8 -static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val) -{ - return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK; -} -#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA 0x00000400 -#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA 0x00000800 -#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000 -#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000 - -static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t 
REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); } - -static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); } -#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff -#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT 0 -static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_W(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK; -} -#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK 0xffff0000 -#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT 16 -static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_H(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK; -} - -static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); } -#define MDP5_LM_CURSOR_SIZE_ROI_W__MASK 0x0000ffff -#define MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT 0 -static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_W(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_W__MASK; -} -#define MDP5_LM_CURSOR_SIZE_ROI_H__MASK 0xffff0000 -#define MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT 16 -static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_H(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_H__MASK; -} - -static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); } -#define MDP5_LM_CURSOR_XY_SRC_X__MASK 0x0000ffff -#define MDP5_LM_CURSOR_XY_SRC_X__SHIFT 0 -static inline uint32_t MDP5_LM_CURSOR_XY_SRC_X(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_XY_SRC_X__SHIFT) & MDP5_LM_CURSOR_XY_SRC_X__MASK; -} -#define MDP5_LM_CURSOR_XY_SRC_Y__MASK 0xffff0000 -#define MDP5_LM_CURSOR_XY_SRC_Y__SHIFT 16 -static inline uint32_t MDP5_LM_CURSOR_XY_SRC_Y(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_XY_SRC_Y__SHIFT) & MDP5_LM_CURSOR_XY_SRC_Y__MASK; -} - -static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); } -#define MDP5_LM_CURSOR_STRIDE_STRIDE__MASK 0x0000ffff -#define MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT 0 -static inline uint32_t MDP5_LM_CURSOR_STRIDE_STRIDE(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT) & MDP5_LM_CURSOR_STRIDE_STRIDE__MASK; -} - -static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); } -#define MDP5_LM_CURSOR_FORMAT_FORMAT__MASK 0x00000007 
-#define MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT 0 -static inline uint32_t MDP5_LM_CURSOR_FORMAT_FORMAT(enum mdp5_cursor_format val) -{ - return ((val) << MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT) & MDP5_LM_CURSOR_FORMAT_FORMAT__MASK; -} - -static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); } -#define MDP5_LM_CURSOR_START_XY_X_START__MASK 0x0000ffff -#define MDP5_LM_CURSOR_START_XY_X_START__SHIFT 0 -static inline uint32_t MDP5_LM_CURSOR_START_XY_X_START(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_START_XY_X_START__SHIFT) & MDP5_LM_CURSOR_START_XY_X_START__MASK; -} -#define MDP5_LM_CURSOR_START_XY_Y_START__MASK 0xffff0000 -#define MDP5_LM_CURSOR_START_XY_Y_START__SHIFT 16 -static inline uint32_t MDP5_LM_CURSOR_START_XY_Y_START(uint32_t val) -{ - return ((val) << MDP5_LM_CURSOR_START_XY_Y_START__SHIFT) & MDP5_LM_CURSOR_START_XY_Y_START__MASK; -} - -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); } -#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN 0x00000001 -#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK 0x00000006 -#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT 1 -static inline uint32_t MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(enum mdp5_cursor_alpha val) -{ - return ((val) << MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT) & MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK; -} -#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN 0x00000008 - -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00000100 + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00000104 + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00000108 + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000010c + __offset_LM(i0); } - -static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00000110 + __offset_LM(i0); } - -static inline uint32_t __offset_DSPP(uint32_t idx) -{ - switch (idx) { - case 0: return (mdp5_cfg->dspp.base[0]); - case 1: return (mdp5_cfg->dspp.base[1]); - case 2: return (mdp5_cfg->dspp.base[2]); - case 3: return (mdp5_cfg->dspp.base[3]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); } -#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001 -#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e -#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1 -static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val) -{ - return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK; -} -#define MDP5_DSPP_OP_MODE_PCC_EN 0x00000010 -#define MDP5_DSPP_OP_MODE_DITHER_EN 0x00000100 -#define MDP5_DSPP_OP_MODE_HIST_EN 0x00010000 -#define MDP5_DSPP_OP_MODE_AUTO_CLEAR 0x00020000 -#define MDP5_DSPP_OP_MODE_HIST_LUT_EN 0x00080000 -#define MDP5_DSPP_OP_MODE_PA_EN 0x00100000 -#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000 -#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000 - -static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 
0x00000030 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00000150 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00000210 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00000230 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00000234 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00000238 + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc + __offset_DSPP(i0); } - -static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); } - -static inline uint32_t __offset_PP(uint32_t idx) -{ - switch (idx) { - case 0: return (mdp5_cfg->pp.base[0]); - case 1: return (mdp5_cfg->pp.base[1]); - case 2: return (mdp5_cfg->pp.base[2]); - case 3: return (mdp5_cfg->pp.base[3]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_PP(uint32_t i0) { return 0x00000000 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_TEAR_CHECK_EN(uint32_t i0) { return 0x00000000 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_VSYNC(uint32_t i0) { return 0x00000004 + __offset_PP(i0); } -#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK 0x0007ffff -#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT 0 -static inline uint32_t MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(uint32_t val) -{ - return ((val) << MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT) & MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK; -} -#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN 0x00080000 -#define MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN 0x00100000 - -static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_HEIGHT(uint32_t i0) { return 0x00000008 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_SYNC_WRCOUNT(uint32_t i0) { return 0x0000000c + __offset_PP(i0); } -#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK 0x0000ffff -#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT 0 -static inline uint32_t MDP5_PP_SYNC_WRCOUNT_LINE_COUNT(uint32_t val) -{ - return ((val) << MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK; -} -#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK 0xffff0000 -#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT 16 -static inline uint32_t MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT(uint32_t val) -{ - return ((val) << MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK; -} - -static inline uint32_t REG_MDP5_PP_VSYNC_INIT_VAL(uint32_t i0) { return 0x00000010 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_INT_COUNT_VAL(uint32_t i0) { return 0x00000014 + __offset_PP(i0); } -#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK 0x0000ffff -#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT 0 -static inline uint32_t MDP5_PP_INT_COUNT_VAL_LINE_COUNT(uint32_t val) -{ - return ((val) << MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK; -} -#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK 0xffff0000 -#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT 16 -static inline uint32_t MDP5_PP_INT_COUNT_VAL_FRAME_COUNT(uint32_t val) -{ - return ((val) << MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK; -} - -static inline uint32_t REG_MDP5_PP_SYNC_THRESH(uint32_t i0) { return 0x00000018 + __offset_PP(i0); } -#define MDP5_PP_SYNC_THRESH_START__MASK 0x0000ffff -#define MDP5_PP_SYNC_THRESH_START__SHIFT 0 
-static inline uint32_t MDP5_PP_SYNC_THRESH_START(uint32_t val) -{ - return ((val) << MDP5_PP_SYNC_THRESH_START__SHIFT) & MDP5_PP_SYNC_THRESH_START__MASK; -} -#define MDP5_PP_SYNC_THRESH_CONTINUE__MASK 0xffff0000 -#define MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT 16 -static inline uint32_t MDP5_PP_SYNC_THRESH_CONTINUE(uint32_t val) -{ - return ((val) << MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT) & MDP5_PP_SYNC_THRESH_CONTINUE__MASK; -} - -static inline uint32_t REG_MDP5_PP_START_POS(uint32_t i0) { return 0x0000001c + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_RD_PTR_IRQ(uint32_t i0) { return 0x00000020 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_WR_PTR_IRQ(uint32_t i0) { return 0x00000024 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_OUT_LINE_COUNT(uint32_t i0) { return 0x00000028 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_PP_LINE_COUNT(uint32_t i0) { return 0x0000002c + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_AUTOREFRESH_CONFIG(uint32_t i0) { return 0x00000030 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_FBC_MODE(uint32_t i0) { return 0x00000034 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x00000038 + __offset_PP(i0); } - -static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); } - -static inline uint32_t __offset_WB(uint32_t idx) -{ - switch (idx) { -#if 0 /* TEMPORARY until patch that adds wb.base[] is merged */ - case 0: return (mdp5_cfg->wb.base[0]); - case 1: return (mdp5_cfg->wb.base[1]); - case 2: return (mdp5_cfg->wb.base[2]); - case 3: return (mdp5_cfg->wb.base[3]); - case 4: return (mdp5_cfg->wb.base[4]); -#endif - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_WB(uint32_t i0) { return 0x00000000 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DST_FORMAT(uint32_t i0) { return 0x00000000 + __offset_WB(i0); } -#define MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK 0x00000003 -#define MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT 0 -static inline uint32_t MDP5_WB_DST_FORMAT_DSTC0_OUT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK; -} -#define MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK 0x0000000c -#define MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT 2 -static inline uint32_t MDP5_WB_DST_FORMAT_DSTC1_OUT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK; -} -#define MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK 0x00000030 -#define MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT 4 -static inline uint32_t MDP5_WB_DST_FORMAT_DSTC2_OUT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK; -} -#define MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK 0x000000c0 -#define MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT 6 -static inline uint32_t MDP5_WB_DST_FORMAT_DSTC3_OUT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK; -} -#define MDP5_WB_DST_FORMAT_DSTC3_EN 0x00000100 -#define MDP5_WB_DST_FORMAT_DST_BPP__MASK 0x00000600 -#define MDP5_WB_DST_FORMAT_DST_BPP__SHIFT 9 -static inline uint32_t MDP5_WB_DST_FORMAT_DST_BPP(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DST_BPP__SHIFT) & MDP5_WB_DST_FORMAT_DST_BPP__MASK; -} -#define MDP5_WB_DST_FORMAT_PACK_COUNT__MASK 0x00003000 -#define MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT 12 -static inline uint32_t MDP5_WB_DST_FORMAT_PACK_COUNT(uint32_t val) -{ - return ((val) << 
MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT) & MDP5_WB_DST_FORMAT_PACK_COUNT__MASK; -} -#define MDP5_WB_DST_FORMAT_DST_ALPHA_X 0x00004000 -#define MDP5_WB_DST_FORMAT_PACK_TIGHT 0x00020000 -#define MDP5_WB_DST_FORMAT_PACK_ALIGN_MSB 0x00040000 -#define MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK 0x00180000 -#define MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT 19 -static inline uint32_t MDP5_WB_DST_FORMAT_WRITE_PLANES(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT) & MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK; -} -#define MDP5_WB_DST_FORMAT_DST_DITHER_EN 0x00400000 -#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK 0x03800000 -#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT 23 -static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK; -} -#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK 0x3c000000 -#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT 26 -static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SITE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK; -} -#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK 0xc0000000 -#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT 30 -static inline uint32_t MDP5_WB_DST_FORMAT_FRAME_FORMAT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT) & MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK; -} - -static inline uint32_t REG_MDP5_WB_DST_OP_MODE(uint32_t i0) { return 0x00000004 + __offset_WB(i0); } -#define MDP5_WB_DST_OP_MODE_BWC_ENC_EN 0x00000001 -#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK 0x00000006 -#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT 1 -static inline uint32_t MDP5_WB_DST_OP_MODE_BWC_ENC_OP(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT) & MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK; -} -#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK 0x00000010 -#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT 4 -static inline uint32_t MDP5_WB_DST_OP_MODE_BLOCK_SIZE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT) & MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK; -} -#define MDP5_WB_DST_OP_MODE_ROT_MODE__MASK 0x00000020 -#define MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT 5 -static inline uint32_t MDP5_WB_DST_OP_MODE_ROT_MODE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT) & MDP5_WB_DST_OP_MODE_ROT_MODE__MASK; -} -#define MDP5_WB_DST_OP_MODE_ROT_EN 0x00000040 -#define MDP5_WB_DST_OP_MODE_CSC_EN 0x00000100 -#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00000200 -#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 9 -static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK; -} -#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00000400 -#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 10 -static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK; -} -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_EN 0x00000800 -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK 0x00001000 -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT 12 -static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT) & 
MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK; -} -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK 0x00002000 -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT 13 -static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK; -} -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK 0x00004000 -#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT 14 -static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD(uint32_t val) -{ - return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK; -} - -static inline uint32_t REG_MDP5_WB_DST_PACK_PATTERN(uint32_t i0) { return 0x00000008 + __offset_WB(i0); } -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK 0x00000003 -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT 0 -static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT0(uint32_t val) -{ - return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK; -} -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK 0x00000300 -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT 8 -static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT1(uint32_t val) -{ - return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK; -} -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK 0x00030000 -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT 16 -static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT2(uint32_t val) -{ - return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK; -} -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK 0x03000000 -#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT 24 -static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT3(uint32_t val) -{ - return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK; -} - -static inline uint32_t REG_MDP5_WB_DST0_ADDR(uint32_t i0) { return 0x0000000c + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DST1_ADDR(uint32_t i0) { return 0x00000010 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DST2_ADDR(uint32_t i0) { return 0x00000014 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DST3_ADDR(uint32_t i0) { return 0x00000018 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DST_YSTRIDE0(uint32_t i0) { return 0x0000001c + __offset_WB(i0); } -#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK 0x0000ffff -#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT 0 -static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK; -} -#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK 0xffff0000 -#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT 16 -static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK; -} - -static inline uint32_t REG_MDP5_WB_DST_YSTRIDE1(uint32_t i0) { return 0x00000020 + __offset_WB(i0); } -#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK 0x0000ffff -#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT 0 -static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK; -} -#define 
MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK 0xffff0000 -#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT 16 -static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE(uint32_t val) -{ - return ((val) << MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK; -} - -static inline uint32_t REG_MDP5_WB_DST_DITHER_BITDEPTH(uint32_t i0) { return 0x00000024 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW0(uint32_t i0) { return 0x00000030 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW1(uint32_t i0) { return 0x00000034 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW2(uint32_t i0) { return 0x00000038 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW3(uint32_t i0) { return 0x0000003c + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_DST_WRITE_CONFIG(uint32_t i0) { return 0x00000048 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_ROTATION_DNSCALER(uint32_t i0) { return 0x00000050 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_0_3(uint32_t i0) { return 0x00000060 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_1_2(uint32_t i0) { return 0x00000064 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_0_3(uint32_t i0) { return 0x00000068 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_1_2(uint32_t i0) { return 0x0000006c + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_OUT_SIZE(uint32_t i0) { return 0x00000074 + __offset_WB(i0); } -#define MDP5_WB_OUT_SIZE_DST_W__MASK 0x0000ffff -#define MDP5_WB_OUT_SIZE_DST_W__SHIFT 0 -static inline uint32_t MDP5_WB_OUT_SIZE_DST_W(uint32_t val) -{ - return ((val) << MDP5_WB_OUT_SIZE_DST_W__SHIFT) & MDP5_WB_OUT_SIZE_DST_W__MASK; -} -#define MDP5_WB_OUT_SIZE_DST_H__MASK 0xffff0000 -#define MDP5_WB_OUT_SIZE_DST_H__SHIFT 16 -static inline uint32_t MDP5_WB_OUT_SIZE_DST_H(uint32_t val) -{ - return ((val) << MDP5_WB_OUT_SIZE_DST_H__SHIFT) & MDP5_WB_OUT_SIZE_DST_H__MASK; -} - -static inline uint32_t REG_MDP5_WB_ALPHA_X_VALUE(uint32_t i0) { return 0x00000078 + __offset_WB(i0); } - -static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_0(uint32_t i0) { return 0x00000260 + __offset_WB(i0); } -#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff -#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK; -} -#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000 -#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT 16 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_1(uint32_t i0) { return 0x00000264 + __offset_WB(i0); } -#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff -#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK; -} -#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000 -#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT 16 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21(uint32_t val) -{ - return ((val) << 
MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_2(uint32_t i0) { return 0x00000268 + __offset_WB(i0); } -#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff -#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK; -} -#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000 -#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT 16 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_3(uint32_t i0) { return 0x0000026c + __offset_WB(i0); } -#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff -#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK; -} -#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000 -#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT 16 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_4(uint32_t i0) { return 0x00000270 + __offset_WB(i0); } -#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff -#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; } -#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK 0x000000ff -#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK; -} -#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK 0x0000ff00 -#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT 8 -static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; } -#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK 0x000000ff -#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK; -} -#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK 0x0000ff00 -#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT 8 -static inline uint32_t 
MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS_REG(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; } -#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK 0x000001ff -#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK; -} - -static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; } - -static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS_REG(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; } -#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK 0x000001ff -#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT 0 -static inline uint32_t MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE(uint32_t val) -{ - return ((val) << MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK; -} - -static inline uint32_t __offset_INTF(uint32_t idx) -{ - switch (idx) { - case 0: return (mdp5_cfg->intf.base[0]); - case 1: return (mdp5_cfg->intf.base[1]); - case 2: return (mdp5_cfg->intf.base[2]); - case 3: return (mdp5_cfg->intf.base[3]); - case 4: return (mdp5_cfg->intf.base[4]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00000004 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00000008 + __offset_INTF(i0); } -#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff -#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0 -static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val) -{ - return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK; -} -#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK 0xffff0000 -#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT 16 -static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val) -{ - return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK; -} - -static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0000000c + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00000010 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00000014 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00000018 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0000001c + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00000020 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00000024 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00000028 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0000002c + 
__offset_INTF(i0); } -#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff -#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0 -static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val) -{ - return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK; -} -#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000 - -static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00000030 + __offset_INTF(i0); } -#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff -#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0 -static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val) -{ - return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK; -} - -static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00000034 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00000038 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0000003c + __offset_INTF(i0); } -#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff -#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0 -static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val) -{ - return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK; -} -#define MDP5_INTF_DISPLAY_HCTL_END__MASK 0xffff0000 -#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT 16 -static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val) -{ - return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK; -} - -static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00000040 + __offset_INTF(i0); } -#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff -#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0 -static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val) -{ - return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK; -} -#define MDP5_INTF_ACTIVE_HCTL_END__MASK 0x7fff0000 -#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT 16 -static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val) -{ - return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK; -} -#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000 - -static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00000044 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00000048 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0000004c + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00000050 + __offset_INTF(i0); } -#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001 -#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002 -#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004 - -static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00000054 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00000058 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0000005c + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00000084 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00000090 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000000a8 + 
__offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000000ac + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000000b0 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000000f0 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000000f4 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000000f8 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00000100 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00000104 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00000108 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0000010c + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00000110 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00000114 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00000118 + __offset_INTF(i0); } - -static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0000011c + __offset_INTF(i0); } - -static inline uint32_t __offset_AD(uint32_t idx) -{ - switch (idx) { - case 0: return (mdp5_cfg->ad.base[0]); - case 1: return (mdp5_cfg->ad.base[1]); - default: return INVALID_IDX(idx); - } -} -static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00000000 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00000000 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00000004 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00000008 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0000000c + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00000010 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00000014 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00000018 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0000001c + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00000020 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00000024 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00000028 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0000002c + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00000030 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00000034 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00000038 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0000007c + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000000c8 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 
0x000000cc + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000000d0 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000000d4 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000000d8 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000000dc + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000000e0 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000000e8 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000000ec + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000000f0 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000000f4 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000000f8 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00000100 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00000144 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00000148 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0000014c + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00000154 + __offset_AD(i0); } - -static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00000158 + __offset_AD(i0); } - - -#endif /* MDP5_XML */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h index 26c5d8b4ab..4b988e69fb 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h @@ -69,6 +69,16 @@ struct mdp5_mdp_block { uint32_t caps; /* MDP capabilities: MDP_CAP_xxx bits */ }; +struct mdp5_wb_instance { + int id; + int lm; +}; + +struct mdp5_wb_block { + MDP5_SUB_BLOCK_DEFINITION; + struct mdp5_wb_instance instances[MAX_BASES]; +}; + #define MDP5_INTF_NUM_MAX 5 struct mdp5_intf_block { @@ -98,6 +108,7 @@ struct mdp5_cfg_hw { struct mdp5_sub_block pp; struct mdp5_sub_block dsc; struct mdp5_sub_block cdm; + struct mdp5_wb_block wb; struct mdp5_intf_block intf; struct mdp5_perf_block perf; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 4a3db2ea16..0f653e62b4 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -216,7 +216,7 @@ static void blend_setup(struct drm_crtc *crtc) struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_plane *plane; struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL}; - const struct mdp_format *format; + const struct msm_format *format; struct mdp5_hw_mixer *mixer = pipeline->mixer; uint32_t lm = mixer->lm; struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; @@ -274,7 +274,7 @@ static void blend_setup(struct drm_crtc *crtc) ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; DBG("Border Color is enabled"); } else if (plane_cnt) { - format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb)); + format = msm_framebuffer_format(pstates[STAGE_BASE]->base.fb); if (format->alpha_enable) bg_alpha_enabled = true; @@ -285,8 +285,7 @@ static void blend_setup(struct drm_crtc *crtc) if (!pstates[i]) continue; - format = to_mdp_format( - 
msm_framebuffer_format(pstates[i]->base.fb)); + format = msm_framebuffer_format(pstates[i]->base.fb); plane = pstates[i]->base.plane; blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index a874fd95cc..374704cce6 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -224,7 +224,6 @@ static const struct mdp_kms_funcs kms_funcs = { .prepare_commit = mdp5_prepare_commit, .wait_flush = mdp5_wait_flush, .complete_commit = mdp5_complete_commit, - .get_format = mdp_get_format, .destroy = mdp5_kms_destroy, }, .set_irqmask = mdp5_set_irqmask, diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h index fac9f05aa6..36b6842dfc 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h @@ -171,13 +171,13 @@ struct mdp5_encoder { static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) { WARN_ON(mdp5_kms->enable_count <= 0); - msm_writel(data, mdp5_kms->mmio + reg); + writel(data, mdp5_kms->mmio + reg); } static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg) { WARN_ON(mdp5_kms->enable_count <= 0); - return msm_readl(mdp5_kms->mmio + reg); + return readl(mdp5_kms->mmio + reg); } static inline const char *stage2name(enum mdp_mixer_stage_id stage) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 0d5ff03cb0..62de248ed1 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -17,9 +17,6 @@ struct mdp5_plane { struct drm_plane base; - - uint32_t nformats; - uint32_t formats[32]; }; #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) @@ -38,15 +35,6 @@ static bool plane_enabled(struct drm_plane_state *state) return state->visible; } -static void mdp5_plane_destroy(struct drm_plane *plane) -{ - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - - drm_plane_cleanup(plane); - - kfree(mdp5_plane); -} - /* helper to install properties which are common to planes and crtcs */ static void mdp5_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj) @@ -138,7 +126,6 @@ static void mdp5_plane_destroy_state(struct drm_plane *plane, static const struct drm_plane_funcs mdp5_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = mdp5_plane_destroy, .reset = mdp5_plane_reset, .atomic_duplicate_state = mdp5_plane_duplicate_state, .atomic_destroy_state = mdp5_plane_destroy_state, @@ -231,12 +218,12 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, if (plane_enabled(state)) { unsigned int rotation; - const struct mdp_format *format; + const struct msm_format *format; struct mdp5_kms *mdp5_kms = get_kms(plane); uint32_t blkcfg = 0; - format = to_mdp_format(msm_framebuffer_format(state->fb)); - if (MDP_FORMAT_IS_YUV(format)) + format = msm_framebuffer_format(state->fb); + if (MSM_FORMAT_IS_YUV(format)) caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC; if (((state->src_w >> 16) != state->crtc_w) || @@ -271,8 +258,8 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, new_hwpipe = true; if (mdp5_kms->smp) { - const struct mdp_format *format = - to_mdp_format(msm_framebuffer_format(state->fb)); + const struct msm_format *format = + 
msm_framebuffer_format(state->fb); blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format, state->src_w >> 16, false); @@ -633,14 +620,14 @@ static int calc_scaley_steps(struct drm_plane *plane, return 0; } -static uint32_t get_scale_config(const struct mdp_format *format, +static uint32_t get_scale_config(const struct msm_format *format, uint32_t src, uint32_t dst, bool horz) { - const struct drm_format_info *info = drm_format_info(format->base.pixel_format); - bool scaling = format->is_yuv ? true : (src != dst); + const struct drm_format_info *info = drm_format_info(format->pixel_format); + bool yuv = MSM_FORMAT_IS_YUV(format); + bool scaling = yuv ? true : (src != dst); uint32_t sub; uint32_t ya_filter, uv_filter; - bool yuv = format->is_yuv; if (!scaling) return 0; @@ -664,12 +651,12 @@ static uint32_t get_scale_config(const struct mdp_format *format, COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter)); } -static void calc_pixel_ext(const struct mdp_format *format, +static void calc_pixel_ext(const struct msm_format *format, uint32_t src, uint32_t dst, uint32_t phase_step[2], int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX], bool horz) { - bool scaling = format->is_yuv ? true : (src != dst); + bool scaling = MSM_FORMAT_IS_YUV(format) ? true : (src != dst); int i; /* @@ -687,11 +674,11 @@ static void calc_pixel_ext(const struct mdp_format *format, } static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, - const struct mdp_format *format, + const struct msm_format *format, uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX], uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX]) { - const struct drm_format_info *info = drm_format_info(format->base.pixel_format); + const struct drm_format_info *info = drm_format_info(format->pixel_format); uint32_t lr, tb, req; int i; @@ -699,7 +686,7 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, uint32_t roi_w = src_w; uint32_t roi_h = src_h; - if (format->is_yuv && i == COMP_1_2) { + if (MSM_FORMAT_IS_YUV(format) && i == COMP_1_2) { roi_w /= info->hsub; roi_h /= info->vsub; } @@ -773,8 +760,8 @@ static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms, { enum mdp5_pipe pipe = hwpipe->pipe; bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT; - const struct mdp_format *format = - to_mdp_format(msm_framebuffer_format(fb)); + const struct msm_format *format = + msm_framebuffer_format(fb); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) | @@ -798,21 +785,22 @@ static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms, mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe), MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | - MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | - MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) | - MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) | + MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r_cr) | + MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g_y) | + MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b_cb) | COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) | - MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | + MDP5_PIPE_SRC_FORMAT_CPP(format->bpp - 1) | MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | - COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | + COND(format->flags & MSM_FORMAT_FLAG_UNPACK_TIGHT, + MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) | MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample)); mdp5_write(mdp5_kms, 
REG_MDP5_PIPE_SRC_UNPACK(pipe), - MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | - MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) | - MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | - MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); + MDP5_PIPE_SRC_UNPACK_ELEM0(format->element[0]) | + MDP5_PIPE_SRC_UNPACK_ELEM1(format->element[1]) | + MDP5_PIPE_SRC_UNPACK_ELEM2(format->element[2]) | + MDP5_PIPE_SRC_UNPACK_ELEM3(format->element[3])); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe), (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) | @@ -845,7 +833,7 @@ static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms, } if (hwpipe->caps & MDP_PIPE_CAP_CSC) { - if (MDP_FORMAT_IS_YUV(format)) + if (MSM_FORMAT_IS_YUV(format)) csc_enable(mdp5_kms, pipe, mdp_get_default_csc_cfg(CSC_YUV2RGB)); else @@ -864,7 +852,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, struct mdp5_kms *mdp5_kms = get_kms(plane); enum mdp5_pipe pipe = hwpipe->pipe; struct mdp5_hw_pipe *right_hwpipe; - const struct mdp_format *format; + const struct msm_format *format; uint32_t nplanes, config = 0; struct phase_step step = { { 0 } }; struct pixel_ext pe = { { 0 } }; @@ -885,8 +873,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, if (WARN_ON(nplanes > pipe2nclients(pipe))) return -EINVAL; - format = to_mdp_format(msm_framebuffer_format(fb)); - pix_format = format->base.pixel_format; + format = msm_framebuffer_format(fb); + pix_format = format->pixel_format; src_x = src->x1; src_y = src->y1; @@ -1007,31 +995,48 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane) return mask; } +static const uint32_t mdp5_plane_formats[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, + DRM_FORMAT_NV16, + DRM_FORMAT_NV61, + DRM_FORMAT_VYUY, + DRM_FORMAT_UYVY, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_YUV420, + DRM_FORMAT_YVU420, +}; + /* initialize plane */ struct drm_plane *mdp5_plane_init(struct drm_device *dev, enum drm_plane_type type) { struct drm_plane *plane = NULL; struct mdp5_plane *mdp5_plane; - int ret; - mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL); - if (!mdp5_plane) { - ret = -ENOMEM; - goto fail; - } + mdp5_plane = drmm_universal_plane_alloc(dev, struct mdp5_plane, base, + 0xff, &mdp5_plane_funcs, + mdp5_plane_formats, ARRAY_SIZE(mdp5_plane_formats), + NULL, type, NULL); + if (IS_ERR(mdp5_plane)) + return ERR_CAST(mdp5_plane); plane = &mdp5_plane->base; - mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, - ARRAY_SIZE(mdp5_plane->formats), false); - - ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, - mdp5_plane->formats, mdp5_plane->nformats, - NULL, type, NULL); - if (ret) - goto fail; - drm_plane_helper_add(plane, &mdp5_plane_helper_funcs); mdp5_plane_install_properties(plane, &plane->base); @@ -1039,10 +1044,4 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, drm_plane_enable_fb_damage_clips(plane); return plane; - -fail: - if (plane) - mdp5_plane_destroy(plane); - - return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c index b4bebb425d..3a7f7edda9 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c @@ -114,10 +114,10 @@ static void set_fifo_thresholds(struct mdp5_smp *smp, * 
presumably happens during the dma from scanout buffer). */ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, - const struct mdp_format *format, + const struct msm_format *format, u32 width, bool hdecim) { - const struct drm_format_info *info = drm_format_info(format->base.pixel_format); + const struct drm_format_info *info = drm_format_info(format->pixel_format); struct mdp5_kms *mdp5_kms = get_kms(smp); int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); int i, hsub, nplanes, nlines; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h index 21732ed485..1be9832382 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h @@ -74,7 +74,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p, struct mdp5_global_state *global_state); uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, - const struct mdp_format *format, + const struct msm_format *format, u32 width, bool hdecim); int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state, diff --git a/drivers/gpu/drm/msm/disp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h deleted file mode 100644 index 4dd8d7db28..0000000000 --- a/drivers/gpu/drm/msm/disp/mdp_common.xml.h +++ /dev/null @@ -1,111 +0,0 @@ -#ifndef MDP_COMMON_XML -#define MDP_COMMON_XML - -/* Autogenerated file, DO NOT EDIT manually! - -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 
17:45:56) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) - -Copyright (C) 2013-2022 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ - - -enum mdp_chroma_samp_type { - CHROMA_FULL = 0, - CHROMA_H2V1 = 1, - CHROMA_H1V2 = 2, - CHROMA_420 = 3, -}; - -enum mdp_fetch_type { - MDP_PLANE_INTERLEAVED = 0, - MDP_PLANE_PLANAR = 1, - MDP_PLANE_PSEUDO_PLANAR = 2, -}; - -enum mdp_mixer_stage_id { - STAGE_UNUSED = 0, - STAGE_BASE = 1, - STAGE0 = 2, - STAGE1 = 3, - STAGE2 = 4, - STAGE3 = 5, - STAGE4 = 6, - STAGE5 = 7, - STAGE6 = 8, - STAGE_MAX = 8, -}; - -enum mdp_alpha_type { - FG_CONST = 0, - BG_CONST = 1, - FG_PIXEL = 2, - BG_PIXEL = 3, -}; - -enum mdp_component_type { - COMP_0 = 0, - COMP_1_2 = 1, - COMP_3 = 2, - COMP_MAX = 3, -}; - -enum mdp_bpc { - BPC1 = 0, - BPC5 = 1, - BPC6 = 2, - BPC8 = 3, -}; - -enum mdp_bpc_alpha { - BPC1A = 0, - BPC4A = 1, - BPC6A = 2, - BPC8A = 3, -}; - - -#endif /* MDP_COMMON_XML */ diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c index 025595336f..426782d50c 100644 --- a/drivers/gpu/drm/msm/disp/mdp_format.c +++ b/drivers/gpu/drm/msm/disp/mdp_format.c @@ -62,115 +62,573 @@ static struct csc_cfg csc_convert[CSC_MAX] = { }, }; -#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \ - .base = { .pixel_format = DRM_FORMAT_ ## name }, \ - .bpc_a = BPC ## a ## A, \ - .bpc_r = BPC ## r, \ - .bpc_g = BPC ## g, \ - .bpc_b = BPC ## b, \ - .unpack = { e0, e1, e2, e3 }, \ - .alpha_enable = alpha, \ - .unpack_tight = tight, \ - .cpp = c, \ - .unpack_count = cnt, \ - .fetch_type = fp, \ - .chroma_sample = cs, \ - .is_yuv = yuv, \ +#define MDP_TILE_HEIGHT_DEFAULT 1 +#define MDP_TILE_HEIGHT_UBWC 4 +#define MDP_TILE_HEIGHT_NV12 8 + +#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \ +bp, flg, fm, np) \ +{ \ + .pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_type = MDP_PLANE_INTERLEAVED, \ + .alpha_enable = alpha, \ + .element = { (e0), (e1), (e2), (e3) }, \ + .bpc_g_y = g, \ + .bpc_b_cb = b, \ + .bpc_r_cr = r, \ + .bpc_a = a, \ + .chroma_sample = CHROMA_FULL, \ + .unpack_count = uc, \ + .bpp = bp, \ + .fetch_mode = fm, \ + .flags = MSM_FORMAT_FLAG_UNPACK_TIGHT | flg, \ + .num_planes = np, \ + .tile_height = MDP_TILE_HEIGHT_DEFAULT \ } -#define BPC0A 0 +#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \ +alpha, bp, flg, 
fm, np, th) \ +{ \ + .pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_type = MDP_PLANE_INTERLEAVED, \ + .alpha_enable = alpha, \ + .element = { (e0), (e1), (e2), (e3) }, \ + .bpc_g_y = g, \ + .bpc_b_cb = b, \ + .bpc_r_cr = r, \ + .bpc_a = a, \ + .chroma_sample = CHROMA_FULL, \ + .unpack_count = uc, \ + .bpp = bp, \ + .fetch_mode = fm, \ + .flags = MSM_FORMAT_FLAG_UNPACK_TIGHT | flg, \ + .num_planes = np, \ + .tile_height = th \ +} -/* - * Note: Keep RGB formats 1st, followed by YUV formats to avoid breaking - * mdp_get_rgb_formats()'s implementation. - */ -static const struct mdp_format formats[] = { - /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */ - FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(XBGR8888, 8, 8, 8, 8, 2, 0, 1, 3, false, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(RGBX8888, 8, 8, 8, 8, 3, 1, 0, 2, false, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(BGRX8888, 8, 8, 8, 8, 3, 2, 0, 1, false, true, 4, 4, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), - FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3, - MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), +#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \ +alpha, chroma, count, bp, flg, fm, np) \ +{ \ + .pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_type = MDP_PLANE_INTERLEAVED, \ + .alpha_enable = alpha, \ + .element = { (e0), (e1), (e2), (e3)}, \ + .bpc_g_y = g, \ + .bpc_b_cb = b, \ + .bpc_r_cr = r, \ + .bpc_a = a, \ + .chroma_sample = chroma, \ + .unpack_count = count, \ + .bpp = bp, \ + .fetch_mode = fm, \ + .flags = MSM_FORMAT_FLAG_UNPACK_TIGHT | flg, \ + .num_planes = np, \ + .tile_height = MDP_TILE_HEIGHT_DEFAULT \ +} + +#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \ +{ \ + .pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_type = MDP_PLANE_PSEUDO_PLANAR, \ + .alpha_enable = 0, \ + .element = { (e0), (e1), 0, 0 }, \ + .bpc_g_y = g, \ + .bpc_b_cb = b, \ + .bpc_r_cr = r, \ + .bpc_a = a, \ + .chroma_sample = chroma, \ + .unpack_count = 2, \ + .bpp = 2, \ + .fetch_mode = fm, \ + .flags = MSM_FORMAT_FLAG_UNPACK_TIGHT | flg, \ + .num_planes = np, \ + .tile_height = MDP_TILE_HEIGHT_DEFAULT \ +} + +#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \ +flg, fm, np, th) \ +{ \ + .pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_type = MDP_PLANE_PSEUDO_PLANAR, \ + .alpha_enable = 0, \ + .element = { (e0), (e1), 0, 0 }, \ + .bpc_g_y = g, \ + .bpc_b_cb = b, \ + .bpc_r_cr = r, \ + .bpc_a = a, \ + .chroma_sample = chroma, \ + .unpack_count = 2, \ + .bpp = 2, \ + .fetch_mode = fm, \ + .flags = MSM_FORMAT_FLAG_UNPACK_TIGHT | flg, \ + .num_planes = np, \ + .tile_height = th \ +} + +#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\ +{ \ + .pixel_format = 
DRM_FORMAT_ ## fmt, \ + .fetch_type = MDP_PLANE_PSEUDO_PLANAR, \ + .alpha_enable = 0, \ + .element = { (e0), (e1), 0, 0 }, \ + .bpc_g_y = g, \ + .bpc_b_cb = b, \ + .bpc_r_cr = r, \ + .bpc_a = a, \ + .chroma_sample = chroma, \ + .unpack_count = 2, \ + .bpp = 2, \ + .fetch_mode = fm, \ + .flags = MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB | flg, \ + .num_planes = np, \ + .tile_height = MDP_TILE_HEIGHT_DEFAULT \ +} + +#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \ +flg, fm, np, th) \ +{ \ + .pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_type = MDP_PLANE_PSEUDO_PLANAR, \ + .alpha_enable = 0, \ + .element = { (e0), (e1), 0, 0 }, \ + .bpc_g_y = g, \ + .bpc_b_cb = b, \ + .bpc_r_cr = r, \ + .bpc_a = a, \ + .chroma_sample = chroma, \ + .unpack_count = 2, \ + .bpp = 2, \ + .fetch_mode = fm, \ + .flags = MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB | flg, \ + .num_planes = np, \ + .tile_height = th \ +} + +#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \ +flg, fm, np) \ +{ \ + .pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_type = MDP_PLANE_PLANAR, \ + .alpha_enable = alpha, \ + .element = { (e0), (e1), (e2), 0 }, \ + .bpc_g_y = g, \ + .bpc_b_cb = b, \ + .bpc_r_cr = r, \ + .bpc_a = a, \ + .chroma_sample = chroma, \ + .unpack_count = 1, \ + .bpp = bp, \ + .fetch_mode = fm, \ + .flags = MSM_FORMAT_FLAG_UNPACK_TIGHT | flg, \ + .num_planes = np, \ + .tile_height = MDP_TILE_HEIGHT_DEFAULT \ +} + +static const struct msm_format mdp_formats[] = { + INTERLEAVED_RGB_FMT(ARGB8888, + BPC8A, BPC8, BPC8, BPC8, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + true, 4, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ABGR8888, + BPC8A, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XBGR8888, + BPC8A, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 4, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBA8888, + BPC8A, BPC8, BPC8, BPC8, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + true, 4, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRA8888, + BPC8A, BPC8, BPC8, BPC8, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + true, 4, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRX8888, + BPC8A, BPC8, BPC8, BPC8, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + false, 4, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XRGB8888, + BPC8A, BPC8, BPC8, BPC8, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + false, 4, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBX8888, + BPC8A, BPC8, BPC8, BPC8, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + false, 4, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGB888, + 0, BPC8, BPC8, BPC8, + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, + false, 3, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGR888, + 0, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, + false, 3, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGB565, + 0, BPC5, BPC6, BPC5, + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, + false, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGR565, + 0, BPC5, BPC6, BPC5, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, + false, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ARGB1555, + BPC1A, BPC5, BPC5, BPC5, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + true, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ABGR1555, + BPC1A, BPC5, BPC5, BPC5, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBA5551, + BPC1A, BPC5, BPC5, BPC5, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + true, 2, 0, + MDP_FETCH_LINEAR, 1), + + 
INTERLEAVED_RGB_FMT(BGRA5551, + BPC1A, BPC5, BPC5, BPC5, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + true, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XRGB1555, + BPC1A, BPC5, BPC5, BPC5, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + false, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XBGR1555, + BPC1A, BPC5, BPC5, BPC5, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBX5551, + BPC1A, BPC5, BPC5, BPC5, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + false, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRX5551, + BPC1A, BPC5, BPC5, BPC5, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + false, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ARGB4444, + BPC4A, BPC4, BPC4, BPC4, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + true, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ABGR4444, + BPC4A, BPC4, BPC4, BPC4, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBA4444, + BPC4A, BPC4, BPC4, BPC4, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + true, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRA4444, + BPC4A, BPC4, BPC4, BPC4, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + true, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XRGB4444, + BPC4A, BPC4, BPC4, BPC4, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + false, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XBGR4444, + BPC4A, BPC4, BPC4, BPC4, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBX4444, + BPC4A, BPC4, BPC4, BPC4, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + false, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRX4444, + BPC4A, BPC4, BPC4, BPC4, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + false, 2, 0, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRA1010102, + BPC8A, BPC8, BPC8, BPC8, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + true, 4, MSM_FORMAT_FLAG_DX, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBA1010102, + BPC8A, BPC8, BPC8, BPC8, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + true, 4, MSM_FORMAT_FLAG_DX, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ABGR2101010, + BPC8A, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, MSM_FORMAT_FLAG_DX, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ARGB2101010, + BPC8A, BPC8, BPC8, BPC8, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + true, 4, MSM_FORMAT_FLAG_DX, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XRGB2101010, + BPC8A, BPC8, BPC8, BPC8, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + false, 4, MSM_FORMAT_FLAG_DX, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRX1010102, + BPC8A, BPC8, BPC8, BPC8, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + false, 4, MSM_FORMAT_FLAG_DX, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XBGR2101010, + BPC8A, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 4, MSM_FORMAT_FLAG_DX, + MDP_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBX1010102, + BPC8A, BPC8, BPC8, BPC8, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + false, 4, MSM_FORMAT_FLAG_DX, + MDP_FETCH_LINEAR, 1), /* --- RGB formats above / YUV formats below this line --- */ /* 2 plane YUV */ - FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, - MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true), - FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, - MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true), - FMT(NV16, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, - MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true), - FMT(NV61, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, - 
MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true), + PSEUDO_YUV_FMT(NV12, + 0, BPC8, BPC8, BPC8, + C1_B_Cb, C2_R_Cr, + CHROMA_420, MSM_FORMAT_FLAG_YUV, + MDP_FETCH_LINEAR, 2), + + PSEUDO_YUV_FMT(NV21, + 0, BPC8, BPC8, BPC8, + C2_R_Cr, C1_B_Cb, + CHROMA_420, MSM_FORMAT_FLAG_YUV, + MDP_FETCH_LINEAR, 2), + + PSEUDO_YUV_FMT(NV16, + 0, BPC8, BPC8, BPC8, + C1_B_Cb, C2_R_Cr, + CHROMA_H2V1, MSM_FORMAT_FLAG_YUV, + MDP_FETCH_LINEAR, 2), + + PSEUDO_YUV_FMT(NV61, + 0, BPC8, BPC8, BPC8, + C2_R_Cr, C1_B_Cb, + CHROMA_H2V1, MSM_FORMAT_FLAG_YUV, + MDP_FETCH_LINEAR, 2), + + PSEUDO_YUV_FMT_LOOSE(P010, + 0, BPC8, BPC8, BPC8, + C1_B_Cb, C2_R_Cr, + CHROMA_420, MSM_FORMAT_FLAG_DX | MSM_FORMAT_FLAG_YUV, + MDP_FETCH_LINEAR, 2), + /* 1 plane YUV */ - FMT(VYUY, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 2, 4, - MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), - FMT(UYVY, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 2, 4, - MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), - FMT(YUYV, 0, 8, 8, 8, 0, 1, 0, 2, false, true, 2, 4, - MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), - FMT(YVYU, 0, 8, 8, 8, 0, 2, 0, 1, false, true, 2, 4, - MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + INTERLEAVED_YUV_FMT(VYUY, + 0, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y, + false, CHROMA_H2V1, 4, 2, MSM_FORMAT_FLAG_YUV, + MDP_FETCH_LINEAR, 2), + + INTERLEAVED_YUV_FMT(UYVY, + 0, BPC8, BPC8, BPC8, + C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y, + false, CHROMA_H2V1, 4, 2, MSM_FORMAT_FLAG_YUV, + MDP_FETCH_LINEAR, 2), + + INTERLEAVED_YUV_FMT(YUYV, + 0, BPC8, BPC8, BPC8, + C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr, + false, CHROMA_H2V1, 4, 2, MSM_FORMAT_FLAG_YUV, + MDP_FETCH_LINEAR, 2), + + INTERLEAVED_YUV_FMT(YVYU, + 0, BPC8, BPC8, BPC8, + C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb, + false, CHROMA_H2V1, 4, 2, MSM_FORMAT_FLAG_YUV, + MDP_FETCH_LINEAR, 2), + /* 3 plane YUV */ - FMT(YUV420, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 1, 1, - MDP_PLANE_PLANAR, CHROMA_420, true), - FMT(YVU420, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 1, 1, - MDP_PLANE_PLANAR, CHROMA_420, true), + PLANAR_YUV_FMT(YUV420, + 0, BPC8, BPC8, BPC8, + C2_R_Cr, C1_B_Cb, C0_G_Y, + false, CHROMA_420, 1, MSM_FORMAT_FLAG_YUV, + MDP_FETCH_LINEAR, 3), + + PLANAR_YUV_FMT(YVU420, + 0, BPC8, BPC8, BPC8, + C1_B_Cb, C2_R_Cr, C0_G_Y, + false, CHROMA_420, 1, MSM_FORMAT_FLAG_YUV, + MDP_FETCH_LINEAR, 3), }; /* - * Note: - * @rgb_only must be set to true, when requesting - * supported formats for RGB pipes. + * UBWC formats table: + * This table holds the UBWC formats supported. + * If a compression ratio needs to be used for this or any other format, + * the data will be passed by user-space. */ -uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats, - bool rgb_only) -{ - uint32_t i; - for (i = 0; i < ARRAY_SIZE(formats); i++) { - const struct mdp_format *f = &formats[i]; +static const struct msm_format mdp_formats_ubwc[] = { + INTERLEAVED_RGB_FMT_TILED(BGR565, + 0, BPC5, BPC6, BPC5, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, + false, 2, MSM_FORMAT_FLAG_COMPRESSED, + MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC), - if (i == max_formats) - break; + INTERLEAVED_RGB_FMT_TILED(ABGR8888, + BPC8A, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, MSM_FORMAT_FLAG_COMPRESSED, + MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC), - if (rgb_only && MDP_FORMAT_IS_YUV(f)) - break; + /* ARGB8888 and ABGR8888 purposely have the same color + * ordering. The hardware only supports ABGR8888 UBWC + * natively. 
+ */ + INTERLEAVED_RGB_FMT_TILED(ARGB8888, + BPC8A, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, MSM_FORMAT_FLAG_COMPRESSED, + MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC), - pixel_formats[i] = f->base.pixel_format; - } + INTERLEAVED_RGB_FMT_TILED(XBGR8888, + BPC8A, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 4, MSM_FORMAT_FLAG_COMPRESSED, + MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC), - return i; -} + INTERLEAVED_RGB_FMT_TILED(XRGB8888, + BPC8A, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 4, MSM_FORMAT_FLAG_COMPRESSED, + MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC), + + INTERLEAVED_RGB_FMT_TILED(ABGR2101010, + BPC8A, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, MSM_FORMAT_FLAG_DX | MSM_FORMAT_FLAG_COMPRESSED, + MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC), + + INTERLEAVED_RGB_FMT_TILED(XBGR2101010, + BPC8A, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, MSM_FORMAT_FLAG_DX | MSM_FORMAT_FLAG_COMPRESSED, + MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC), + + INTERLEAVED_RGB_FMT_TILED(XRGB2101010, + BPC8A, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, MSM_FORMAT_FLAG_DX | MSM_FORMAT_FLAG_COMPRESSED, + MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC), + + /* XRGB2101010 and ARGB2101010 purposely have the same color + * ordering. The hardware only supports ARGB2101010 UBWC + * natively. + */ + INTERLEAVED_RGB_FMT_TILED(ARGB2101010, + BPC8A, BPC8, BPC8, BPC8, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, MSM_FORMAT_FLAG_DX | MSM_FORMAT_FLAG_COMPRESSED, + MDP_FETCH_UBWC, 2, MDP_TILE_HEIGHT_UBWC), + + PSEUDO_YUV_FMT_TILED(NV12, + 0, BPC8, BPC8, BPC8, + C1_B_Cb, C2_R_Cr, + CHROMA_420, MSM_FORMAT_FLAG_YUV | + MSM_FORMAT_FLAG_COMPRESSED, + MDP_FETCH_UBWC, 4, MDP_TILE_HEIGHT_NV12), + + PSEUDO_YUV_FMT_TILED(P010, + 0, BPC8, BPC8, BPC8, + C1_B_Cb, C2_R_Cr, + CHROMA_420, MSM_FORMAT_FLAG_DX | + MSM_FORMAT_FLAG_YUV | + MSM_FORMAT_FLAG_COMPRESSED, + MDP_FETCH_UBWC, 4, MDP_TILE_HEIGHT_UBWC), +}; const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier) { + const struct msm_format *map = NULL; + ssize_t map_size; int i; - for (i = 0; i < ARRAY_SIZE(formats); i++) { - const struct mdp_format *f = &formats[i]; - if (f->base.pixel_format == format) - return &f->base; + + switch (modifier) { + case 0: + map = mdp_formats; + map_size = ARRAY_SIZE(mdp_formats); + break; + case DRM_FORMAT_MOD_QCOM_COMPRESSED: + map = mdp_formats_ubwc; + map_size = ARRAY_SIZE(mdp_formats_ubwc); + break; + default: + drm_err(kms->dev, "unsupported format modifier %llX\n", modifier); + return NULL; } + + for (i = 0; i < map_size; i++) { + const struct msm_format *f = &map[i]; + + if (f->pixel_format == format) + return f; + } + + drm_err(kms->dev, "unsupported fmt: %p4cc modifier 0x%llX\n", + &format, modifier); + return NULL; } diff --git a/drivers/gpu/drm/msm/disp/mdp_format.h b/drivers/gpu/drm/msm/disp/mdp_format.h new file mode 100644 index 0000000000..a00d646ff4 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp_format.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
+ * Copyright (C) 2013 Red Hat + * Author: Rob Clark + */ + +#ifndef __MSM_FORMAT_H__ +#define __MSM_FORMAT_H__ + +#include "mdp_common.xml.h" + +enum msm_format_flags { + MSM_FORMAT_FLAG_YUV_BIT, + MSM_FORMAT_FLAG_DX_BIT, + MSM_FORMAT_FLAG_COMPRESSED_BIT, + MSM_FORMAT_FLAG_UNPACK_TIGHT_BIT, + MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB_BIT, +}; + +#define MSM_FORMAT_FLAG_YUV BIT(MSM_FORMAT_FLAG_YUV_BIT) +#define MSM_FORMAT_FLAG_DX BIT(MSM_FORMAT_FLAG_DX_BIT) +#define MSM_FORMAT_FLAG_COMPRESSED BIT(MSM_FORMAT_FLAG_COMPRESSED_BIT) +#define MSM_FORMAT_FLAG_UNPACK_TIGHT BIT(MSM_FORMAT_FLAG_UNPACK_TIGHT_BIT) +#define MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB BIT(MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB_BIT) + +/** + * DPU HW,Component order color map + */ +enum { + C0_G_Y = 0, + C1_B_Cb = 1, + C2_R_Cr = 2, + C3_ALPHA = 3 +}; + +/** + * struct msm_format: defines the format configuration + * @pixel_format: format fourcc + * @element: element color ordering + * @fetch_type: how the color components are packed in pixel format + * @chroma_sample: chroma sub-samplng type + * @alpha_enable: whether the format has an alpha channel + * @unpack_count: number of the components to unpack + * @bpp: bytes per pixel + * @flags: usage bit flags + * @num_planes: number of planes (including meta data planes) + * @fetch_mode: linear, tiled, or ubwc hw fetch behavior + * @tile_height: format tile height + */ +struct msm_format { + uint32_t pixel_format; + enum mdp_bpc bpc_g_y, bpc_b_cb, bpc_r_cr; + enum mdp_bpc_alpha bpc_a; + u8 element[4]; + enum mdp_fetch_type fetch_type; + enum mdp_chroma_samp_type chroma_sample; + bool alpha_enable; + u8 unpack_count; + u8 bpp; + unsigned long flags; + u8 num_planes; + enum mdp_fetch_mode fetch_mode; + u16 tile_height; +}; + +#define MSM_FORMAT_IS_YUV(X) ((X)->flags & MSM_FORMAT_FLAG_YUV) +#define MSM_FORMAT_IS_DX(X) ((X)->flags & MSM_FORMAT_FLAG_DX) +#define MSM_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == MDP_FETCH_LINEAR) +#define MSM_FORMAT_IS_TILE(X) \ + (((X)->fetch_mode == MDP_FETCH_UBWC) && \ + !((X)->flags & MSM_FORMAT_FLAG_COMPRESSED)) +#define MSM_FORMAT_IS_UBWC(X) \ + (((X)->fetch_mode == MDP_FETCH_UBWC) && \ + ((X)->flags & MSM_FORMAT_FLAG_COMPRESSED)) + +#endif diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h index b0286d5d51..068fbeac6e 100644 --- a/drivers/gpu/drm/msm/disp/mdp_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp_kms.h @@ -11,6 +11,7 @@ #include #include +#include "mdp_format.h" #include "msm_drv.h" #include "msm_kms.h" #include "mdp_common.xml.h" @@ -77,23 +78,6 @@ void mdp_irq_update(struct mdp_kms *mdp_kms); * pixel format helpers: */ -struct mdp_format { - struct msm_format base; - enum mdp_bpc bpc_r, bpc_g, bpc_b; - enum mdp_bpc_alpha bpc_a; - uint8_t unpack[4]; - bool alpha_enable, unpack_tight; - uint8_t cpp, unpack_count; - enum mdp_fetch_type fetch_type; - enum mdp_chroma_samp_type chroma_sample; - bool is_yuv; -}; -#define to_mdp_format(x) container_of(x, struct mdp_format, base) -#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv) - -uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); -const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier); - /* MDP capabilities */ #define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */ #define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */ diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c index 7634e4b742..a599fc5d63 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.c +++ 
b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -22,9 +22,7 @@ struct dp_audio_private { struct platform_device *pdev; struct drm_device *drm_dev; struct dp_catalog *catalog; - struct dp_panel *panel; - bool engine_on; u32 channels; struct dp_audio dp_audio; @@ -34,11 +32,7 @@ static u32 dp_audio_get_header(struct dp_catalog *catalog, enum dp_catalog_audio_sdp_type sdp, enum dp_catalog_audio_header_type header) { - catalog->sdp_type = sdp; - catalog->sdp_header = header; - dp_catalog_audio_get_header(catalog); - - return catalog->audio_data; + return dp_catalog_audio_get_header(catalog, sdp, header); } static void dp_audio_set_header(struct dp_catalog *catalog, @@ -46,10 +40,7 @@ static void dp_audio_set_header(struct dp_catalog *catalog, enum dp_catalog_audio_sdp_type sdp, enum dp_catalog_audio_header_type header) { - catalog->sdp_type = sdp; - catalog->sdp_header = header; - catalog->audio_data = data; - dp_catalog_audio_set_header(catalog); + dp_catalog_audio_set_header(catalog, sdp, header, data); } static void dp_audio_stream_sdp(struct dp_audio_private *audio) @@ -319,8 +310,7 @@ static void dp_audio_setup_acr(struct dp_audio_private *audio) break; } - catalog->audio_data = select; - dp_catalog_audio_config_acr(catalog); + dp_catalog_audio_config_acr(catalog, select); } static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio) @@ -346,18 +336,14 @@ static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio) break; } - catalog->audio_data = safe_to_exit_level; - dp_catalog_audio_sfe_level(catalog); + dp_catalog_audio_sfe_level(catalog, safe_to_exit_level); } static void dp_audio_enable(struct dp_audio_private *audio, bool enable) { struct dp_catalog *catalog = audio->catalog; - catalog->audio_data = enable; - dp_catalog_audio_enable(catalog); - - audio->engine_on = enable; + dp_catalog_audio_enable(catalog, enable); } static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev) @@ -571,7 +557,6 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev, } audio->pdev = pdev; - audio->panel = panel; audio->catalog = catalog; dp_audio = &audio->dp_audio; diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index 707489776e..00dfafbebe 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -88,8 +88,7 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux, /* index = 0, write */ if (i == 0) reg |= DP_AUX_DATA_INDEX_WRITE; - aux->catalog->aux_data = reg; - dp_catalog_aux_write_data(aux->catalog); + dp_catalog_aux_write_data(aux->catalog, reg); } dp_catalog_aux_clear_trans(aux->catalog, false); @@ -107,8 +106,7 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux, } reg |= DP_AUX_TRANS_CTRL_GO; - aux->catalog->aux_data = reg; - dp_catalog_aux_write_trans(aux->catalog); + dp_catalog_aux_write_trans(aux->catalog, reg); return len; } @@ -146,8 +144,7 @@ static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */ data |= DP_AUX_DATA_READ; /* read */ - aux->catalog->aux_data = data; - dp_catalog_aux_write_data(aux->catalog); + dp_catalog_aux_write_data(aux->catalog, data); dp = msg->buffer; @@ -316,23 +313,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, goto exit; } - /* - * For eDP it's important to give a reasonably long wait here for HPD - * to be asserted. This is because the panel driver may have _just_ - * turned on the panel and then tried to do an AUX transfer. 
The panel - * driver has no way of knowing when the panel is ready, so it's up - * to us to wait. For DP we never get into this situation so let's - * avoid ever doing the extra long wait for DP. - */ - if (aux->is_edp) { - ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, - 500000); - if (ret) { - DRM_DEBUG_DP("Panel not ready for aux transactions\n"); - goto exit; - } - } - dp_aux_update_offset_and_segment(aux, msg); dp_aux_transfer_helper(aux, msg, true); @@ -533,7 +513,10 @@ static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux, aux = container_of(dp_aux, struct dp_aux_private, dp_aux); - pm_runtime_get_sync(aux->dev); + ret = pm_runtime_resume_and_get(aux->dev); + if (ret) + return ret; + ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us); pm_runtime_put_sync(aux->dev); diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index 628c8181dd..6e55cbf696 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -81,7 +81,6 @@ struct dp_catalog_private { struct dss_io_data io; u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX]; struct dp_catalog dp_catalog; - u8 aux_lut_cfg_index[PHY_AUX_CFG_MAX]; }; void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state) @@ -170,21 +169,21 @@ u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog) return dp_read_aux(catalog, REG_DP_AUX_DATA); } -int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog) +int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data) { struct dp_catalog_private *catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); - dp_write_aux(catalog, REG_DP_AUX_DATA, dp_catalog->aux_data); + dp_write_aux(catalog, REG_DP_AUX_DATA, data); return 0; } -int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog) +int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data) { struct dp_catalog_private *catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); - dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, dp_catalog->aux_data); + dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); return 0; } @@ -470,7 +469,7 @@ void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog) void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate, u32 stream_rate_khz, - bool fixed_nvid, bool is_ycbcr_420) + bool is_ycbcr_420) { u32 pixel_m, pixel_n; u32 mvid, nvid, pixel_div = 0, dispcc_input_rate; @@ -882,19 +881,17 @@ u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog) } /* panel related catalog functions */ -int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog) +int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total, + u32 sync_start, u32 width_blanking, u32 dp_active) { struct dp_catalog_private *catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); u32 reg; - dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, - dp_catalog->total); - dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, - dp_catalog->sync_start); - dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, - dp_catalog->width_blanking); - dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active); + dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total); + dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start); + dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking); + dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_active); reg = dp_read_p0(catalog, MMSS_DP_INTF_CONFIG); @@ 
-1163,34 +1160,28 @@ struct dp_catalog *dp_catalog_get(struct device *dev) return &catalog->dp_catalog; } -void dp_catalog_audio_get_header(struct dp_catalog *dp_catalog) +u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header) { struct dp_catalog_private *catalog; u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; - enum dp_catalog_audio_sdp_type sdp; - enum dp_catalog_audio_header_type header; - - if (!dp_catalog) - return; catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); sdp_map = catalog->audio_map; - sdp = dp_catalog->sdp_type; - header = dp_catalog->sdp_header; - dp_catalog->audio_data = dp_read_link(catalog, - sdp_map[sdp][header]); + return dp_read_link(catalog, sdp_map[sdp][header]); } -void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog) +void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header, + u32 data) { struct dp_catalog_private *catalog; u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; - enum dp_catalog_audio_sdp_type sdp; - enum dp_catalog_audio_header_type header; - u32 data; if (!dp_catalog) return; @@ -1199,17 +1190,14 @@ void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog) struct dp_catalog_private, dp_catalog); sdp_map = catalog->audio_map; - sdp = dp_catalog->sdp_type; - header = dp_catalog->sdp_header; - data = dp_catalog->audio_data; dp_write_link(catalog, sdp_map[sdp][header], data); } -void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog) +void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog, u32 select) { struct dp_catalog_private *catalog; - u32 acr_ctrl, select; + u32 acr_ctrl; if (!dp_catalog) return; @@ -1217,7 +1205,6 @@ void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog) catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); - select = dp_catalog->audio_data; acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14); drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n", @@ -1226,10 +1213,9 @@ void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog) dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); } -void dp_catalog_audio_enable(struct dp_catalog *dp_catalog) +void dp_catalog_audio_enable(struct dp_catalog *dp_catalog, bool enable) { struct dp_catalog_private *catalog; - bool enable; u32 audio_ctrl; if (!dp_catalog) @@ -1238,7 +1224,6 @@ void dp_catalog_audio_enable(struct dp_catalog *dp_catalog) catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); - enable = !!dp_catalog->audio_data; audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG); if (enable) @@ -1333,10 +1318,10 @@ void dp_catalog_audio_init(struct dp_catalog *dp_catalog) catalog->audio_map = sdp_map; } -void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog) +void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog, u32 safe_to_exit_level) { struct dp_catalog_private *catalog; - u32 mainlink_levels, safe_to_exit_level; + u32 mainlink_levels; if (!dp_catalog) return; @@ -1344,7 +1329,6 @@ void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog) catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); - safe_to_exit_level = dp_catalog->audio_data; mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS); mainlink_levels &= 0xFE0; mainlink_levels |= safe_to_exit_level; diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h index 
72a8581060..4679d50b8c 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.h +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -28,26 +28,9 @@ #define DP_INTR_FRAME_END BIT(6) #define DP_INTR_CRC_UPDATED BIT(9) -#define DP_AUX_CFG_MAX_VALUE_CNT 3 - #define DP_HW_VERSION_1_0 0x10000000 #define DP_HW_VERSION_1_2 0x10020000 -/* PHY AUX config registers */ -enum dp_phy_aux_config_type { - PHY_AUX_CFG0, - PHY_AUX_CFG1, - PHY_AUX_CFG2, - PHY_AUX_CFG3, - PHY_AUX_CFG4, - PHY_AUX_CFG5, - PHY_AUX_CFG6, - PHY_AUX_CFG7, - PHY_AUX_CFG8, - PHY_AUX_CFG9, - PHY_AUX_CFG_MAX, -}; - enum dp_catalog_audio_sdp_type { DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_TIMESTAMP, @@ -65,14 +48,6 @@ enum dp_catalog_audio_header_type { }; struct dp_catalog { - u32 aux_data; - u32 total; - u32 sync_start; - u32 width_blanking; - u32 dp_active; - enum dp_catalog_audio_sdp_type sdp_type; - enum dp_catalog_audio_header_type sdp_header; - u32 audio_data; bool wide_bus_en; }; @@ -81,8 +56,8 @@ void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *d /* AUX APIs */ u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog); -int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog); -int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog); +int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data); +int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data); int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read); int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog); void dp_catalog_aux_reset(struct dp_catalog *dp_catalog); @@ -100,7 +75,7 @@ void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog, bool ena void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog); void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb); void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate, - u32 stream_rate_khz, bool fixed_nvid, bool is_ycbcr_420); + u32 stream_rate_khz, bool is_ycbcr_420); int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, u32 pattern); u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog); void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog); @@ -125,7 +100,8 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog); /* DP Panel APIs */ -int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog); +int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total, + u32 sync_start, u32 width_blanking, u32 dp_active); void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp); void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog); void dp_catalog_dump_regs(struct dp_catalog *dp_catalog); @@ -136,12 +112,17 @@ void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog); struct dp_catalog *dp_catalog_get(struct device *dev); /* DP Audio APIs */ -void dp_catalog_audio_get_header(struct dp_catalog *catalog); -void dp_catalog_audio_set_header(struct dp_catalog *catalog); -void dp_catalog_audio_config_acr(struct dp_catalog *catalog); -void dp_catalog_audio_enable(struct dp_catalog *catalog); +u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header); +void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header, + u32 data); +void 
dp_catalog_audio_config_acr(struct dp_catalog *catalog, u32 select); +void dp_catalog_audio_enable(struct dp_catalog *catalog, bool enable); void dp_catalog_audio_config_sdp(struct dp_catalog *catalog); void dp_catalog_audio_init(struct dp_catalog *catalog); -void dp_catalog_audio_sfe_level(struct dp_catalog *catalog); +void dp_catalog_audio_sfe_level(struct dp_catalog *catalog, u32 safe_to_exit_level); #endif /* _DP_CATALOG_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index 112c7e54fc..7bc8a9f065 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1566,21 +1566,6 @@ void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl) phy, phy->init_count, phy->power_count); } -static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl) -{ - const u8 *dpcd = ctrl->panel->dpcd; - - /* - * For better interop experience, used a fixed NVID=0x8000 - * whenever connected to a VGA dongle downstream. - */ - if (drm_dp_is_branch(dpcd)) - return (drm_dp_has_quirk(&ctrl->panel->desc, - DP_DPCD_QUIRK_CONSTANT_N)); - - return false; -} - static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) { struct phy *phy = ctrl->phy; @@ -2022,7 +2007,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) dp_catalog_ctrl_config_msa(ctrl->catalog, ctrl->link->link_params.rate, - pixel_rate_orig, dp_ctrl_use_fixed_nvid(ctrl), + pixel_rate_orig, ctrl->panel->dp_mode.out_fmt_is_yuv_420); dp_ctrl_setup_tr_unit(ctrl); diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h index fa014cee7e..ffcbd9a257 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.h +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -12,7 +12,6 @@ #include "dp_catalog.h" struct dp_ctrl { - atomic_t aborted; bool wide_bus_en; }; diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c index eca5a02f90..b8611f6d22 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.c +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -21,8 +21,6 @@ struct dp_debug_private { struct dp_link *link; struct dp_panel *panel; struct drm_connector *connector; - - struct dp_debug dp_debug; }; static int dp_debug_show(struct seq_file *seq, void *p) @@ -199,10 +197,24 @@ static const struct file_operations test_active_fops = { .write = dp_test_active_write }; -static void dp_debug_init(struct dp_debug *dp_debug, struct dentry *root, bool is_edp) +int dp_debug_init(struct device *dev, struct dp_panel *panel, + struct dp_link *link, + struct drm_connector *connector, + struct dentry *root, bool is_edp) { - struct dp_debug_private *debug = container_of(dp_debug, - struct dp_debug_private, dp_debug); + struct dp_debug_private *debug; + + if (!dev || !panel || !link) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL); + if (!debug) + return -ENOMEM; + + debug->link = link; + debug->panel = panel; debugfs_create_file("dp_debug", 0444, root, debug, &dp_debug_fops); @@ -220,41 +232,6 @@ static void dp_debug_init(struct dp_debug *dp_debug, struct dentry *root, bool i root, debug, &dp_test_type_fops); } -} -struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, - struct dp_link *link, - struct drm_connector *connector, - struct dentry *root, bool is_edp) -{ - struct dp_debug_private *debug; - struct dp_debug *dp_debug; - int rc; - - if (!dev || !panel || !link) { - DRM_ERROR("invalid input\n"); - rc = -EINVAL; - goto error; - } - - debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL); - if 
(!debug) { - rc = -ENOMEM; - goto error; - } - - debug->dp_debug.debug_en = false; - debug->link = link; - debug->panel = panel; - - dp_debug = &debug->dp_debug; - dp_debug->vdisplay = 0; - dp_debug->hdisplay = 0; - dp_debug->vrefresh = 0; - - dp_debug_init(dp_debug, root, is_edp); - - return dp_debug; - error: - return ERR_PTR(rc); + return 0; } diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h index 9b3b2e702f..7e1aa892fc 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.h +++ b/drivers/gpu/drm/msm/dp/dp_debug.h @@ -9,22 +9,6 @@ #include "dp_panel.h" #include "dp_link.h" -/** - * struct dp_debug - * @debug_en: specifies whether debug mode enabled - * @vdisplay: used to filter out vdisplay value - * @hdisplay: used to filter out hdisplay value - * @vrefresh: used to filter out vrefresh value - * @tpg_state: specifies whether tpg feature is enabled - */ -struct dp_debug { - bool debug_en; - int aspect_ratio; - int vdisplay; - int hdisplay; - int vrefresh; -}; - #if defined(CONFIG_DEBUG_FS) /** @@ -41,22 +25,22 @@ struct dp_debug { * This function sets up the debug module and provides a way * for debugfs input to be communicated with existing modules */ -struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, - struct dp_link *link, - struct drm_connector *connector, - struct dentry *root, - bool is_edp); +int dp_debug_init(struct device *dev, struct dp_panel *panel, + struct dp_link *link, + struct drm_connector *connector, + struct dentry *root, + bool is_edp); #else static inline -struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, - struct dp_link *link, - struct drm_connector *connector, - struct dentry *root, - bool is_edp) +int dp_debug_init(struct device *dev, struct dp_panel *panel, + struct dp_link *link, + struct drm_connector *connector, + struct dentry *root, + bool is_edp) { - return ERR_PTR(-EINVAL); + return -EINVAL; } #endif /* defined(CONFIG_DEBUG_FS) */ diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 36a0ef1cdc..672a7ba52e 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -74,7 +74,6 @@ struct dp_event { }; struct dp_display_private { - char *name; int irq; unsigned int id; @@ -82,18 +81,15 @@ struct dp_display_private { /* state variables */ bool core_initialized; bool phy_initialized; - bool hpd_irq_on; bool audio_supported; struct drm_device *drm_dev; - struct dentry *root; struct dp_catalog *catalog; struct drm_dp_aux *aux; struct dp_link *link; struct dp_panel *panel; struct dp_ctrl *ctrl; - struct dp_debug *debug; struct dp_display_mode dp_mode; struct msm_dp dp_display; @@ -119,55 +115,49 @@ struct dp_display_private { struct msm_dp_desc { phys_addr_t io_start; unsigned int id; - unsigned int connector_type; bool wide_bus_supported; }; static const struct msm_dp_desc sc7180_dp_descs[] = { - { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort }, + { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 }, {} }; static const struct msm_dp_desc sc7280_dp_descs[] = { - { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true }, - { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true }, + { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, + { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_1, 
.wide_bus_supported = true }, {} }; static const struct msm_dp_desc sc8180x_dp_descs[] = { - { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort }, - { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort }, - { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP }, + { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 }, + { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1 }, + { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2 }, {} }; static const struct msm_dp_desc sc8280xp_dp_descs[] = { - { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true }, - { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true }, - { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true }, - { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true }, - { .io_start = 0x22090000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true }, - { .io_start = 0x22098000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true }, - { .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true }, - { .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_supported = true }, + { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, + { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, + { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, + { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .wide_bus_supported = true }, + { .io_start = 0x22090000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, + { .io_start = 0x22098000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, + { .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, + { .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .wide_bus_supported = true }, {} }; -static const struct msm_dp_desc sc8280xp_edp_descs[] = { - { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true }, - { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true }, - { .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true }, - { .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_supported = true }, - {} -}; - -static const struct msm_dp_desc sm8350_dp_descs[] = { - { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort }, +static const struct msm_dp_desc sm8650_dp_descs[] = { + { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0 }, {} }; -static const struct msm_dp_desc sm8650_dp_descs[] = { - { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort }, +static const struct msm_dp_desc x1e80100_dp_descs[] = { + { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported 
= true }, + { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, + { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, + { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .wide_bus_supported = true }, {} }; @@ -178,10 +168,11 @@ static const struct of_device_id dp_dt_match[] = { { .compatible = "qcom,sc8180x-dp", .data = &sc8180x_dp_descs }, { .compatible = "qcom,sc8180x-edp", .data = &sc8180x_dp_descs }, { .compatible = "qcom,sc8280xp-dp", .data = &sc8280xp_dp_descs }, - { .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_edp_descs }, + { .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_dp_descs }, { .compatible = "qcom,sdm845-dp", .data = &sc7180_dp_descs }, - { .compatible = "qcom,sm8350-dp", .data = &sm8350_dp_descs }, + { .compatible = "qcom,sm8350-dp", .data = &sc7180_dp_descs }, { .compatible = "qcom,sm8650-dp", .data = &sm8650_dp_descs }, + { .compatible = "qcom,x1e80100-dp", .data = &x1e80100_dp_descs }, {} }; @@ -732,6 +723,14 @@ static int dp_init_sub_modules(struct dp_display_private *dp) if (IS_ERR(phy)) return PTR_ERR(phy); + rc = phy_set_mode_ext(phy, PHY_MODE_DP, + dp->dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP); + if (rc) { + DRM_ERROR("failed to set phy submode, rc = %d\n", rc); + dp->catalog = NULL; + goto error; + } + dp->catalog = dp_catalog_get(dev); if (IS_ERR(dp->catalog)) { rc = PTR_ERR(dp->catalog); @@ -807,7 +806,6 @@ static int dp_display_set_mode(struct msm_dp *dp_display, drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode); dp->panel->dp_mode.bpp = mode->bpp; - dp->panel->dp_mode.capabilities = mode->capabilities; dp->panel->dp_mode.out_fmt_is_yuv_420 = mode->out_fmt_is_yuv_420; dp_panel_init_panel_info(dp->panel); return 0; @@ -1247,6 +1245,25 @@ static int dp_auxbus_done_probe(struct drm_dp_aux *aux) return dp_display_probe_tail(aux->dev); } +static int dp_display_get_connector_type(struct platform_device *pdev, + const struct msm_dp_desc *desc) +{ + struct device_node *node = pdev->dev.of_node; + struct device_node *aux_bus = of_get_child_by_name(node, "aux-bus"); + struct device_node *panel = of_get_child_by_name(aux_bus, "panel"); + int connector_type; + + if (panel) + connector_type = DRM_MODE_CONNECTOR_eDP; + else + connector_type = DRM_MODE_SUBCONNECTOR_DisplayPort; + + of_node_put(panel); + of_node_put(aux_bus); + + return connector_type; +} + static int dp_display_probe(struct platform_device *pdev) { int rc = 0; @@ -1267,9 +1284,8 @@ static int dp_display_probe(struct platform_device *pdev) return -EINVAL; dp->dp_display.pdev = pdev; - dp->name = "drm_dp"; dp->id = desc->id; - dp->dp_display.connector_type = desc->connector_type; + dp->dp_display.connector_type = dp_display_get_connector_type(pdev, desc); dp->wide_bus_supported = desc->wide_bus_supported; dp->dp_display.is_edp = (dp->dp_display.connector_type == DRM_MODE_CONNECTOR_eDP); @@ -1437,14 +1453,9 @@ void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, boo dp = container_of(dp_display, struct dp_display_private, dp_display); dev = &dp->dp_display.pdev->dev; - dp->debug = dp_debug_get(dev, dp->panel, - dp->link, dp->dp_display.connector, - root, is_edp); - if (IS_ERR(dp->debug)) { - rc = PTR_ERR(dp->debug); + rc = dp_debug_init(dev, dp->panel, dp->link, dp->dp_display.connector, root, is_edp); + if (rc) DRM_ERROR("failed to initialize debug, rc = %d\n", rc); - dp->debug = NULL; - } } int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, diff --git 
a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index 234dada886..ec7fa67e05 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -16,7 +16,6 @@ struct msm_dp { struct drm_device *drm_dev; struct platform_device *pdev; struct device *codec_dev; - struct drm_bridge *bridge; struct drm_connector *connector; struct drm_bridge *next_bridge; bool link_ready; @@ -28,8 +27,6 @@ struct msm_dp { hdmi_codec_plugged_cb plugged_cb; - bool wide_bus_en; - struct dp_audio *dp_audio; bool psr_supported; }; diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index a819a4ff76..1b9be5bd97 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -347,8 +347,6 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, } } - dp_display->bridge = bridge; - return 0; } diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index ea911d9244..d8967615d8 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -36,7 +36,6 @@ struct dp_link_request { struct dp_link_private { u32 prev_sink_count; - struct device *dev; struct drm_device *drm_dev; struct drm_dp_aux *aux; struct dp_link dp_link; @@ -804,8 +803,6 @@ int dp_link_psm_config(struct dp_link *dp_link, if (ret) DRM_ERROR("Failed to %s low power mode\n", enable ? "enter" : "exit"); - else - dp_link->psm_enabled = enable; mutex_unlock(&link->psm_mutex); return ret; @@ -1226,7 +1223,6 @@ struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux) if (!link) return ERR_PTR(-ENOMEM); - link->dev = dev; link->aux = aux; mutex_init(&link->psm_mutex); diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h index 42aed9c90b..5846337bb5 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.h +++ b/drivers/gpu/drm/msm/dp/dp_link.h @@ -62,7 +62,6 @@ struct dp_link_phy_params { struct dp_link { u32 sink_request; u32 test_response; - bool psm_enabled; u8 sink_count; struct dp_link_test_video test_video; diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 8e70694539..07db8f37cd 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -353,6 +353,10 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel) struct dp_catalog *catalog; struct dp_panel_private *panel; struct drm_display_mode *drm_mode; + u32 width_blanking; + u32 sync_start; + u32 dp_active; + u32 total; panel = container_of(dp_panel, struct dp_panel_private, dp_panel); catalog = panel->catalog; @@ -376,13 +380,13 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel) data <<= 16; data |= total_hor; - catalog->total = data; + total = data; data = (drm_mode->vtotal - drm_mode->vsync_start); data <<= 16; data |= (drm_mode->htotal - drm_mode->hsync_start); - catalog->sync_start = data; + sync_start = data; data = drm_mode->vsync_end - drm_mode->vsync_start; data <<= 16; @@ -390,15 +394,15 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel) data |= drm_mode->hsync_end - drm_mode->hsync_start; data |= (panel->dp_panel.dp_mode.h_active_low << 15); - catalog->width_blanking = data; + width_blanking = data; data = drm_mode->vdisplay; data <<= 16; data |= drm_mode->hdisplay; - catalog->dp_active = data; + dp_active = data; - dp_catalog_panel_timing_cfg(catalog); + dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, dp_active); if (dp_panel->dp_mode.out_fmt_is_yuv_420) dp_panel_setup_vsc_sdp_yuv_420(dp_panel); diff 
--git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h index e843f5062d..4ea42fa936 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.h +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -15,7 +15,6 @@ struct edid; struct dp_display_mode { struct drm_display_mode drm_mode; - u32 capabilities; u32 bpp; u32 h_active_low; u32 v_active_low; @@ -40,7 +39,6 @@ struct dp_panel { u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; struct dp_link_info link_info; - struct drm_dp_desc desc; struct edid *edid; struct drm_connector *connector; struct dp_display_mode dp_mode; @@ -48,7 +46,6 @@ struct dp_panel { bool video_test; bool vsc_sdp_supported; - u32 vic; u32 max_dp_lanes; u32 max_dp_link_rate; diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 37c4c07005..efd7c23b66 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -120,6 +120,22 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) struct msm_drm_private *priv = dev_get_drvdata(master); struct msm_dsi *msm_dsi = dev_get_drvdata(dev); + /* + * Next bridge doesn't exist for the secondary DSI host in a bonded + * pair. + */ + if (!msm_dsi_is_bonded_dsi(msm_dsi) || + msm_dsi_is_master_dsi(msm_dsi)) { + struct drm_bridge *ext_bridge; + + ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev, + msm_dsi->pdev->dev.of_node, 1, 0); + if (IS_ERR(ext_bridge)) + return PTR_ERR(ext_bridge); + + msm_dsi->next_bridge = ext_bridge; + } + priv->dsi[msm_dsi->id] = msm_dsi; return 0; @@ -216,7 +232,6 @@ void __exit msm_dsi_unregister(void) int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, struct drm_encoder *encoder) { - struct drm_bridge *bridge; int ret; msm_dsi->dev = dev; @@ -236,14 +251,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, return 0; } - bridge = msm_dsi_manager_bridge_init(msm_dsi, encoder); - if (IS_ERR(bridge)) { - ret = PTR_ERR(bridge); - DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret); - return ret; - } - - ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id, bridge); + ret = msm_dsi_manager_connector_init(msm_dsi, encoder); if (ret) { DRM_DEV_ERROR(dev->dev, "failed to create dsi connector: %d\n", ret); diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 2ad9a842c6..afc290408b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -38,6 +38,8 @@ struct msm_dsi { struct mipi_dsi_host *host; struct msm_dsi_phy *phy; + struct drm_bridge *next_bridge; + struct device *phy_dev; bool phy_enabled; @@ -45,9 +47,8 @@ struct msm_dsi { }; /* dsi manager */ -struct drm_bridge *msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi, - struct drm_encoder *encoder); -int msm_dsi_manager_ext_bridge_init(u8 id, struct drm_bridge *int_bridge); +int msm_dsi_manager_connector_init(struct msm_dsi *msm_dsi, + struct drm_encoder *encoder); int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); int msm_dsi_manager_register(struct msm_dsi *msm_dsi); diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h deleted file mode 100644 index 2a7d980e12..0000000000 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ /dev/null @@ -1,790 +0,0 @@ -#ifndef DSI_XML -#define DSI_XML - -/* Autogenerated file, DO NOT EDIT manually! 
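In the dsi.c hunk above, the external (downstream) bridge is now resolved once at bind time with devm_drm_of_get_bridge() and remembered in msm_dsi->next_bridge; the manager then chains it behind the internal bridge with DRM_BRIDGE_ATTACH_NO_CONNECTOR and builds the connector itself via drm_bridge_connector_init(). A condensed sketch of that attach pattern follows; it is not compilable outside a kernel tree, error handling is trimmed, and the function and parameter names are illustrative (in the driver the second attach actually happens from the internal bridge's .attach callback):

#include <linux/err.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>
#include <drm/drm_of.h>

static int example_attach_chain(struct drm_device *drm, struct drm_encoder *encoder,
				struct drm_bridge *internal, struct drm_bridge *next)
{
	struct drm_connector *connector;
	int ret;

	/* internal bridge first; no bridge in the chain creates a connector */
	ret = drm_bridge_attach(encoder, internal, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	/* downstream bridge (panel or DP/HDMI bridge) chained behind it */
	ret = drm_bridge_attach(encoder, next, internal, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	/* the display driver owns the connector and derives it from the chain */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}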
- -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) - -Copyright (C) 2013-2022 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ - - -enum dsi_traffic_mode { - NON_BURST_SYNCH_PULSE = 0, - NON_BURST_SYNCH_EVENT = 1, - BURST_MODE = 2, -}; - -enum dsi_vid_dst_format { - VID_DST_FORMAT_RGB565 = 0, - VID_DST_FORMAT_RGB666 = 1, - VID_DST_FORMAT_RGB666_LOOSE = 2, - VID_DST_FORMAT_RGB888 = 3, -}; - -enum dsi_rgb_swap { - SWAP_RGB = 0, - SWAP_RBG = 1, - SWAP_BGR = 2, - SWAP_BRG = 3, - SWAP_GRB = 4, - SWAP_GBR = 5, -}; - -enum dsi_cmd_trigger { - TRIGGER_NONE = 0, - TRIGGER_SEOF = 1, - TRIGGER_TE = 2, - TRIGGER_SW = 4, - TRIGGER_SW_SEOF = 5, - TRIGGER_SW_TE = 6, -}; - -enum dsi_cmd_dst_format { - CMD_DST_FORMAT_RGB111 = 0, - CMD_DST_FORMAT_RGB332 = 3, - CMD_DST_FORMAT_RGB444 = 4, - CMD_DST_FORMAT_RGB565 = 6, - CMD_DST_FORMAT_RGB666 = 7, - CMD_DST_FORMAT_RGB888 = 8, -}; - -enum dsi_lane_swap { - LANE_SWAP_0123 = 0, - LANE_SWAP_3012 = 1, - LANE_SWAP_2301 = 2, - LANE_SWAP_1230 = 3, - LANE_SWAP_0321 = 4, - LANE_SWAP_1032 = 5, - LANE_SWAP_2103 = 6, - LANE_SWAP_3210 = 7, -}; - -enum video_config_bpp { - VIDEO_CONFIG_18BPP = 0, - VIDEO_CONFIG_24BPP = 1, -}; - -enum video_pattern_sel { - VID_PRBS = 0, - VID_INCREMENTAL = 1, - VID_FIXED = 2, - VID_MDSS_GENERAL_PATTERN = 3, -}; - -enum cmd_mdp_stream0_pattern_sel { - CMD_MDP_PRBS = 0, - CMD_MDP_INCREMENTAL = 1, - CMD_MDP_FIXED = 2, - CMD_MDP_MDSS_GENERAL_PATTERN = 3, -}; - -enum cmd_dma_pattern_sel { - CMD_DMA_PRBS = 0, - CMD_DMA_INCREMENTAL = 1, - CMD_DMA_FIXED = 2, - CMD_DMA_CUSTOM_PATTERN_DMA_FIFO = 3, -}; - -#define DSI_IRQ_CMD_DMA_DONE 0x00000001 -#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002 -#define DSI_IRQ_CMD_MDP_DONE 0x00000100 -#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200 -#define DSI_IRQ_VIDEO_DONE 0x00010000 -#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000 -#define DSI_IRQ_BTA_DONE 0x00100000 -#define DSI_IRQ_MASK_BTA_DONE 0x00200000 -#define DSI_IRQ_ERROR 0x01000000 -#define DSI_IRQ_MASK_ERROR 0x02000000 -#define REG_DSI_6G_HW_VERSION 0x00000000 -#define DSI_6G_HW_VERSION_MAJOR__MASK 0xf0000000 -#define DSI_6G_HW_VERSION_MAJOR__SHIFT 28 -static inline uint32_t DSI_6G_HW_VERSION_MAJOR(uint32_t val) -{ - return ((val) << DSI_6G_HW_VERSION_MAJOR__SHIFT) & DSI_6G_HW_VERSION_MAJOR__MASK; -} -#define DSI_6G_HW_VERSION_MINOR__MASK 0x0fff0000 -#define DSI_6G_HW_VERSION_MINOR__SHIFT 16 -static inline uint32_t DSI_6G_HW_VERSION_MINOR(uint32_t val) -{ - return ((val) << DSI_6G_HW_VERSION_MINOR__SHIFT) & DSI_6G_HW_VERSION_MINOR__MASK; -} -#define DSI_6G_HW_VERSION_STEP__MASK 0x0000ffff -#define DSI_6G_HW_VERSION_STEP__SHIFT 0 -static inline uint32_t DSI_6G_HW_VERSION_STEP(uint32_t val) -{ - return ((val) << DSI_6G_HW_VERSION_STEP__SHIFT) & DSI_6G_HW_VERSION_STEP__MASK; -} - -#define REG_DSI_CTRL 0x00000000 -#define DSI_CTRL_ENABLE 0x00000001 -#define DSI_CTRL_VID_MODE_EN 0x00000002 -#define DSI_CTRL_CMD_MODE_EN 0x00000004 -#define DSI_CTRL_LANE0 0x00000010 -#define DSI_CTRL_LANE1 0x00000020 -#define DSI_CTRL_LANE2 0x00000040 -#define DSI_CTRL_LANE3 0x00000080 -#define DSI_CTRL_CLK_EN 0x00000100 -#define DSI_CTRL_ECC_CHECK 0x00100000 -#define DSI_CTRL_CRC_CHECK 0x01000000 - -#define REG_DSI_STATUS0 0x00000004 -#define DSI_STATUS0_CMD_MODE_ENGINE_BUSY 0x00000001 -#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002 -#define DSI_STATUS0_CMD_MODE_MDP_BUSY 0x00000004 -#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008 -#define DSI_STATUS0_DSI_BUSY 0x00000010 -#define DSI_STATUS0_INTERLEAVE_OP_CONTENTION 0x80000000 - -#define REG_DSI_FIFO_STATUS 0x00000008 -#define DSI_FIFO_STATUS_VIDEO_MDP_FIFO_OVERFLOW 0x00000001 -#define DSI_FIFO_STATUS_VIDEO_MDP_FIFO_UNDERFLOW 0x00000008 -#define 
DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW 0x00000080 -#define DSI_FIFO_STATUS_CMD_DMA_FIFO_RD_WATERMARK_REACH 0x00000100 -#define DSI_FIFO_STATUS_CMD_DMA_FIFO_WR_WATERMARK_REACH 0x00000200 -#define DSI_FIFO_STATUS_CMD_DMA_FIFO_UNDERFLOW 0x00000400 -#define DSI_FIFO_STATUS_DLN0_LP_FIFO_EMPTY 0x00001000 -#define DSI_FIFO_STATUS_DLN0_LP_FIFO_FULL 0x00002000 -#define DSI_FIFO_STATUS_DLN0_LP_FIFO_OVERFLOW 0x00004000 -#define DSI_FIFO_STATUS_DLN0_HS_FIFO_EMPTY 0x00010000 -#define DSI_FIFO_STATUS_DLN0_HS_FIFO_FULL 0x00020000 -#define DSI_FIFO_STATUS_DLN0_HS_FIFO_OVERFLOW 0x00040000 -#define DSI_FIFO_STATUS_DLN0_HS_FIFO_UNDERFLOW 0x00080000 -#define DSI_FIFO_STATUS_DLN1_HS_FIFO_EMPTY 0x00100000 -#define DSI_FIFO_STATUS_DLN1_HS_FIFO_FULL 0x00200000 -#define DSI_FIFO_STATUS_DLN1_HS_FIFO_OVERFLOW 0x00400000 -#define DSI_FIFO_STATUS_DLN1_HS_FIFO_UNDERFLOW 0x00800000 -#define DSI_FIFO_STATUS_DLN2_HS_FIFO_EMPTY 0x01000000 -#define DSI_FIFO_STATUS_DLN2_HS_FIFO_FULL 0x02000000 -#define DSI_FIFO_STATUS_DLN2_HS_FIFO_OVERFLOW 0x04000000 -#define DSI_FIFO_STATUS_DLN2_HS_FIFO_UNDERFLOW 0x08000000 -#define DSI_FIFO_STATUS_DLN3_HS_FIFO_EMPTY 0x10000000 -#define DSI_FIFO_STATUS_DLN3_HS_FIFO_FULL 0x20000000 -#define DSI_FIFO_STATUS_DLN3_HS_FIFO_OVERFLOW 0x40000000 -#define DSI_FIFO_STATUS_DLN3_HS_FIFO_UNDERFLOW 0x80000000 - -#define REG_DSI_VID_CFG0 0x0000000c -#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003 -#define DSI_VID_CFG0_VIRT_CHANNEL__SHIFT 0 -static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val) -{ - return ((val) << DSI_VID_CFG0_VIRT_CHANNEL__SHIFT) & DSI_VID_CFG0_VIRT_CHANNEL__MASK; -} -#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030 -#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4 -static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_vid_dst_format val) -{ - return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK; -} -#define DSI_VID_CFG0_TRAFFIC_MODE__MASK 0x00000300 -#define DSI_VID_CFG0_TRAFFIC_MODE__SHIFT 8 -static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val) -{ - return ((val) << DSI_VID_CFG0_TRAFFIC_MODE__SHIFT) & DSI_VID_CFG0_TRAFFIC_MODE__MASK; -} -#define DSI_VID_CFG0_BLLP_POWER_STOP 0x00001000 -#define DSI_VID_CFG0_EOF_BLLP_POWER_STOP 0x00008000 -#define DSI_VID_CFG0_HSA_POWER_STOP 0x00010000 -#define DSI_VID_CFG0_HBP_POWER_STOP 0x00100000 -#define DSI_VID_CFG0_HFP_POWER_STOP 0x01000000 -#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000 - -#define REG_DSI_VID_CFG1 0x0000001c -#define DSI_VID_CFG1_R_SEL 0x00000001 -#define DSI_VID_CFG1_G_SEL 0x00000010 -#define DSI_VID_CFG1_B_SEL 0x00000100 -#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00007000 -#define DSI_VID_CFG1_RGB_SWAP__SHIFT 12 -static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val) -{ - return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK; -} - -#define REG_DSI_ACTIVE_H 0x00000020 -#define DSI_ACTIVE_H_START__MASK 0x00000fff -#define DSI_ACTIVE_H_START__SHIFT 0 -static inline uint32_t DSI_ACTIVE_H_START(uint32_t val) -{ - return ((val) << DSI_ACTIVE_H_START__SHIFT) & DSI_ACTIVE_H_START__MASK; -} -#define DSI_ACTIVE_H_END__MASK 0x0fff0000 -#define DSI_ACTIVE_H_END__SHIFT 16 -static inline uint32_t DSI_ACTIVE_H_END(uint32_t val) -{ - return ((val) << DSI_ACTIVE_H_END__SHIFT) & DSI_ACTIVE_H_END__MASK; -} - -#define REG_DSI_ACTIVE_V 0x00000024 -#define DSI_ACTIVE_V_START__MASK 0x00000fff -#define DSI_ACTIVE_V_START__SHIFT 0 -static inline uint32_t DSI_ACTIVE_V_START(uint32_t val) -{ - return ((val) << DSI_ACTIVE_V_START__SHIFT) & DSI_ACTIVE_V_START__MASK; 
-} -#define DSI_ACTIVE_V_END__MASK 0x0fff0000 -#define DSI_ACTIVE_V_END__SHIFT 16 -static inline uint32_t DSI_ACTIVE_V_END(uint32_t val) -{ - return ((val) << DSI_ACTIVE_V_END__SHIFT) & DSI_ACTIVE_V_END__MASK; -} - -#define REG_DSI_TOTAL 0x00000028 -#define DSI_TOTAL_H_TOTAL__MASK 0x00000fff -#define DSI_TOTAL_H_TOTAL__SHIFT 0 -static inline uint32_t DSI_TOTAL_H_TOTAL(uint32_t val) -{ - return ((val) << DSI_TOTAL_H_TOTAL__SHIFT) & DSI_TOTAL_H_TOTAL__MASK; -} -#define DSI_TOTAL_V_TOTAL__MASK 0x0fff0000 -#define DSI_TOTAL_V_TOTAL__SHIFT 16 -static inline uint32_t DSI_TOTAL_V_TOTAL(uint32_t val) -{ - return ((val) << DSI_TOTAL_V_TOTAL__SHIFT) & DSI_TOTAL_V_TOTAL__MASK; -} - -#define REG_DSI_ACTIVE_HSYNC 0x0000002c -#define DSI_ACTIVE_HSYNC_START__MASK 0x00000fff -#define DSI_ACTIVE_HSYNC_START__SHIFT 0 -static inline uint32_t DSI_ACTIVE_HSYNC_START(uint32_t val) -{ - return ((val) << DSI_ACTIVE_HSYNC_START__SHIFT) & DSI_ACTIVE_HSYNC_START__MASK; -} -#define DSI_ACTIVE_HSYNC_END__MASK 0x0fff0000 -#define DSI_ACTIVE_HSYNC_END__SHIFT 16 -static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val) -{ - return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK; -} - -#define REG_DSI_ACTIVE_VSYNC_HPOS 0x00000030 -#define DSI_ACTIVE_VSYNC_HPOS_START__MASK 0x00000fff -#define DSI_ACTIVE_VSYNC_HPOS_START__SHIFT 0 -static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_START(uint32_t val) -{ - return ((val) << DSI_ACTIVE_VSYNC_HPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_START__MASK; -} -#define DSI_ACTIVE_VSYNC_HPOS_END__MASK 0x0fff0000 -#define DSI_ACTIVE_VSYNC_HPOS_END__SHIFT 16 -static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_END(uint32_t val) -{ - return ((val) << DSI_ACTIVE_VSYNC_HPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_END__MASK; -} - -#define REG_DSI_ACTIVE_VSYNC_VPOS 0x00000034 -#define DSI_ACTIVE_VSYNC_VPOS_START__MASK 0x00000fff -#define DSI_ACTIVE_VSYNC_VPOS_START__SHIFT 0 -static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_START(uint32_t val) -{ - return ((val) << DSI_ACTIVE_VSYNC_VPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_START__MASK; -} -#define DSI_ACTIVE_VSYNC_VPOS_END__MASK 0x0fff0000 -#define DSI_ACTIVE_VSYNC_VPOS_END__SHIFT 16 -static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_END(uint32_t val) -{ - return ((val) << DSI_ACTIVE_VSYNC_VPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_END__MASK; -} - -#define REG_DSI_CMD_DMA_CTRL 0x00000038 -#define DSI_CMD_DMA_CTRL_BROADCAST_EN 0x80000000 -#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000 -#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000 - -#define REG_DSI_CMD_CFG0 0x0000003c -#define DSI_CMD_CFG0_DST_FORMAT__MASK 0x0000000f -#define DSI_CMD_CFG0_DST_FORMAT__SHIFT 0 -static inline uint32_t DSI_CMD_CFG0_DST_FORMAT(enum dsi_cmd_dst_format val) -{ - return ((val) << DSI_CMD_CFG0_DST_FORMAT__SHIFT) & DSI_CMD_CFG0_DST_FORMAT__MASK; -} -#define DSI_CMD_CFG0_R_SEL 0x00000010 -#define DSI_CMD_CFG0_G_SEL 0x00000100 -#define DSI_CMD_CFG0_B_SEL 0x00001000 -#define DSI_CMD_CFG0_INTERLEAVE_MAX__MASK 0x00f00000 -#define DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT 20 -static inline uint32_t DSI_CMD_CFG0_INTERLEAVE_MAX(uint32_t val) -{ - return ((val) << DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT) & DSI_CMD_CFG0_INTERLEAVE_MAX__MASK; -} -#define DSI_CMD_CFG0_RGB_SWAP__MASK 0x00070000 -#define DSI_CMD_CFG0_RGB_SWAP__SHIFT 16 -static inline uint32_t DSI_CMD_CFG0_RGB_SWAP(enum dsi_rgb_swap val) -{ - return ((val) << DSI_CMD_CFG0_RGB_SWAP__SHIFT) & DSI_CMD_CFG0_RGB_SWAP__MASK; -} - -#define REG_DSI_CMD_CFG1 0x00000040 -#define DSI_CMD_CFG1_WR_MEM_START__MASK 0x000000ff -#define 
DSI_CMD_CFG1_WR_MEM_START__SHIFT 0 -static inline uint32_t DSI_CMD_CFG1_WR_MEM_START(uint32_t val) -{ - return ((val) << DSI_CMD_CFG1_WR_MEM_START__SHIFT) & DSI_CMD_CFG1_WR_MEM_START__MASK; -} -#define DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK 0x0000ff00 -#define DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT 8 -static inline uint32_t DSI_CMD_CFG1_WR_MEM_CONTINUE(uint32_t val) -{ - return ((val) << DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT) & DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK; -} -#define DSI_CMD_CFG1_INSERT_DCS_COMMAND 0x00010000 - -#define REG_DSI_DMA_BASE 0x00000044 - -#define REG_DSI_DMA_LEN 0x00000048 - -#define REG_DSI_CMD_MDP_STREAM0_CTRL 0x00000054 -#define DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__MASK 0x0000003f -#define DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__SHIFT 0 -static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE(uint32_t val) -{ - return ((val) << DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__MASK; -} -#define DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300 -#define DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__SHIFT 8 -static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL(uint32_t val) -{ - return ((val) << DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__MASK; -} -#define DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__MASK 0xffff0000 -#define DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__SHIFT 16 -static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(uint32_t val) -{ - return ((val) << DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__MASK; -} - -#define REG_DSI_CMD_MDP_STREAM0_TOTAL 0x00000058 -#define DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__MASK 0x00000fff -#define DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__SHIFT 0 -static inline uint32_t DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(uint32_t val) -{ - return ((val) << DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__MASK; -} -#define DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__MASK 0x0fff0000 -#define DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__SHIFT 16 -static inline uint32_t DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(uint32_t val) -{ - return ((val) << DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__MASK; -} - -#define REG_DSI_CMD_MDP_STREAM1_CTRL 0x0000005c -#define DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__MASK 0x0000003f -#define DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__SHIFT 0 -static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE(uint32_t val) -{ - return ((val) << DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__MASK; -} -#define DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300 -#define DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__SHIFT 8 -static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL(uint32_t val) -{ - return ((val) << DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__MASK; -} -#define DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__MASK 0xffff0000 -#define DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__SHIFT 16 -static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT(uint32_t val) -{ - return ((val) << DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__MASK; -} - -#define REG_DSI_CMD_MDP_STREAM1_TOTAL 0x00000060 -#define DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__MASK 0x0000ffff -#define DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__SHIFT 0 -static inline uint32_t DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL(uint32_t val) -{ - return ((val) << DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__MASK; -} -#define 
DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__MASK 0xffff0000 -#define DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__SHIFT 16 -static inline uint32_t DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL(uint32_t val) -{ - return ((val) << DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__MASK; -} - -#define REG_DSI_ACK_ERR_STATUS 0x00000064 - -static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; } - -static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; } - -#define REG_DSI_TRIG_CTRL 0x00000080 -#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x00000007 -#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0 -static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val) -{ - return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK; -} -#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x00000070 -#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4 -static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val) -{ - return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK; -} -#define DSI_TRIG_CTRL_STREAM__MASK 0x00000300 -#define DSI_TRIG_CTRL_STREAM__SHIFT 8 -static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val) -{ - return ((val) << DSI_TRIG_CTRL_STREAM__SHIFT) & DSI_TRIG_CTRL_STREAM__MASK; -} -#define DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME 0x00001000 -#define DSI_TRIG_CTRL_TE 0x80000000 - -#define REG_DSI_TRIG_DMA 0x0000008c - -#define REG_DSI_DLN0_PHY_ERR 0x000000b0 -#define DSI_DLN0_PHY_ERR_DLN0_ERR_ESC 0x00000001 -#define DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC 0x00000010 -#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL 0x00000100 -#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 0x00001000 -#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1 0x00010000 - -#define REG_DSI_LP_TIMER_CTRL 0x000000b4 -#define DSI_LP_TIMER_CTRL_LP_RX_TO__MASK 0x0000ffff -#define DSI_LP_TIMER_CTRL_LP_RX_TO__SHIFT 0 -static inline uint32_t DSI_LP_TIMER_CTRL_LP_RX_TO(uint32_t val) -{ - return ((val) << DSI_LP_TIMER_CTRL_LP_RX_TO__SHIFT) & DSI_LP_TIMER_CTRL_LP_RX_TO__MASK; -} -#define DSI_LP_TIMER_CTRL_BTA_TO__MASK 0xffff0000 -#define DSI_LP_TIMER_CTRL_BTA_TO__SHIFT 16 -static inline uint32_t DSI_LP_TIMER_CTRL_BTA_TO(uint32_t val) -{ - return ((val) << DSI_LP_TIMER_CTRL_BTA_TO__SHIFT) & DSI_LP_TIMER_CTRL_BTA_TO__MASK; -} - -#define REG_DSI_HS_TIMER_CTRL 0x000000b8 -#define DSI_HS_TIMER_CTRL_HS_TX_TO__MASK 0x0000ffff -#define DSI_HS_TIMER_CTRL_HS_TX_TO__SHIFT 0 -static inline uint32_t DSI_HS_TIMER_CTRL_HS_TX_TO(uint32_t val) -{ - return ((val) << DSI_HS_TIMER_CTRL_HS_TX_TO__SHIFT) & DSI_HS_TIMER_CTRL_HS_TX_TO__MASK; -} -#define DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__MASK 0x000f0000 -#define DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__SHIFT 16 -static inline uint32_t DSI_HS_TIMER_CTRL_TIMER_RESOLUTION(uint32_t val) -{ - return ((val) << DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__SHIFT) & DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__MASK; -} -#define DSI_HS_TIMER_CTRL_HS_TX_TO_STOP_EN 0x10000000 - -#define REG_DSI_TIMEOUT_STATUS 0x000000bc - -#define REG_DSI_CLKOUT_TIMING_CTRL 0x000000c0 -#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK 0x0000003f -#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT 0 -static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(uint32_t val) -{ - return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK; -} -#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK 0x00003f00 -#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT 8 -static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val) -{ - return 
((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK; -} - -#define REG_DSI_EOT_PACKET_CTRL 0x000000c8 -#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001 -#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010 - -#define REG_DSI_LANE_STATUS 0x000000a4 -#define DSI_LANE_STATUS_DLN0_STOPSTATE 0x00000001 -#define DSI_LANE_STATUS_DLN1_STOPSTATE 0x00000002 -#define DSI_LANE_STATUS_DLN2_STOPSTATE 0x00000004 -#define DSI_LANE_STATUS_DLN3_STOPSTATE 0x00000008 -#define DSI_LANE_STATUS_CLKLN_STOPSTATE 0x00000010 -#define DSI_LANE_STATUS_DLN0_ULPS_ACTIVE_NOT 0x00000100 -#define DSI_LANE_STATUS_DLN1_ULPS_ACTIVE_NOT 0x00000200 -#define DSI_LANE_STATUS_DLN2_ULPS_ACTIVE_NOT 0x00000400 -#define DSI_LANE_STATUS_DLN3_ULPS_ACTIVE_NOT 0x00000800 -#define DSI_LANE_STATUS_CLKLN_ULPS_ACTIVE_NOT 0x00001000 -#define DSI_LANE_STATUS_DLN0_DIRECTION 0x00010000 - -#define REG_DSI_LANE_CTRL 0x000000a8 -#define DSI_LANE_CTRL_HS_REQ_SEL_PHY 0x01000000 -#define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST 0x10000000 - -#define REG_DSI_LANE_SWAP_CTRL 0x000000ac -#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK 0x00000007 -#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT 0 -static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val) -{ - return ((val) << DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT) & DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK; -} - -#define REG_DSI_ERR_INT_MASK0 0x00000108 - -#define REG_DSI_INTR_CTRL 0x0000010c - -#define REG_DSI_RESET 0x00000114 - -#define REG_DSI_CLK_CTRL 0x00000118 -#define DSI_CLK_CTRL_AHBS_HCLK_ON 0x00000001 -#define DSI_CLK_CTRL_AHBM_SCLK_ON 0x00000002 -#define DSI_CLK_CTRL_PCLK_ON 0x00000004 -#define DSI_CLK_CTRL_DSICLK_ON 0x00000008 -#define DSI_CLK_CTRL_BYTECLK_ON 0x00000010 -#define DSI_CLK_CTRL_ESCCLK_ON 0x00000020 -#define DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK 0x00000200 - -#define REG_DSI_CLK_STATUS 0x0000011c -#define DSI_CLK_STATUS_DSI_AON_AHBM_HCLK_ACTIVE 0x00000001 -#define DSI_CLK_STATUS_DSI_DYN_AHBM_HCLK_ACTIVE 0x00000002 -#define DSI_CLK_STATUS_DSI_AON_AHBS_HCLK_ACTIVE 0x00000004 -#define DSI_CLK_STATUS_DSI_DYN_AHBS_HCLK_ACTIVE 0x00000008 -#define DSI_CLK_STATUS_DSI_AON_DSICLK_ACTIVE 0x00000010 -#define DSI_CLK_STATUS_DSI_DYN_DSICLK_ACTIVE 0x00000020 -#define DSI_CLK_STATUS_DSI_AON_BYTECLK_ACTIVE 0x00000040 -#define DSI_CLK_STATUS_DSI_DYN_BYTECLK_ACTIVE 0x00000080 -#define DSI_CLK_STATUS_DSI_AON_ESCCLK_ACTIVE 0x00000100 -#define DSI_CLK_STATUS_DSI_AON_PCLK_ACTIVE 0x00000200 -#define DSI_CLK_STATUS_DSI_DYN_PCLK_ACTIVE 0x00000400 -#define DSI_CLK_STATUS_DSI_DYN_CMD_PCLK_ACTIVE 0x00001000 -#define DSI_CLK_STATUS_DSI_CMD_PCLK_ACTIVE 0x00002000 -#define DSI_CLK_STATUS_DSI_VID_PCLK_ACTIVE 0x00004000 -#define DSI_CLK_STATUS_DSI_CAM_BIST_PCLK_ACT 0x00008000 -#define DSI_CLK_STATUS_PLL_UNLOCKED 0x00010000 - -#define REG_DSI_PHY_RESET 0x00000128 -#define DSI_PHY_RESET_RESET 0x00000001 - -#define REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL 0x00000160 - -#define REG_DSI_TPG_MAIN_CONTROL 0x00000198 -#define DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN 0x00000100 - -#define REG_DSI_TPG_VIDEO_CONFIG 0x000001a0 -#define DSI_TPG_VIDEO_CONFIG_BPP__MASK 0x00000003 -#define DSI_TPG_VIDEO_CONFIG_BPP__SHIFT 0 -static inline uint32_t DSI_TPG_VIDEO_CONFIG_BPP(enum video_config_bpp val) -{ - return ((val) << DSI_TPG_VIDEO_CONFIG_BPP__SHIFT) & DSI_TPG_VIDEO_CONFIG_BPP__MASK; -} -#define DSI_TPG_VIDEO_CONFIG_RGB 0x00000004 - -#define REG_DSI_TEST_PATTERN_GEN_CTRL 0x00000158 -#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK 0x00030000 -#define 
DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT 16 -static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL(enum cmd_dma_pattern_sel val) -{ - return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK; -} -#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK 0x00000300 -#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT 8 -static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(enum cmd_mdp_stream0_pattern_sel val) -{ - return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK; -} -#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK 0x00000030 -#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT 4 -static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(enum video_pattern_sel val) -{ - return ((val) << DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK; -} -#define DSI_TEST_PATTERN_GEN_CTRL_TPG_DMA_FIFO_MODE 0x00000004 -#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_TPG_EN 0x00000002 -#define DSI_TEST_PATTERN_GEN_CTRL_EN 0x00000001 - -#define REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 0x00000168 - -#define REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER 0x00000180 -#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER 0x00000001 - -#define REG_DSI_TPG_MAIN_CONTROL2 0x0000019c -#define DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN 0x00000080 -#define DSI_TPG_MAIN_CONTROL2_CMD_MDP1_CHECKERED_RECTANGLE_PATTERN 0x00010000 -#define DSI_TPG_MAIN_CONTROL2_CMD_MDP2_CHECKERED_RECTANGLE_PATTERN 0x02000000 - -#define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c -#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001 - -#define REG_DSI_CMD_MODE_MDP_CTRL2 0x000001b4 -#define DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__MASK 0x0000000f -#define DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__SHIFT 0 -static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2(enum dsi_cmd_dst_format val) -{ - return ((val) << DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__MASK; -} -#define DSI_CMD_MODE_MDP_CTRL2_R_SEL 0x00000010 -#define DSI_CMD_MODE_MDP_CTRL2_G_SEL 0x00000020 -#define DSI_CMD_MODE_MDP_CTRL2_B_SEL 0x00000040 -#define DSI_CMD_MODE_MDP_CTRL2_BYTE_MSB_LSB_FLIP 0x00000080 -#define DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__MASK 0x00000700 -#define DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__SHIFT 8 -static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP(enum dsi_rgb_swap val) -{ - return ((val) << DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__MASK; -} -#define DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK 0x00007000 -#define DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT 12 -static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP(enum dsi_rgb_swap val) -{ - return ((val) << DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK; -} -#define DSI_CMD_MODE_MDP_CTRL2_BURST_MODE 0x00010000 -#define DSI_CMD_MODE_MDP_CTRL2_DATABUS_WIDEN 0x00100000 - -#define REG_DSI_CMD_MODE_MDP_STREAM2_CTRL 0x000001b8 -#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK 0x0000003f -#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__SHIFT 0 -static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE(uint32_t val) -{ - return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK; -} -#define 
DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300 -#define DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__SHIFT 8 -static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL(uint32_t val) -{ - return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__MASK; -} -#define DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__MASK 0xffff0000 -#define DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__SHIFT 16 -static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT(uint32_t val) -{ - return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__MASK; -} - -#define REG_DSI_RDBK_DATA_CTRL 0x000001d0 -#define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000 -#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16 -static inline uint32_t DSI_RDBK_DATA_CTRL_COUNT(uint32_t val) -{ - return ((val) << DSI_RDBK_DATA_CTRL_COUNT__SHIFT) & DSI_RDBK_DATA_CTRL_COUNT__MASK; -} -#define DSI_RDBK_DATA_CTRL_CLR 0x00000001 - -#define REG_DSI_VERSION 0x000001f0 -#define DSI_VERSION_MAJOR__MASK 0xff000000 -#define DSI_VERSION_MAJOR__SHIFT 24 -static inline uint32_t DSI_VERSION_MAJOR(uint32_t val) -{ - return ((val) << DSI_VERSION_MAJOR__SHIFT) & DSI_VERSION_MAJOR__MASK; -} - -#define REG_DSI_CPHY_MODE_CTRL 0x000002d4 - -#define REG_DSI_VIDEO_COMPRESSION_MODE_CTRL 0x0000029c -#define DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__MASK 0xffff0000 -#define DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__SHIFT 16 -static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_WC(uint32_t val) -{ - return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__MASK; -} -#define DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__MASK 0x00003f00 -#define DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__SHIFT 8 -static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE(uint32_t val) -{ - return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__MASK; -} -#define DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__MASK 0x000000c0 -#define DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__SHIFT 6 -static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE(uint32_t val) -{ - return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__MASK; -} -#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__MASK 0x00000030 -#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__SHIFT 4 -static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM(uint32_t val) -{ - return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__MASK; -} -#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EN 0x00000001 - -#define REG_DSI_COMMAND_COMPRESSION_MODE_CTRL 0x000002a4 -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__MASK 0x3f000000 -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__SHIFT 24 -static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE(uint32_t val) -{ - return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__MASK; -} -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__MASK 0x00c00000 -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__SHIFT 22 -static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE(uint32_t val) -{ - return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__SHIFT) & 
DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__MASK; -} -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__MASK 0x00300000 -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__SHIFT 20 -static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM(uint32_t val) -{ - return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__MASK; -} -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EN 0x00010000 -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__MASK 0x00003f00 -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__SHIFT 8 -static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(uint32_t val) -{ - return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__MASK; -} -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__MASK 0x000000c0 -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__SHIFT 6 -static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE(uint32_t val) -{ - return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__MASK; -} -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__MASK 0x00000030 -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__SHIFT 4 -static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM(uint32_t val) -{ - return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__MASK; -} -#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EN 0x00000001 - -#define REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2 0x000002a8 -#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__MASK 0xffff0000 -#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__SHIFT 16 -static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH(uint32_t val) -{ - return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__MASK; -} -#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK 0x0000ffff -#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__SHIFT 0 -static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(uint32_t val) -{ - return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK; -} - - -#endif /* DSI_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index c80be74cf1..7252d36687 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -55,7 +55,7 @@ static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor) * scratch register which we never touch) */ - ver = msm_readl(base + REG_DSI_VERSION); + ver = readl(base + REG_DSI_VERSION); if (ver) { /* older dsi host, there is no register shift */ ver = FIELD(ver, DSI_VERSION_MAJOR); @@ -73,12 +73,12 @@ static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor) * registers are shifted down, read DSI_VERSION again with * the shifted offset */ - ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION); + ver = readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION); ver = FIELD(ver, DSI_VERSION_MAJOR); if (ver == MSM_DSI_VER_MAJOR_6G) { /* 
6G version */ *major = ver; - *minor = msm_readl(base + REG_DSI_6G_HW_VERSION); + *minor = readl(base + REG_DSI_6G_HW_VERSION); return 0; } else { return -EINVAL; @@ -186,11 +186,11 @@ struct msm_dsi_host { static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg) { - return msm_readl(msm_host->ctrl_base + reg); + return readl(msm_host->ctrl_base + reg); } static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data) { - msm_writel(data, msm_host->ctrl_base + reg); + writel(data, msm_host->ctrl_base + reg); } static const struct msm_dsi_cfg_handler *dsi_get_config( @@ -754,6 +754,8 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host, data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags)); data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt)); data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel); + if (msm_dsi_host_is_wide_bus_enabled(&msm_host->base)) + data |= DSI_VID_CFG0_DATABUS_WIDEN; dsi_write(msm_host, REG_DSI_VID_CFG0, data); /* Do not swap RGB colors */ @@ -778,7 +780,6 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host, if (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_3) data |= DSI_CMD_MODE_MDP_CTRL2_BURST_MODE; - /* TODO: Allow for video-mode support once tested/fixed */ if (msm_dsi_host_is_wide_bus_enabled(&msm_host->base)) data |= DSI_CMD_MODE_MDP_CTRL2_DATABUS_WIDEN; @@ -856,6 +857,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod u32 slice_per_intf, total_bytes_per_intf; u32 pkt_per_line; u32 eol_byte_num; + u32 bytes_per_pkt; /* first calculate dsc parameters and then program * compress mode registers @@ -863,6 +865,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay); total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf; + bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */ eol_byte_num = total_bytes_per_intf % 3; @@ -900,6 +903,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl); dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2); } else { + reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_WC(bytes_per_pkt); dsi_write(msm_host, REG_DSI_VIDEO_COMPRESSION_MODE_CTRL, reg); } } diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index af2a287cb3..5b3f3068fd 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -423,7 +423,18 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge, return msm_dsi_host_check_dsc(host, mode); } +static int dsi_mgr_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + int id = dsi_mgr_bridge_get_id(bridge); + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + + return drm_bridge_attach(bridge->encoder, msm_dsi->next_bridge, + bridge, flags); +} + static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = { + .attach = dsi_mgr_bridge_attach, .pre_enable = dsi_mgr_bridge_pre_enable, .post_disable = dsi_mgr_bridge_post_disable, .mode_set = dsi_mgr_bridge_mode_set, @@ -431,17 +442,19 @@ static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = { }; /* initialize bridge */ -struct drm_bridge *msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi, - struct drm_encoder *encoder) +int msm_dsi_manager_connector_init(struct msm_dsi *msm_dsi, + struct drm_encoder *encoder) { + struct drm_device *dev = 
msm_dsi->dev; struct drm_bridge *bridge; struct dsi_bridge *dsi_bridge; + struct drm_connector *connector; int ret; dsi_bridge = devm_kzalloc(msm_dsi->dev->dev, sizeof(*dsi_bridge), GFP_KERNEL); if (!dsi_bridge) - return ERR_PTR(-ENOMEM); + return -ENOMEM; dsi_bridge->id = msm_dsi->id; @@ -450,60 +463,22 @@ struct drm_bridge *msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi, ret = devm_drm_bridge_add(msm_dsi->dev->dev, bridge); if (ret) - return ERR_PTR(ret); + return ret; - ret = drm_bridge_attach(encoder, bridge, NULL, 0); + ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) - return ERR_PTR(ret); - - return bridge; -} - -int msm_dsi_manager_ext_bridge_init(u8 id, struct drm_bridge *int_bridge) -{ - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - struct drm_device *dev = msm_dsi->dev; - struct drm_encoder *encoder; - struct drm_bridge *ext_bridge; - int ret; + return ret; - ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev, - msm_dsi->pdev->dev.of_node, 1, 0); - if (IS_ERR(ext_bridge)) - return PTR_ERR(ext_bridge); - - encoder = int_bridge->encoder; - - /* - * Try first to create the bridge without it creating its own - * connector.. currently some bridges support this, and others - * do not (and some support both modes) - */ - ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, - DRM_BRIDGE_ATTACH_NO_CONNECTOR); - if (ret == -EINVAL) { - /* - * link the internal dsi bridge to the external bridge, - * connector is created by the next bridge. - */ - ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, 0); - if (ret < 0) - return ret; - } else { - struct drm_connector *connector; - - /* We are in charge of the connector, create one now. */ - connector = drm_bridge_connector_init(dev, encoder); - if (IS_ERR(connector)) { - DRM_ERROR("Unable to create bridge connector\n"); - return PTR_ERR(connector); - } - - ret = drm_connector_attach_encoder(connector, encoder); - if (ret < 0) - return ret; + connector = drm_bridge_connector_init(dev, encoder); + if (IS_ERR(connector)) { + DRM_ERROR("Unable to create bridge connector\n"); + return PTR_ERR(connector); } + ret = drm_connector_attach_encoder(connector, encoder); + if (ret < 0) + return ret; + return 0; } diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h deleted file mode 100644 index a2ae8777e5..0000000000 --- a/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h +++ /dev/null @@ -1,227 +0,0 @@ -#ifndef DSI_PHY_10NM_XML -#define DSI_PHY_10NM_XML - -/* Autogenerated file, DO NOT EDIT manually! 
- -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) - -Copyright (C) 2013-2022 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ - - -#define REG_DSI_10nm_PHY_CMN_REVISION_ID0 0x00000000 - -#define REG_DSI_10nm_PHY_CMN_REVISION_ID1 0x00000004 - -#define REG_DSI_10nm_PHY_CMN_REVISION_ID2 0x00000008 - -#define REG_DSI_10nm_PHY_CMN_REVISION_ID3 0x0000000c - -#define REG_DSI_10nm_PHY_CMN_CLK_CFG0 0x00000010 - -#define REG_DSI_10nm_PHY_CMN_CLK_CFG1 0x00000014 - -#define REG_DSI_10nm_PHY_CMN_GLBL_CTRL 0x00000018 - -#define REG_DSI_10nm_PHY_CMN_RBUF_CTRL 0x0000001c - -#define REG_DSI_10nm_PHY_CMN_VREG_CTRL 0x00000020 - -#define REG_DSI_10nm_PHY_CMN_CTRL_0 0x00000024 - -#define REG_DSI_10nm_PHY_CMN_CTRL_1 0x00000028 - -#define REG_DSI_10nm_PHY_CMN_CTRL_2 0x0000002c - -#define REG_DSI_10nm_PHY_CMN_LANE_CFG0 0x00000030 - -#define REG_DSI_10nm_PHY_CMN_LANE_CFG1 0x00000034 - -#define REG_DSI_10nm_PHY_CMN_PLL_CNTRL 0x00000038 - -#define REG_DSI_10nm_PHY_CMN_LANE_CTRL0 0x00000098 - -#define REG_DSI_10nm_PHY_CMN_LANE_CTRL1 0x0000009c - -#define REG_DSI_10nm_PHY_CMN_LANE_CTRL2 0x000000a0 - -#define REG_DSI_10nm_PHY_CMN_LANE_CTRL3 0x000000a4 - -#define REG_DSI_10nm_PHY_CMN_LANE_CTRL4 0x000000a8 - -#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0 0x000000ac - -#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1 0x000000b0 - -#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2 0x000000b4 - -#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3 0x000000b8 - -#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4 0x000000bc - -#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5 0x000000c0 - -#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6 0x000000c4 - -#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7 0x000000c8 - -#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8 0x000000cc - -#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9 0x000000d0 - -#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10 0x000000d4 - -#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11 0x000000d8 - -#define REG_DSI_10nm_PHY_CMN_PHY_STATUS 0x000000ec - -#define REG_DSI_10nm_PHY_CMN_LANE_STATUS0 0x000000f4 - -#define REG_DSI_10nm_PHY_CMN_LANE_STATUS1 0x000000f8 - -static inline uint32_t REG_DSI_10nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; } - -static inline uint32_t REG_DSI_10nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; } - -static inline uint32_t REG_DSI_10nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; } - -static inline uint32_t REG_DSI_10nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; } - -static inline uint32_t REG_DSI_10nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; } - -static inline uint32_t REG_DSI_10nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; } - -static inline uint32_t REG_DSI_10nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000014 + 0x80*i0; } - -static inline uint32_t REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; } - -static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(uint32_t i0) { return 0x0000001c + 0x80*i0; } - -static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(uint32_t i0) { return 0x00000020 + 0x80*i0; } - -static inline uint32_t REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(uint32_t i0) { return 0x00000024 + 0x80*i0; } - -static inline uint32_t REG_DSI_10nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000028 + 0x80*i0; } - -static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000002c + 0x80*i0; } - -#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000 - -#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004 - -#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010 - -#define REG_DSI_10nm_PHY_PLL_DSM_DIVIDER 0x0000001c - -#define REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000020 - 
-#define REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES 0x00000024 - -#define REG_DSI_10nm_PHY_PLL_CMODE 0x0000002c - -#define REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000030 - -#define REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000054 - -#define REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000064 - -#define REG_DSI_10nm_PHY_PLL_PFILT 0x0000007c - -#define REG_DSI_10nm_PHY_PLL_IFILT 0x00000080 - -#define REG_DSI_10nm_PHY_PLL_OUTDIV 0x00000094 - -#define REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE 0x000000a4 - -#define REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000a8 - -#define REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000b4 - -#define REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000cc - -#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000d0 - -#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000d4 - -#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000d8 - -#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x0000010c - -#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000110 - -#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000114 - -#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x00000118 - -#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1 0x0000011c - -#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1 0x00000120 - -#define REG_DSI_10nm_PHY_PLL_SSC_CONTROL 0x0000013c - -#define REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000140 - -#define REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000144 - -#define REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x0000014c - -#define REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1 0x00000154 - -#define REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x0000015c - -#define REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000164 - -#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000180 - -#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY 0x00000184 - -#define REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS 0x0000018c - -#define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0 - - -#endif /* DSI_PHY_10NM_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h deleted file mode 100644 index 24e2fdc0cd..0000000000 --- a/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h +++ /dev/null @@ -1,309 +0,0 @@ -#ifndef DSI_PHY_14NM_XML -#define DSI_PHY_14NM_XML - -/* Autogenerated file, DO NOT EDIT manually! 
- -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) - -Copyright (C) 2013-2022 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ - - -#define REG_DSI_14nm_PHY_CMN_REVISION_ID0 0x00000000 - -#define REG_DSI_14nm_PHY_CMN_REVISION_ID1 0x00000004 - -#define REG_DSI_14nm_PHY_CMN_REVISION_ID2 0x00000008 - -#define REG_DSI_14nm_PHY_CMN_REVISION_ID3 0x0000000c - -#define REG_DSI_14nm_PHY_CMN_CLK_CFG0 0x00000010 -#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK 0x000000f0 -#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT 4 -static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val) -{ - return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK; -} -#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK 0x000000f0 -#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT 4 -static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val) -{ - return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK; -} - -#define REG_DSI_14nm_PHY_CMN_CLK_CFG1 0x00000014 -#define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL 0x00000001 - -#define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL 0x00000018 -#define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000004 - -#define REG_DSI_14nm_PHY_CMN_CTRL_0 0x0000001c - -#define REG_DSI_14nm_PHY_CMN_CTRL_1 0x00000020 - -#define REG_DSI_14nm_PHY_CMN_HW_TRIGGER 0x00000024 - -#define REG_DSI_14nm_PHY_CMN_SW_CFG0 0x00000028 - -#define REG_DSI_14nm_PHY_CMN_SW_CFG1 0x0000002c - -#define REG_DSI_14nm_PHY_CMN_SW_CFG2 0x00000030 - -#define REG_DSI_14nm_PHY_CMN_HW_CFG0 0x00000034 - -#define REG_DSI_14nm_PHY_CMN_HW_CFG1 0x00000038 - -#define REG_DSI_14nm_PHY_CMN_HW_CFG2 0x0000003c - -#define REG_DSI_14nm_PHY_CMN_HW_CFG3 0x00000040 - -#define REG_DSI_14nm_PHY_CMN_HW_CFG4 0x00000044 - -#define REG_DSI_14nm_PHY_CMN_PLL_CNTRL 0x00000048 -#define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START 0x00000001 - -#define REG_DSI_14nm_PHY_CMN_LDO_CNTRL 0x0000004c -#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK 0x0000003f -#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT 0 -static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val) -{ - return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK; -} - -static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; } - -static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; } -#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK 0x000000c0 -#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT 6 -static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val) -{ - return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK; -} - -static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; } -#define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN 0x00000001 - -static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; } - -static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; } - -static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; } - -static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; } - -static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; } -#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff -#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT 0 -static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val) -{ - return ((val) << 
DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK; -} - -static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; } -#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff -#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT 0 -static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val) -{ - return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK; -} - -static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; } -#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff -#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT 0 -static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val) -{ - return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK; -} - -static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; } -#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff -#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT 0 -static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val) -{ - return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK; -} - -static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; } -#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff -#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT 0 -static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val) -{ - return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK; -} - -static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; } -#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK 0x00000007 -#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT 0 -static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val) -{ - return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK; -} -#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK 0x00000070 -#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT 4 -static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val) -{ - return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK; -} - -static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; } -#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK 0x00000007 -#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT 0 -static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val) -{ - return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK; -} - -static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; } -#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff -#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0 -static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) -{ - return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK; -} - -static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; } - -static inline uint32_t 
REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; } - -static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; } - -#define REG_DSI_14nm_PHY_PLL_IE_TRIM 0x00000000 - -#define REG_DSI_14nm_PHY_PLL_IP_TRIM 0x00000004 - -#define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM 0x00000010 - -#define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN 0x0000001c - -#define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET 0x00000028 - -#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL 0x0000002c - -#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2 0x00000030 - -#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3 0x00000034 - -#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4 0x00000038 - -#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5 0x0000003c - -#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1 0x00000040 - -#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2 0x00000044 - -#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1 0x00000048 - -#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2 0x0000004c - -#define REG_DSI_14nm_PHY_PLL_VREF_CFG1 0x0000005c - -#define REG_DSI_14nm_PHY_PLL_KVCO_CODE 0x00000058 - -#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1 0x0000006c - -#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2 0x00000070 - -#define REG_DSI_14nm_PHY_PLL_VCO_COUNT1 0x00000074 - -#define REG_DSI_14nm_PHY_PLL_VCO_COUNT2 0x00000078 - -#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1 0x0000007c - -#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2 0x00000080 - -#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3 0x00000084 - -#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN 0x00000088 - -#define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE 0x0000008c - -#define REG_DSI_14nm_PHY_PLL_DEC_START 0x00000090 - -#define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER 0x00000094 - -#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1 0x00000098 - -#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2 0x0000009c - -#define REG_DSI_14nm_PHY_PLL_SSC_PER1 0x000000a0 - -#define REG_DSI_14nm_PHY_PLL_SSC_PER2 0x000000a4 - -#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1 0x000000a8 - -#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2 0x000000ac - -#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1 0x000000b4 - -#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2 0x000000b8 - -#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3 0x000000bc - -#define REG_DSI_14nm_PHY_PLL_TXCLK_EN 0x000000c0 - -#define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL 0x000000c4 - -#define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS 0x000000cc - -#define REG_DSI_14nm_PHY_PLL_PLL_MISC1 0x000000e8 - -#define REG_DSI_14nm_PHY_PLL_CP_SET_CUR 0x000000f0 - -#define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET 0x000000f4 - -#define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET 0x000000f8 - -#define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET 0x000000fc - -#define REG_DSI_14nm_PHY_PLL_PLL_LPF1 0x00000100 - -#define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV 0x00000104 - -#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108 - - -#endif /* DSI_PHY_14NM_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h deleted file mode 100644 index 6352541f37..0000000000 --- a/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h +++ /dev/null @@ -1,237 +0,0 @@ -#ifndef DSI_PHY_20NM_XML -#define DSI_PHY_20NM_XML - -/* Autogenerated file, DO NOT EDIT manually! 
- -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) - -Copyright (C) 2013-2022 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ - - -static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; } - -static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; } - -static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; } - -static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; } - -static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; } - -static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; } - -static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; } - -static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; } - -static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; } - -static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; } - -#define REG_DSI_20nm_PHY_LNCK_CFG_0 0x00000100 - -#define REG_DSI_20nm_PHY_LNCK_CFG_1 0x00000104 - -#define REG_DSI_20nm_PHY_LNCK_CFG_2 0x00000108 - -#define REG_DSI_20nm_PHY_LNCK_CFG_3 0x0000010c - -#define REG_DSI_20nm_PHY_LNCK_CFG_4 0x00000110 - -#define REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH 0x00000114 - -#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL 0x00000118 - -#define REG_DSI_20nm_PHY_LNCK_TEST_STR0 0x0000011c - -#define REG_DSI_20nm_PHY_LNCK_TEST_STR1 0x00000120 - -#define REG_DSI_20nm_PHY_TIMING_CTRL_0 0x00000140 -#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff -#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0 -static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val) -{ - return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK; -} - -#define REG_DSI_20nm_PHY_TIMING_CTRL_1 0x00000144 -#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff -#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0 -static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val) -{ - return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK; -} - -#define REG_DSI_20nm_PHY_TIMING_CTRL_2 0x00000148 -#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff -#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0 -static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val) -{ - return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK; -} - -#define REG_DSI_20nm_PHY_TIMING_CTRL_3 0x0000014c -#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001 - -#define REG_DSI_20nm_PHY_TIMING_CTRL_4 0x00000150 -#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff -#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0 -static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val) -{ - return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK; -} - -#define REG_DSI_20nm_PHY_TIMING_CTRL_5 0x00000154 -#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff -#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0 -static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val) -{ - return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK; -} - -#define REG_DSI_20nm_PHY_TIMING_CTRL_6 0x00000158 -#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff -#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0 
-static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val) -{ - return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK; -} - -#define REG_DSI_20nm_PHY_TIMING_CTRL_7 0x0000015c -#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff -#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0 -static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val) -{ - return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK; -} - -#define REG_DSI_20nm_PHY_TIMING_CTRL_8 0x00000160 -#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff -#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0 -static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val) -{ - return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK; -} - -#define REG_DSI_20nm_PHY_TIMING_CTRL_9 0x00000164 -#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007 -#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0 -static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val) -{ - return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK; -} -#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070 -#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4 -static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val) -{ - return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK; -} - -#define REG_DSI_20nm_PHY_TIMING_CTRL_10 0x00000168 -#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007 -#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0 -static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val) -{ - return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK; -} - -#define REG_DSI_20nm_PHY_TIMING_CTRL_11 0x0000016c -#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff -#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0 -static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) -{ - return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK; -} - -#define REG_DSI_20nm_PHY_CTRL_0 0x00000170 - -#define REG_DSI_20nm_PHY_CTRL_1 0x00000174 - -#define REG_DSI_20nm_PHY_CTRL_2 0x00000178 - -#define REG_DSI_20nm_PHY_CTRL_3 0x0000017c - -#define REG_DSI_20nm_PHY_CTRL_4 0x00000180 - -#define REG_DSI_20nm_PHY_STRENGTH_0 0x00000184 - -#define REG_DSI_20nm_PHY_STRENGTH_1 0x00000188 - -#define REG_DSI_20nm_PHY_BIST_CTRL_0 0x000001b4 - -#define REG_DSI_20nm_PHY_BIST_CTRL_1 0x000001b8 - -#define REG_DSI_20nm_PHY_BIST_CTRL_2 0x000001bc - -#define REG_DSI_20nm_PHY_BIST_CTRL_3 0x000001c0 - -#define REG_DSI_20nm_PHY_BIST_CTRL_4 0x000001c4 - -#define REG_DSI_20nm_PHY_BIST_CTRL_5 0x000001c8 - -#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL 0x000001d4 -#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001 - -#define REG_DSI_20nm_PHY_LDO_CNTRL 0x000001dc - -#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0 0x00000000 - -#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1 0x00000004 - -#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2 0x00000008 - -#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3 0x0000000c - -#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4 0x00000010 - -#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5 0x00000014 - -#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018 - - -#endif /* DSI_PHY_20NM_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h 
b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h deleted file mode 100644 index 178bd4fd78..0000000000 --- a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h +++ /dev/null @@ -1,384 +0,0 @@ -#ifndef DSI_PHY_28NM_XML -#define DSI_PHY_28NM_XML - -/* Autogenerated file, DO NOT EDIT manually! - -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) - -Copyright (C) 2013-2022 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ - - -static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; } - -#define REG_DSI_28nm_PHY_LNCK_CFG_0 0x00000100 - -#define REG_DSI_28nm_PHY_LNCK_CFG_1 0x00000104 - -#define REG_DSI_28nm_PHY_LNCK_CFG_2 0x00000108 - -#define REG_DSI_28nm_PHY_LNCK_CFG_3 0x0000010c - -#define REG_DSI_28nm_PHY_LNCK_CFG_4 0x00000110 - -#define REG_DSI_28nm_PHY_LNCK_TEST_DATAPATH 0x00000114 - -#define REG_DSI_28nm_PHY_LNCK_DEBUG_SEL 0x00000118 - -#define REG_DSI_28nm_PHY_LNCK_TEST_STR0 0x0000011c - -#define REG_DSI_28nm_PHY_LNCK_TEST_STR1 0x00000120 - -#define REG_DSI_28nm_PHY_TIMING_CTRL_0 0x00000140 -#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff -#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK; -} - -#define REG_DSI_28nm_PHY_TIMING_CTRL_1 0x00000144 -#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff -#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK; -} - -#define REG_DSI_28nm_PHY_TIMING_CTRL_2 0x00000148 -#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff -#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK; -} - -#define REG_DSI_28nm_PHY_TIMING_CTRL_3 0x0000014c -#define DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001 - -#define REG_DSI_28nm_PHY_TIMING_CTRL_4 0x00000150 -#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff -#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK; -} - -#define REG_DSI_28nm_PHY_TIMING_CTRL_5 0x00000154 -#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff -#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val) -{ - return 
((val) << DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK; -} - -#define REG_DSI_28nm_PHY_TIMING_CTRL_6 0x00000158 -#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff -#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK; -} - -#define REG_DSI_28nm_PHY_TIMING_CTRL_7 0x0000015c -#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff -#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK; -} - -#define REG_DSI_28nm_PHY_TIMING_CTRL_8 0x00000160 -#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff -#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK; -} - -#define REG_DSI_28nm_PHY_TIMING_CTRL_9 0x00000164 -#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007 -#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK; -} -#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070 -#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4 -static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK; -} - -#define REG_DSI_28nm_PHY_TIMING_CTRL_10 0x00000168 -#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007 -#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK; -} - -#define REG_DSI_28nm_PHY_TIMING_CTRL_11 0x0000016c -#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff -#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK; -} - -#define REG_DSI_28nm_PHY_CTRL_0 0x00000170 - -#define REG_DSI_28nm_PHY_CTRL_1 0x00000174 - -#define REG_DSI_28nm_PHY_CTRL_2 0x00000178 - -#define REG_DSI_28nm_PHY_CTRL_3 0x0000017c - -#define REG_DSI_28nm_PHY_CTRL_4 0x00000180 - -#define REG_DSI_28nm_PHY_STRENGTH_0 0x00000184 - -#define REG_DSI_28nm_PHY_STRENGTH_1 0x00000188 - -#define REG_DSI_28nm_PHY_BIST_CTRL_0 0x000001b4 - -#define REG_DSI_28nm_PHY_BIST_CTRL_1 0x000001b8 - -#define REG_DSI_28nm_PHY_BIST_CTRL_2 0x000001bc - -#define REG_DSI_28nm_PHY_BIST_CTRL_3 0x000001c0 - -#define REG_DSI_28nm_PHY_BIST_CTRL_4 0x000001c4 - -#define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8 - -#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4 -#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001 - -#define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc - -#define REG_DSI_28nm_PHY_REGULATOR_CTRL_0 0x00000000 - -#define REG_DSI_28nm_PHY_REGULATOR_CTRL_1 0x00000004 - -#define REG_DSI_28nm_PHY_REGULATOR_CTRL_2 0x00000008 - -#define REG_DSI_28nm_PHY_REGULATOR_CTRL_3 
0x0000000c - -#define REG_DSI_28nm_PHY_REGULATOR_CTRL_4 0x00000010 - -#define REG_DSI_28nm_PHY_REGULATOR_CTRL_5 0x00000014 - -#define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018 - -#define REG_DSI_28nm_PHY_PLL_REFCLK_CFG 0x00000000 -#define DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR 0x00000001 - -#define REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004 - -#define REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008 - -#define REG_DSI_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c - -#define REG_DSI_28nm_PHY_PLL_VREG_CFG 0x00000010 -#define DSI_28nm_PHY_PLL_VREG_CFG_POSTDIV1_BYPASS_B 0x00000002 - -#define REG_DSI_28nm_PHY_PLL_PWRGEN_CFG 0x00000014 - -#define REG_DSI_28nm_PHY_PLL_DMUX_CFG 0x00000018 - -#define REG_DSI_28nm_PHY_PLL_AMUX_CFG 0x0000001c - -#define REG_DSI_28nm_PHY_PLL_GLB_CFG 0x00000020 -#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001 -#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002 -#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004 -#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008 - -#define REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024 - -#define REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028 - -#define REG_DSI_28nm_PHY_PLL_LPFR_CFG 0x0000002c - -#define REG_DSI_28nm_PHY_PLL_LPFC1_CFG 0x00000030 - -#define REG_DSI_28nm_PHY_PLL_LPFC2_CFG 0x00000034 - -#define REG_DSI_28nm_PHY_PLL_SDM_CFG0 0x00000038 -#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK 0x0000003f -#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK; -} -#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP 0x00000040 - -#define REG_DSI_28nm_PHY_PLL_SDM_CFG1 0x0000003c -#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK 0x0000003f -#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK; -} -#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK 0x00000040 -#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT 6 -static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK; -} - -#define REG_DSI_28nm_PHY_PLL_SDM_CFG2 0x00000040 -#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK 0x000000ff -#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK; -} - -#define REG_DSI_28nm_PHY_PLL_SDM_CFG3 0x00000044 -#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK 0x000000ff -#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT 0 -static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(uint32_t val) -{ - return ((val) << DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK; -} - -#define REG_DSI_28nm_PHY_PLL_SDM_CFG4 0x00000048 - -#define REG_DSI_28nm_PHY_PLL_SSC_CFG0 0x0000004c - -#define REG_DSI_28nm_PHY_PLL_SSC_CFG1 0x00000050 - -#define REG_DSI_28nm_PHY_PLL_SSC_CFG2 0x00000054 - -#define REG_DSI_28nm_PHY_PLL_SSC_CFG3 0x00000058 - -#define REG_DSI_28nm_PHY_PLL_LKDET_CFG0 0x0000005c - -#define REG_DSI_28nm_PHY_PLL_LKDET_CFG1 0x00000060 - -#define REG_DSI_28nm_PHY_PLL_LKDET_CFG2 0x00000064 - -#define 
REG_DSI_28nm_PHY_PLL_TEST_CFG 0x00000068 -#define DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001 - -#define REG_DSI_28nm_PHY_PLL_CAL_CFG0 0x0000006c - -#define REG_DSI_28nm_PHY_PLL_CAL_CFG1 0x00000070 - -#define REG_DSI_28nm_PHY_PLL_CAL_CFG2 0x00000074 - -#define REG_DSI_28nm_PHY_PLL_CAL_CFG3 0x00000078 - -#define REG_DSI_28nm_PHY_PLL_CAL_CFG4 0x0000007c - -#define REG_DSI_28nm_PHY_PLL_CAL_CFG5 0x00000080 - -#define REG_DSI_28nm_PHY_PLL_CAL_CFG6 0x00000084 - -#define REG_DSI_28nm_PHY_PLL_CAL_CFG7 0x00000088 - -#define REG_DSI_28nm_PHY_PLL_CAL_CFG8 0x0000008c - -#define REG_DSI_28nm_PHY_PLL_CAL_CFG9 0x00000090 - -#define REG_DSI_28nm_PHY_PLL_CAL_CFG10 0x00000094 - -#define REG_DSI_28nm_PHY_PLL_CAL_CFG11 0x00000098 - -#define REG_DSI_28nm_PHY_PLL_EFUSE_CFG 0x0000009c - -#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0 - -#define REG_DSI_28nm_PHY_PLL_CTRL_42 0x000000a4 - -#define REG_DSI_28nm_PHY_PLL_CTRL_43 0x000000a8 - -#define REG_DSI_28nm_PHY_PLL_CTRL_44 0x000000ac - -#define REG_DSI_28nm_PHY_PLL_CTRL_45 0x000000b0 - -#define REG_DSI_28nm_PHY_PLL_CTRL_46 0x000000b4 - -#define REG_DSI_28nm_PHY_PLL_CTRL_47 0x000000b8 - -#define REG_DSI_28nm_PHY_PLL_CTRL_48 0x000000bc - -#define REG_DSI_28nm_PHY_PLL_STATUS 0x000000c0 -#define DSI_28nm_PHY_PLL_STATUS_PLL_RDY 0x00000001 - -#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS0 0x000000c4 - -#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS1 0x000000c8 - -#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS2 0x000000cc - -#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS3 0x000000d0 - -#define REG_DSI_28nm_PHY_PLL_CTRL_54 0x000000d4 - - -#endif /* DSI_PHY_28NM_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h deleted file mode 100644 index 5f900bb535..0000000000 --- a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h +++ /dev/null @@ -1,286 +0,0 @@ -#ifndef DSI_PHY_28NM_8960_XML -#define DSI_PHY_28NM_8960_XML - -/* Autogenerated file, DO NOT EDIT manually! 
- -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) - -Copyright (C) 2013-2022 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ - - -static inline uint32_t REG_DSI_28nm_8960_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x00000014 + 0x40*i0; } - -static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000018 + 0x40*i0; } - -#define REG_DSI_28nm_8960_PHY_LNCK_CFG_0 0x00000100 - -#define REG_DSI_28nm_8960_PHY_LNCK_CFG_1 0x00000104 - -#define REG_DSI_28nm_8960_PHY_LNCK_CFG_2 0x00000108 - -#define REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH 0x0000010c - -#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0 0x00000114 - -#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1 0x00000118 - -#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_0 0x00000140 -#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff -#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0 -static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val) -{ - return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK; -} - -#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_1 0x00000144 -#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff -#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0 -static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val) -{ - return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK; -} - -#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_2 0x00000148 -#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff -#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0 -static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val) -{ - return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK; -} - -#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_3 0x0000014c - -#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_4 0x00000150 -#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff -#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0 -static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val) -{ - return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK; -} - -#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_5 0x00000154 -#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff -#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0 -static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val) -{ - return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK; -} - -#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_6 0x00000158 -#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff -#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0 -static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val) -{ - return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK; -} - -#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_7 0x0000015c -#define 
DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff -#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0 -static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val) -{ - return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK; -} - -#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_8 0x00000160 -#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff -#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0 -static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val) -{ - return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK; -} - -#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_9 0x00000164 -#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007 -#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0 -static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(uint32_t val) -{ - return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK; -} -#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070 -#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4 -static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val) -{ - return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK; -} - -#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_10 0x00000168 -#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007 -#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0 -static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(uint32_t val) -{ - return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK; -} - -#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_11 0x0000016c -#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff -#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0 -static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) -{ - return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK; -} - -#define REG_DSI_28nm_8960_PHY_CTRL_0 0x00000170 - -#define REG_DSI_28nm_8960_PHY_CTRL_1 0x00000174 - -#define REG_DSI_28nm_8960_PHY_CTRL_2 0x00000178 - -#define REG_DSI_28nm_8960_PHY_CTRL_3 0x0000017c - -#define REG_DSI_28nm_8960_PHY_STRENGTH_0 0x00000180 - -#define REG_DSI_28nm_8960_PHY_STRENGTH_1 0x00000184 - -#define REG_DSI_28nm_8960_PHY_STRENGTH_2 0x00000188 - -#define REG_DSI_28nm_8960_PHY_BIST_CTRL_0 0x0000018c - -#define REG_DSI_28nm_8960_PHY_BIST_CTRL_1 0x00000190 - -#define REG_DSI_28nm_8960_PHY_BIST_CTRL_2 0x00000194 - -#define REG_DSI_28nm_8960_PHY_BIST_CTRL_3 0x00000198 - -#define REG_DSI_28nm_8960_PHY_BIST_CTRL_4 0x0000019c - -#define REG_DSI_28nm_8960_PHY_LDO_CTRL 0x000001b0 - -#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0 0x00000000 - -#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1 0x00000004 - -#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2 0x00000008 - -#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3 0x0000000c - -#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4 0x00000010 - -#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_5 0x00000014 - -#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG 0x00000018 - -#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER 0x00000028 - -#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_0 0x0000002c - -#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_1 0x00000030 - -#define 
REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2 0x00000034 - -#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0 0x00000038 - -#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1 0x0000003c - -#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_2 0x00000040 - -#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3 0x00000044 - -#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4 0x00000048 - -#define REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS 0x00000050 -#define DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY 0x00000010 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_0 0x00000000 -#define DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE 0x00000001 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_1 0x00000004 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_2 0x00000008 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_3 0x0000000c - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_4 0x00000010 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_5 0x00000014 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_6 0x00000018 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_7 0x0000001c - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_8 0x00000020 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_9 0x00000024 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_10 0x00000028 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_11 0x0000002c - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_12 0x00000030 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_13 0x00000034 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_14 0x00000038 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_15 0x0000003c - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_16 0x00000040 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_17 0x00000044 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_18 0x00000048 - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_19 0x0000004c - -#define REG_DSI_28nm_8960_PHY_PLL_CTRL_20 0x00000050 - -#define REG_DSI_28nm_8960_PHY_PLL_RDY 0x00000080 -#define DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY 0x00000001 - - -#endif /* DSI_PHY_28NM_8960_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h deleted file mode 100644 index 584cbd0205..0000000000 --- a/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h +++ /dev/null @@ -1,483 +0,0 @@ -#ifndef DSI_PHY_7NM_XML -#define DSI_PHY_7NM_XML - -/* Autogenerated file, DO NOT EDIT manually! 
- -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) - -Copyright (C) 2013-2022 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ - - -#define REG_DSI_7nm_PHY_CMN_REVISION_ID0 0x00000000 - -#define REG_DSI_7nm_PHY_CMN_REVISION_ID1 0x00000004 - -#define REG_DSI_7nm_PHY_CMN_REVISION_ID2 0x00000008 - -#define REG_DSI_7nm_PHY_CMN_REVISION_ID3 0x0000000c - -#define REG_DSI_7nm_PHY_CMN_CLK_CFG0 0x00000010 - -#define REG_DSI_7nm_PHY_CMN_CLK_CFG1 0x00000014 - -#define REG_DSI_7nm_PHY_CMN_GLBL_CTRL 0x00000018 - -#define REG_DSI_7nm_PHY_CMN_RBUF_CTRL 0x0000001c - -#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_0 0x00000020 - -#define REG_DSI_7nm_PHY_CMN_CTRL_0 0x00000024 - -#define REG_DSI_7nm_PHY_CMN_CTRL_1 0x00000028 - -#define REG_DSI_7nm_PHY_CMN_CTRL_2 0x0000002c - -#define REG_DSI_7nm_PHY_CMN_CTRL_3 0x00000030 - -#define REG_DSI_7nm_PHY_CMN_LANE_CFG0 0x00000034 - -#define REG_DSI_7nm_PHY_CMN_LANE_CFG1 0x00000038 - -#define REG_DSI_7nm_PHY_CMN_PLL_CNTRL 0x0000003c - -#define REG_DSI_7nm_PHY_CMN_DPHY_SOT 0x00000040 - -#define REG_DSI_7nm_PHY_CMN_LANE_CTRL0 0x000000a0 - -#define REG_DSI_7nm_PHY_CMN_LANE_CTRL1 0x000000a4 - -#define REG_DSI_7nm_PHY_CMN_LANE_CTRL2 0x000000a8 - -#define REG_DSI_7nm_PHY_CMN_LANE_CTRL3 0x000000ac - -#define REG_DSI_7nm_PHY_CMN_LANE_CTRL4 0x000000b0 - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0 0x000000b4 - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1 0x000000b8 - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2 0x000000bc - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3 0x000000c0 - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4 0x000000c4 - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5 0x000000c8 - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6 0x000000cc - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7 0x000000d0 - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8 0x000000d4 - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9 0x000000d8 - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10 0x000000dc - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11 0x000000e0 - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12 0x000000e4 - -#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13 0x000000e8 - -#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0 0x000000ec - -#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_1 0x000000f0 - -#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL 0x000000f4 - -#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL 0x000000f8 - -#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL 0x000000fc - -#define REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL 0x00000100 - -#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0 0x00000104 - -#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1 0x00000108 - -#define REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL 0x0000010c - -#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_1 0x00000110 - -#define REG_DSI_7nm_PHY_CMN_CTRL_4 0x00000114 - -#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4 0x00000128 - -#define REG_DSI_7nm_PHY_CMN_PHY_STATUS 0x00000140 - -#define REG_DSI_7nm_PHY_CMN_LANE_STATUS0 0x00000148 - -#define REG_DSI_7nm_PHY_CMN_LANE_STATUS1 0x0000014c - -#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10 0x000001ac - -static inline uint32_t REG_DSI_7nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; } - -static inline uint32_t REG_DSI_7nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; } - -static inline uint32_t REG_DSI_7nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; } - -static inline uint32_t REG_DSI_7nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; } - -static inline uint32_t REG_DSI_7nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x80*i0; } - -static inline uint32_t REG_DSI_7nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000010 + 0x80*i0; } - -static inline uint32_t 
REG_DSI_7nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000014 + 0x80*i0; } - -static inline uint32_t REG_DSI_7nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; } - -#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000 - -#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004 - -#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS 0x00000008 - -#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS_TWO 0x0000000c - -#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010 - -#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FOUR 0x00000014 - -#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE 0x00000018 - -#define REG_DSI_7nm_PHY_PLL_INT_LOOP_CONTROLS 0x0000001c - -#define REG_DSI_7nm_PHY_PLL_DSM_DIVIDER 0x00000020 - -#define REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000024 - -#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES 0x00000028 - -#define REG_DSI_7nm_PHY_PLL_FREQ_UPDATE_CONTROL_OVERRIDES 0x0000002c - -#define REG_DSI_7nm_PHY_PLL_CMODE 0x00000030 - -#define REG_DSI_7nm_PHY_PLL_PSM_CTRL 0x00000034 - -#define REG_DSI_7nm_PHY_PLL_RSM_CTRL 0x00000038 - -#define REG_DSI_7nm_PHY_PLL_VCO_TUNE_MAP 0x0000003c - -#define REG_DSI_7nm_PHY_PLL_PLL_CNTRL 0x00000040 - -#define REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000044 - -#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_LOW 0x00000048 - -#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_HIGH 0x0000004c - -#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS 0x00000050 - -#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MIN 0x00000054 - -#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MAX 0x00000058 - -#define REG_DSI_7nm_PHY_PLL_BAND_SEL_PFILT 0x0000005c - -#define REG_DSI_7nm_PHY_PLL_BAND_SEL_IFILT 0x00000060 - -#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_TWO 0x00000064 - -#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000068 - -#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_FOUR 0x0000006c - -#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_HIGH 0x00000070 - -#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_LOW 0x00000074 - -#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000078 - -#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_THRESH 0x0000007c - -#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_HIGH 0x00000080 - -#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_LOW 0x00000084 - -#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_HIGH 0x00000088 - -#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_LOW 0x0000008c - -#define REG_DSI_7nm_PHY_PLL_PFILT 0x00000090 - -#define REG_DSI_7nm_PHY_PLL_IFILT 0x00000094 - -#define REG_DSI_7nm_PHY_PLL_PLL_GAIN 0x00000098 - -#define REG_DSI_7nm_PHY_PLL_ICODE_LOW 0x0000009c - -#define REG_DSI_7nm_PHY_PLL_ICODE_HIGH 0x000000a0 - -#define REG_DSI_7nm_PHY_PLL_LOCKDET 0x000000a4 - -#define REG_DSI_7nm_PHY_PLL_OUTDIV 0x000000a8 - -#define REG_DSI_7nm_PHY_PLL_FASTLOCK_CONTROL 0x000000ac - -#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_ONE 0x000000b0 - -#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_TWO 0x000000b4 - -#define REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE 0x000000b8 - -#define REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000bc - -#define REG_DSI_7nm_PHY_PLL_RATE_CHANGE 0x000000c0 - -#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS 0x000000c4 - -#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000c8 - -#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START 0x000000cc - -#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW 0x000000d0 - -#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID 0x000000d4 - -#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH 0x000000d8 - -#define REG_DSI_7nm_PHY_PLL_DEC_FRAC_MUXES 0x000000dc - -#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1 
0x000000e0 - -#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000e4 - -#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000e8 - -#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000ec - -#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_2 0x000000f0 - -#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_2 0x000000f4 - -#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_2 0x000000f8 - -#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_2 0x000000fc - -#define REG_DSI_7nm_PHY_PLL_MASH_CONTROL 0x00000100 - -#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW 0x00000104 - -#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH 0x00000108 - -#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW 0x0000010c - -#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH 0x00000110 - -#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW 0x00000114 - -#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH 0x00000118 - -#define REG_DSI_7nm_PHY_PLL_SSC_MUX_CONTROL 0x0000011c - -#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x00000120 - -#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000124 - -#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000128 - -#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x0000012c - -#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1 0x00000130 - -#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1 0x00000134 - -#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_2 0x00000138 - -#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_2 0x0000013c - -#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_2 0x00000140 - -#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_2 0x00000144 - -#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_2 0x00000148 - -#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_2 0x0000014c - -#define REG_DSI_7nm_PHY_PLL_SSC_CONTROL 0x00000150 - -#define REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000154 - -#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000158 - -#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_2 0x0000015c - -#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x00000160 - -#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_2 0x00000164 - -#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1 0x00000168 - -#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_2 0x0000016c - -#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x00000170 - -#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_2 0x00000174 - -#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000178 - -#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_2 0x0000017c - -#define REG_DSI_7nm_PHY_PLL_PLL_FASTLOCK_EN_BAND 0x00000180 - -#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MID 0x00000184 - -#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_HIGH 0x00000188 - -#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MUX 0x0000018c - -#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000190 - -#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY 0x00000194 - -#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_MIN_DELAY 0x00000198 - -#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS 0x0000019c - -#define REG_DSI_7nm_PHY_PLL_SPARE_AND_JPC_OVERRIDES 0x000001a0 - -#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_1 0x000001a4 - -#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_2 0x000001a8 - -#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_CTRL_1 0x000001ac - -#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE 0x000001b0 - -#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_TWO 0x000001b4 - -#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL 0x000001b8 - -#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_LOW 0x000001bc - -#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_HIGH 0x000001c0 - -#define REG_DSI_7nm_PHY_PLL_FD_OUT_LOW 0x000001c4 - -#define REG_DSI_7nm_PHY_PLL_FD_OUT_HIGH 
0x000001c8 - -#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_STATUS_1 0x000001cc - -#define REG_DSI_7nm_PHY_PLL_PLL_MISC_CONFIG 0x000001d0 - -#define REG_DSI_7nm_PHY_PLL_FLL_CONFIG 0x000001d4 - -#define REG_DSI_7nm_PHY_PLL_FLL_FREQ_ACQ_TIME 0x000001d8 - -#define REG_DSI_7nm_PHY_PLL_FLL_CODE0 0x000001dc - -#define REG_DSI_7nm_PHY_PLL_FLL_CODE1 0x000001e0 - -#define REG_DSI_7nm_PHY_PLL_FLL_GAIN0 0x000001e4 - -#define REG_DSI_7nm_PHY_PLL_FLL_GAIN1 0x000001e8 - -#define REG_DSI_7nm_PHY_PLL_SW_RESET 0x000001ec - -#define REG_DSI_7nm_PHY_PLL_FAST_PWRUP 0x000001f0 - -#define REG_DSI_7nm_PHY_PLL_LOCKTIME0 0x000001f4 - -#define REG_DSI_7nm_PHY_PLL_LOCKTIME1 0x000001f8 - -#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS_SEL 0x000001fc - -#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS0 0x00000200 - -#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS1 0x00000204 - -#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS2 0x00000208 - -#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS3 0x0000020c - -#define REG_DSI_7nm_PHY_PLL_ANALOG_FLL_CONTROL_OVERRIDES 0x00000210 - -#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG 0x00000214 - -#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE0_STATUS 0x00000218 - -#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE1_STATUS 0x0000021c - -#define REG_DSI_7nm_PHY_PLL_RESET_SM_STATUS 0x00000220 - -#define REG_DSI_7nm_PHY_PLL_TDC_OFFSET 0x00000224 - -#define REG_DSI_7nm_PHY_PLL_PS3_PWRDOWN_CONTROLS 0x00000228 - -#define REG_DSI_7nm_PHY_PLL_PS4_PWRDOWN_CONTROLS 0x0000022c - -#define REG_DSI_7nm_PHY_PLL_PLL_RST_CONTROLS 0x00000230 - -#define REG_DSI_7nm_PHY_PLL_GEAR_BAND_SELECT_CONTROLS 0x00000234 - -#define REG_DSI_7nm_PHY_PLL_PSM_CLK_CONTROLS 0x00000238 - -#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES_2 0x0000023c - -#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1 0x00000240 - -#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_2 0x00000244 - -#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1 0x00000248 - -#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_2 0x0000024c - -#define REG_DSI_7nm_PHY_PLL_CMODE_1 0x00000250 - -#define REG_DSI_7nm_PHY_PLL_CMODE_2 0x00000254 - -#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1 0x00000258 - -#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_2 0x0000025c - -#define REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE 0x00000260 - - -#endif /* DSI_PHY_7NM_XML */ diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h deleted file mode 100644 index 7062f71642..0000000000 --- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h +++ /dev/null @@ -1,131 +0,0 @@ -#ifndef MMSS_CC_XML -#define MMSS_CC_XML - -/* Autogenerated file, DO NOT EDIT manually! 
- -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) - -Copyright (C) 2013-2022 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/
-
-
-enum mmss_cc_clk {
- CLK = 0,
- PCLK = 1,
-};
-
-#define REG_MMSS_CC_AHB 0x00000008
-
-static inline uint32_t __offset_CLK(enum mmss_cc_clk idx)
-{
- switch (idx) {
- case CLK: return 0x0000004c;
- case PCLK: return 0x00000130;
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MMSS_CC_CLK(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
-
-static inline uint32_t REG_MMSS_CC_CLK_CC(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
-#define MMSS_CC_CLK_CC_CLK_EN 0x00000001
-#define MMSS_CC_CLK_CC_ROOT_EN 0x00000004
-#define MMSS_CC_CLK_CC_MND_EN 0x00000020
-#define MMSS_CC_CLK_CC_MND_MODE__MASK 0x000000c0
-#define MMSS_CC_CLK_CC_MND_MODE__SHIFT 6
-static inline uint32_t MMSS_CC_CLK_CC_MND_MODE(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_CC_MND_MODE__SHIFT) & MMSS_CC_CLK_CC_MND_MODE__MASK;
-}
-#define MMSS_CC_CLK_CC_PMXO_SEL__MASK 0x00000300
-#define MMSS_CC_CLK_CC_PMXO_SEL__SHIFT 8
-static inline uint32_t MMSS_CC_CLK_CC_PMXO_SEL(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_CC_PMXO_SEL__SHIFT) & MMSS_CC_CLK_CC_PMXO_SEL__MASK;
-}
-
-static inline uint32_t REG_MMSS_CC_CLK_MD(enum mmss_cc_clk i0) { return 0x00000004 + __offset_CLK(i0); }
-#define MMSS_CC_CLK_MD_D__MASK 0x000000ff
-#define MMSS_CC_CLK_MD_D__SHIFT 0
-static inline uint32_t MMSS_CC_CLK_MD_D(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_MD_D__SHIFT) & MMSS_CC_CLK_MD_D__MASK;
-}
-#define MMSS_CC_CLK_MD_M__MASK 0x0000ff00
-#define MMSS_CC_CLK_MD_M__SHIFT 8
-static inline uint32_t MMSS_CC_CLK_MD_M(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_MD_M__SHIFT) & MMSS_CC_CLK_MD_M__MASK;
-}
-
-static inline uint32_t REG_MMSS_CC_CLK_NS(enum mmss_cc_clk i0) { return 0x00000008 + __offset_CLK(i0); }
-#define MMSS_CC_CLK_NS_SRC__MASK 0x0000000f
-#define MMSS_CC_CLK_NS_SRC__SHIFT 0
-static inline uint32_t MMSS_CC_CLK_NS_SRC(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_NS_SRC__SHIFT) & MMSS_CC_CLK_NS_SRC__MASK;
-}
-#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK 0x00fff000
-#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT 12
-static inline uint32_t MMSS_CC_CLK_NS_PRE_DIV_FUNC(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT) & MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK;
-}
-#define MMSS_CC_CLK_NS_VAL__MASK 0xff000000
-#define MMSS_CC_CLK_NS_VAL__SHIFT 24
-static inline uint32_t MMSS_CC_CLK_NS_VAL(uint32_t val)
-{
- return ((val) << MMSS_CC_CLK_NS_VAL__SHIFT) & MMSS_CC_CLK_NS_VAL__MASK;
-}
-
-#define REG_MMSS_CC_DSI2_PIXEL_CC 0x00000094
-
-#define REG_MMSS_CC_DSI2_PIXEL_NS 0x000000e4
-
-#define REG_MMSS_CC_DSI2_PIXEL_CC2 0x00000264
-
-
-#endif /* MMSS_CC_XML */
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index e4275d3ad5..5a5dc3faa9 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -12,10 +12,10 @@
 #include "dsi.h"
-#define dsi_phy_read(offset) msm_readl((offset))
-#define dsi_phy_write(offset, data) msm_writel((data), (offset))
-#define dsi_phy_write_udelay(offset, data, delay_us) { msm_writel((data), (offset)); udelay(delay_us); }
-#define dsi_phy_write_ndelay(offset, data, delay_ns) { msm_writel((data), (offset)); ndelay(delay_ns); }
+#define dsi_phy_read(offset) readl((offset))
+#define dsi_phy_write(offset, data) writel((data), (offset))
+#define dsi_phy_write_udelay(offset, data, delay_us) { writel((data), (offset)); udelay(delay_us); }
+#define dsi_phy_write_ndelay(offset, data, delay_ns) { writel((data), (offset)); ndelay(delay_ns); }
 struct msm_dsi_phy_ops {
 int (*pll_init)(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
deleted file mode 100644
index 344a1a1620..0000000000
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ /dev/null
@@ -1,70 +0,0 @@
-#ifndef SFPB_XML
-#define SFPB_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
-
-Copyright (C) 2013-2022 by the following authors:
-- Rob Clark (robclark)
-- Ilia Mirkin (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-enum sfpb_ahb_arb_master_port_en {
- SFPB_MASTER_PORT_ENABLE = 3,
- SFPB_MASTER_PORT_DISABLE = 0,
-};
-
-#define REG_SFPB_GPREG 0x00000058
-#define SFPB_GPREG_MASTER_PORT_EN__MASK 0x00001800
-#define SFPB_GPREG_MASTER_PORT_EN__SHIFT 11
-static inline uint32_t SFPB_GPREG_MASTER_PORT_EN(enum sfpb_ahb_arb_master_port_en val)
-{
- return ((val) << SFPB_GPREG_MASTER_PORT_EN__SHIFT) & SFPB_GPREG_MASTER_PORT_EN__MASK;
-}
-
-
-#endif /* SFPB_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index c8ebd75176..24abcb7254 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -5,8 +5,8 @@
  * Author: Rob Clark
  */
+#include
 #include
-#include
 #include
 #include
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index ec57864403..4586baf364 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -115,17 +115,17 @@ void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
 static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
 {
- msm_writel(data, hdmi->mmio + reg);
+ writel(data, hdmi->mmio + reg);
 }
 static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
 {
- return msm_readl(hdmi->mmio + reg);
+ return readl(hdmi->mmio + reg);
 }
 static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg)
 {
- return msm_readl(hdmi->qfprom_mmio + reg);
+ return readl(hdmi->qfprom_mmio + reg);
 }
 /*
@@ -166,12 +166,12 @@ struct hdmi_phy {
 static inline void hdmi_phy_write(struct hdmi_phy *phy, u32 reg, u32 data)
 {
- msm_writel(data, phy->mmio + reg);
+ writel(data, phy->mmio + reg);
 }
 static inline u32 hdmi_phy_read(struct hdmi_phy *phy, u32 reg)
 {
- return msm_readl(phy->mmio + reg);
+ return readl(phy->mmio + reg);
 }
 int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
deleted file mode 100644
index 973b460486..0000000000
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ /dev/null
@@ -1,1399 +0,0 @@
-#ifndef HDMI_XML
-#define HDMI_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
- -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) - -Copyright (C) 2013-2022 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ - - -enum hdmi_hdcp_key_state { - HDCP_KEYS_STATE_NO_KEYS = 0, - HDCP_KEYS_STATE_NOT_CHECKED = 1, - HDCP_KEYS_STATE_CHECKING = 2, - HDCP_KEYS_STATE_VALID = 3, - HDCP_KEYS_STATE_AKSV_NOT_VALID = 4, - HDCP_KEYS_STATE_CHKSUM_MISMATCH = 5, - HDCP_KEYS_STATE_PROD_AKSV = 6, - HDCP_KEYS_STATE_RESERVED = 7, -}; - -enum hdmi_ddc_read_write { - DDC_WRITE = 0, - DDC_READ = 1, -}; - -enum hdmi_acr_cts { - ACR_NONE = 0, - ACR_32 = 1, - ACR_44 = 2, - ACR_48 = 3, -}; - -#define REG_HDMI_CTRL 0x00000000 -#define HDMI_CTRL_ENABLE 0x00000001 -#define HDMI_CTRL_HDMI 0x00000002 -#define HDMI_CTRL_ENCRYPTED 0x00000004 - -#define REG_HDMI_AUDIO_PKT_CTRL1 0x00000020 -#define HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND 0x00000001 - -#define REG_HDMI_ACR_PKT_CTRL 0x00000024 -#define HDMI_ACR_PKT_CTRL_CONT 0x00000001 -#define HDMI_ACR_PKT_CTRL_SEND 0x00000002 -#define HDMI_ACR_PKT_CTRL_SELECT__MASK 0x00000030 -#define HDMI_ACR_PKT_CTRL_SELECT__SHIFT 4 -static inline uint32_t HDMI_ACR_PKT_CTRL_SELECT(enum hdmi_acr_cts val) -{ - return ((val) << HDMI_ACR_PKT_CTRL_SELECT__SHIFT) & HDMI_ACR_PKT_CTRL_SELECT__MASK; -} -#define HDMI_ACR_PKT_CTRL_SOURCE 0x00000100 -#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK 0x00070000 -#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT 16 -static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val) -{ - return ((val) << HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT) & HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK; -} -#define HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY 0x80000000 - -#define REG_HDMI_VBI_PKT_CTRL 0x00000028 -#define HDMI_VBI_PKT_CTRL_GC_ENABLE 0x00000010 -#define HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME 0x00000020 -#define HDMI_VBI_PKT_CTRL_ISRC_SEND 0x00000100 -#define HDMI_VBI_PKT_CTRL_ISRC_CONTINUOUS 0x00000200 -#define HDMI_VBI_PKT_CTRL_ACP_SEND 0x00001000 -#define HDMI_VBI_PKT_CTRL_ACP_SRC_SW 0x00002000 - -#define REG_HDMI_INFOFRAME_CTRL0 0x0000002c -#define HDMI_INFOFRAME_CTRL0_AVI_SEND 0x00000001 -#define HDMI_INFOFRAME_CTRL0_AVI_CONT 0x00000002 -#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND 0x00000010 -#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT 0x00000020 -#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040 -#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080 - -#define REG_HDMI_INFOFRAME_CTRL1 0x00000030 -#define HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK 0x0000003f -#define HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__SHIFT 0 -static inline uint32_t HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(uint32_t val) -{ - return ((val) << HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK; -} -#define HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__MASK 0x00003f00 -#define HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__SHIFT 8 -static inline uint32_t HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE(uint32_t val) -{ - return ((val) << HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__MASK; -} -#define HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__MASK 0x003f0000 -#define HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__SHIFT 16 -static inline uint32_t HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE(uint32_t val) -{ - return ((val) << HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__MASK; -} -#define HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__MASK 0x3f000000 -#define HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__SHIFT 24 -static inline uint32_t HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE(uint32_t val) -{ - return ((val) << HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__MASK; -} - -#define REG_HDMI_GEN_PKT_CTRL 0x00000034 -#define 
HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001 -#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002 -#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK 0x0000000c -#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT 2 -static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE(uint32_t val) -{ - return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK; -} -#define HDMI_GEN_PKT_CTRL_GENERIC1_SEND 0x00000010 -#define HDMI_GEN_PKT_CTRL_GENERIC1_CONT 0x00000020 -#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK 0x003f0000 -#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT 16 -static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_LINE(uint32_t val) -{ - return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK; -} -#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK 0x3f000000 -#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT 24 -static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC1_LINE(uint32_t val) -{ - return ((val) << HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK; -} - -#define REG_HDMI_GC 0x00000040 -#define HDMI_GC_MUTE 0x00000001 - -#define REG_HDMI_AUDIO_PKT_CTRL2 0x00000044 -#define HDMI_AUDIO_PKT_CTRL2_OVERRIDE 0x00000001 -#define HDMI_AUDIO_PKT_CTRL2_LAYOUT 0x00000002 - -static inline uint32_t REG_HDMI_AVI_INFO(uint32_t i0) { return 0x0000006c + 0x4*i0; } - -#define REG_HDMI_GENERIC0_HDR 0x00000084 - -static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*i0; } - -#define REG_HDMI_GENERIC1_HDR 0x000000a4 - -static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; } - -static inline uint32_t REG_HDMI_ACR(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; } - -static inline uint32_t REG_HDMI_ACR_0(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; } -#define HDMI_ACR_0_CTS__MASK 0xfffff000 -#define HDMI_ACR_0_CTS__SHIFT 12 -static inline uint32_t HDMI_ACR_0_CTS(uint32_t val) -{ - return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK; -} - -static inline uint32_t REG_HDMI_ACR_1(enum hdmi_acr_cts i0) { return 0x000000c8 + 0x8*i0; } -#define HDMI_ACR_1_N__MASK 0xffffffff -#define HDMI_ACR_1_N__SHIFT 0 -static inline uint32_t HDMI_ACR_1_N(uint32_t val) -{ - return ((val) << HDMI_ACR_1_N__SHIFT) & HDMI_ACR_1_N__MASK; -} - -#define REG_HDMI_AUDIO_INFO0 0x000000e4 -#define HDMI_AUDIO_INFO0_CHECKSUM__MASK 0x000000ff -#define HDMI_AUDIO_INFO0_CHECKSUM__SHIFT 0 -static inline uint32_t HDMI_AUDIO_INFO0_CHECKSUM(uint32_t val) -{ - return ((val) << HDMI_AUDIO_INFO0_CHECKSUM__SHIFT) & HDMI_AUDIO_INFO0_CHECKSUM__MASK; -} -#define HDMI_AUDIO_INFO0_CC__MASK 0x00000700 -#define HDMI_AUDIO_INFO0_CC__SHIFT 8 -static inline uint32_t HDMI_AUDIO_INFO0_CC(uint32_t val) -{ - return ((val) << HDMI_AUDIO_INFO0_CC__SHIFT) & HDMI_AUDIO_INFO0_CC__MASK; -} - -#define REG_HDMI_AUDIO_INFO1 0x000000e8 -#define HDMI_AUDIO_INFO1_CA__MASK 0x000000ff -#define HDMI_AUDIO_INFO1_CA__SHIFT 0 -static inline uint32_t HDMI_AUDIO_INFO1_CA(uint32_t val) -{ - return ((val) << HDMI_AUDIO_INFO1_CA__SHIFT) & HDMI_AUDIO_INFO1_CA__MASK; -} -#define HDMI_AUDIO_INFO1_LSV__MASK 0x00007800 -#define HDMI_AUDIO_INFO1_LSV__SHIFT 11 -static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val) -{ - return ((val) << HDMI_AUDIO_INFO1_LSV__SHIFT) & HDMI_AUDIO_INFO1_LSV__MASK; -} -#define HDMI_AUDIO_INFO1_DM_INH 0x00008000 - -#define REG_HDMI_HDCP_CTRL 0x00000110 -#define HDMI_HDCP_CTRL_ENABLE 0x00000001 -#define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100 - -#define REG_HDMI_HDCP_DEBUG_CTRL 0x00000114 
-#define HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER 0x00000004 - -#define REG_HDMI_HDCP_INT_CTRL 0x00000118 -#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT 0x00000001 -#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK 0x00000002 -#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK 0x00000004 -#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT 0x00000010 -#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK 0x00000020 -#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK 0x00000040 -#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK 0x00000080 -#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT 0x00000100 -#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_ACK 0x00000200 -#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_MASK 0x00000400 -#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT 0x00001000 -#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_ACK 0x00002000 -#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_MASK 0x00004000 - -#define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c -#define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100 -#define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200 -#define HDMI_HDCP_LINK0_STATUS_RI_MATCHES 0x00001000 -#define HDMI_HDCP_LINK0_STATUS_V_MATCHES 0x00100000 -#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000 -#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28 -static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val) -{ - return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK; -} - -#define REG_HDMI_HDCP_DDC_CTRL_0 0x00000120 -#define HDMI_HDCP_DDC_CTRL_0_DISABLE 0x00000001 - -#define REG_HDMI_HDCP_DDC_CTRL_1 0x00000124 -#define HDMI_HDCP_DDC_CTRL_1_FAILED_ACK 0x00000001 - -#define REG_HDMI_HDCP_DDC_STATUS 0x00000128 -#define HDMI_HDCP_DDC_STATUS_XFER_REQ 0x00000010 -#define HDMI_HDCP_DDC_STATUS_XFER_DONE 0x00000400 -#define HDMI_HDCP_DDC_STATUS_ABORTED 0x00001000 -#define HDMI_HDCP_DDC_STATUS_TIMEOUT 0x00002000 -#define HDMI_HDCP_DDC_STATUS_NACK0 0x00004000 -#define HDMI_HDCP_DDC_STATUS_NACK1 0x00008000 -#define HDMI_HDCP_DDC_STATUS_FAILED 0x00010000 - -#define REG_HDMI_HDCP_ENTROPY_CTRL0 0x0000012c - -#define REG_HDMI_HDCP_ENTROPY_CTRL1 0x0000025c - -#define REG_HDMI_HDCP_RESET 0x00000130 -#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001 - -#define REG_HDMI_HDCP_RCVPORT_DATA0 0x00000134 - -#define REG_HDMI_HDCP_RCVPORT_DATA1 0x00000138 - -#define REG_HDMI_HDCP_RCVPORT_DATA2_0 0x0000013c - -#define REG_HDMI_HDCP_RCVPORT_DATA2_1 0x00000140 - -#define REG_HDMI_HDCP_RCVPORT_DATA3 0x00000144 - -#define REG_HDMI_HDCP_RCVPORT_DATA4 0x00000148 - -#define REG_HDMI_HDCP_RCVPORT_DATA5 0x0000014c - -#define REG_HDMI_HDCP_RCVPORT_DATA6 0x00000150 - -#define REG_HDMI_HDCP_RCVPORT_DATA7 0x00000154 - -#define REG_HDMI_HDCP_RCVPORT_DATA8 0x00000158 - -#define REG_HDMI_HDCP_RCVPORT_DATA9 0x0000015c - -#define REG_HDMI_HDCP_RCVPORT_DATA10 0x00000160 - -#define REG_HDMI_HDCP_RCVPORT_DATA11 0x00000164 - -#define REG_HDMI_HDCP_RCVPORT_DATA12 0x00000168 - -#define REG_HDMI_VENSPEC_INFO0 0x0000016c - -#define REG_HDMI_VENSPEC_INFO1 0x00000170 - -#define REG_HDMI_VENSPEC_INFO2 0x00000174 - -#define REG_HDMI_VENSPEC_INFO3 0x00000178 - -#define REG_HDMI_VENSPEC_INFO4 0x0000017c - -#define REG_HDMI_VENSPEC_INFO5 0x00000180 - -#define REG_HDMI_VENSPEC_INFO6 0x00000184 - -#define REG_HDMI_AUDIO_CFG 0x000001d0 -#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001 -#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0 -#define HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT 4 -static inline uint32_t HDMI_AUDIO_CFG_FIFO_WATERMARK(uint32_t val) -{ - return ((val) << HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT) & 
HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK; -} - -#define REG_HDMI_USEC_REFTIMER 0x00000208 - -#define REG_HDMI_DDC_CTRL 0x0000020c -#define HDMI_DDC_CTRL_GO 0x00000001 -#define HDMI_DDC_CTRL_SOFT_RESET 0x00000002 -#define HDMI_DDC_CTRL_SEND_RESET 0x00000004 -#define HDMI_DDC_CTRL_SW_STATUS_RESET 0x00000008 -#define HDMI_DDC_CTRL_TRANSACTION_CNT__MASK 0x00300000 -#define HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT 20 -static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val) -{ - return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK; -} - -#define REG_HDMI_DDC_ARBITRATION 0x00000210 -#define HDMI_DDC_ARBITRATION_HW_ARBITRATION 0x00000010 - -#define REG_HDMI_DDC_INT_CTRL 0x00000214 -#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001 -#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002 -#define HDMI_DDC_INT_CTRL_SW_DONE_MASK 0x00000004 - -#define REG_HDMI_DDC_SW_STATUS 0x00000218 -#define HDMI_DDC_SW_STATUS_NACK0 0x00001000 -#define HDMI_DDC_SW_STATUS_NACK1 0x00002000 -#define HDMI_DDC_SW_STATUS_NACK2 0x00004000 -#define HDMI_DDC_SW_STATUS_NACK3 0x00008000 - -#define REG_HDMI_DDC_HW_STATUS 0x0000021c -#define HDMI_DDC_HW_STATUS_DONE 0x00000008 - -#define REG_HDMI_DDC_SPEED 0x00000220 -#define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003 -#define HDMI_DDC_SPEED_THRESHOLD__SHIFT 0 -static inline uint32_t HDMI_DDC_SPEED_THRESHOLD(uint32_t val) -{ - return ((val) << HDMI_DDC_SPEED_THRESHOLD__SHIFT) & HDMI_DDC_SPEED_THRESHOLD__MASK; -} -#define HDMI_DDC_SPEED_PRESCALE__MASK 0xffff0000 -#define HDMI_DDC_SPEED_PRESCALE__SHIFT 16 -static inline uint32_t HDMI_DDC_SPEED_PRESCALE(uint32_t val) -{ - return ((val) << HDMI_DDC_SPEED_PRESCALE__SHIFT) & HDMI_DDC_SPEED_PRESCALE__MASK; -} - -#define REG_HDMI_DDC_SETUP 0x00000224 -#define HDMI_DDC_SETUP_TIMEOUT__MASK 0xff000000 -#define HDMI_DDC_SETUP_TIMEOUT__SHIFT 24 -static inline uint32_t HDMI_DDC_SETUP_TIMEOUT(uint32_t val) -{ - return ((val) << HDMI_DDC_SETUP_TIMEOUT__SHIFT) & HDMI_DDC_SETUP_TIMEOUT__MASK; -} - -static inline uint32_t REG_HDMI_I2C_TRANSACTION(uint32_t i0) { return 0x00000228 + 0x4*i0; } - -static inline uint32_t REG_HDMI_I2C_TRANSACTION_REG(uint32_t i0) { return 0x00000228 + 0x4*i0; } -#define HDMI_I2C_TRANSACTION_REG_RW__MASK 0x00000001 -#define HDMI_I2C_TRANSACTION_REG_RW__SHIFT 0 -static inline uint32_t HDMI_I2C_TRANSACTION_REG_RW(enum hdmi_ddc_read_write val) -{ - return ((val) << HDMI_I2C_TRANSACTION_REG_RW__SHIFT) & HDMI_I2C_TRANSACTION_REG_RW__MASK; -} -#define HDMI_I2C_TRANSACTION_REG_STOP_ON_NACK 0x00000100 -#define HDMI_I2C_TRANSACTION_REG_START 0x00001000 -#define HDMI_I2C_TRANSACTION_REG_STOP 0x00002000 -#define HDMI_I2C_TRANSACTION_REG_CNT__MASK 0x00ff0000 -#define HDMI_I2C_TRANSACTION_REG_CNT__SHIFT 16 -static inline uint32_t HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val) -{ - return ((val) << HDMI_I2C_TRANSACTION_REG_CNT__SHIFT) & HDMI_I2C_TRANSACTION_REG_CNT__MASK; -} - -#define REG_HDMI_DDC_DATA 0x00000238 -#define HDMI_DDC_DATA_DATA_RW__MASK 0x00000001 -#define HDMI_DDC_DATA_DATA_RW__SHIFT 0 -static inline uint32_t HDMI_DDC_DATA_DATA_RW(enum hdmi_ddc_read_write val) -{ - return ((val) << HDMI_DDC_DATA_DATA_RW__SHIFT) & HDMI_DDC_DATA_DATA_RW__MASK; -} -#define HDMI_DDC_DATA_DATA__MASK 0x0000ff00 -#define HDMI_DDC_DATA_DATA__SHIFT 8 -static inline uint32_t HDMI_DDC_DATA_DATA(uint32_t val) -{ - return ((val) << HDMI_DDC_DATA_DATA__SHIFT) & HDMI_DDC_DATA_DATA__MASK; -} -#define HDMI_DDC_DATA_INDEX__MASK 0x00ff0000 -#define HDMI_DDC_DATA_INDEX__SHIFT 16 -static inline uint32_t 
HDMI_DDC_DATA_INDEX(uint32_t val) -{ - return ((val) << HDMI_DDC_DATA_INDEX__SHIFT) & HDMI_DDC_DATA_INDEX__MASK; -} -#define HDMI_DDC_DATA_INDEX_WRITE 0x80000000 - -#define REG_HDMI_HDCP_SHA_CTRL 0x0000023c - -#define REG_HDMI_HDCP_SHA_STATUS 0x00000240 -#define HDMI_HDCP_SHA_STATUS_BLOCK_DONE 0x00000001 -#define HDMI_HDCP_SHA_STATUS_COMP_DONE 0x00000010 - -#define REG_HDMI_HDCP_SHA_DATA 0x00000244 -#define HDMI_HDCP_SHA_DATA_DONE 0x00000001 - -#define REG_HDMI_HPD_INT_STATUS 0x00000250 -#define HDMI_HPD_INT_STATUS_INT 0x00000001 -#define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002 - -#define REG_HDMI_HPD_INT_CTRL 0x00000254 -#define HDMI_HPD_INT_CTRL_INT_ACK 0x00000001 -#define HDMI_HPD_INT_CTRL_INT_CONNECT 0x00000002 -#define HDMI_HPD_INT_CTRL_INT_EN 0x00000004 -#define HDMI_HPD_INT_CTRL_RX_INT_ACK 0x00000010 -#define HDMI_HPD_INT_CTRL_RX_INT_EN 0x00000020 -#define HDMI_HPD_INT_CTRL_RCV_PLUGIN_DET_MASK 0x00000200 - -#define REG_HDMI_HPD_CTRL 0x00000258 -#define HDMI_HPD_CTRL_TIMEOUT__MASK 0x00001fff -#define HDMI_HPD_CTRL_TIMEOUT__SHIFT 0 -static inline uint32_t HDMI_HPD_CTRL_TIMEOUT(uint32_t val) -{ - return ((val) << HDMI_HPD_CTRL_TIMEOUT__SHIFT) & HDMI_HPD_CTRL_TIMEOUT__MASK; -} -#define HDMI_HPD_CTRL_ENABLE 0x10000000 - -#define REG_HDMI_DDC_REF 0x0000027c -#define HDMI_DDC_REF_REFTIMER_ENABLE 0x00010000 -#define HDMI_DDC_REF_REFTIMER__MASK 0x0000ffff -#define HDMI_DDC_REF_REFTIMER__SHIFT 0 -static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val) -{ - return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK; -} - -#define REG_HDMI_HDCP_SW_UPPER_AKSV 0x00000284 - -#define REG_HDMI_HDCP_SW_LOWER_AKSV 0x00000288 - -#define REG_HDMI_CEC_CTRL 0x0000028c - -#define REG_HDMI_CEC_WR_DATA 0x00000290 - -#define REG_HDMI_CEC_CEC_RETRANSMIT 0x00000294 - -#define REG_HDMI_CEC_STATUS 0x00000298 - -#define REG_HDMI_CEC_INT 0x0000029c - -#define REG_HDMI_CEC_ADDR 0x000002a0 - -#define REG_HDMI_CEC_TIME 0x000002a4 - -#define REG_HDMI_CEC_REFTIMER 0x000002a8 - -#define REG_HDMI_CEC_RD_DATA 0x000002ac - -#define REG_HDMI_CEC_RD_FILTER 0x000002b0 - -#define REG_HDMI_ACTIVE_HSYNC 0x000002b4 -#define HDMI_ACTIVE_HSYNC_START__MASK 0x00001fff -#define HDMI_ACTIVE_HSYNC_START__SHIFT 0 -static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val) -{ - return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK; -} -#define HDMI_ACTIVE_HSYNC_END__MASK 0x0fff0000 -#define HDMI_ACTIVE_HSYNC_END__SHIFT 16 -static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val) -{ - return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) & HDMI_ACTIVE_HSYNC_END__MASK; -} - -#define REG_HDMI_ACTIVE_VSYNC 0x000002b8 -#define HDMI_ACTIVE_VSYNC_START__MASK 0x00001fff -#define HDMI_ACTIVE_VSYNC_START__SHIFT 0 -static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val) -{ - return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK; -} -#define HDMI_ACTIVE_VSYNC_END__MASK 0x1fff0000 -#define HDMI_ACTIVE_VSYNC_END__SHIFT 16 -static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val) -{ - return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) & HDMI_ACTIVE_VSYNC_END__MASK; -} - -#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc -#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00001fff -#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0 -static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val) -{ - return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK; -} -#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x1fff0000 -#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16 -static 
inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val) -{ - return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) & HDMI_VSYNC_ACTIVE_F2_END__MASK; -} - -#define REG_HDMI_TOTAL 0x000002c0 -#define HDMI_TOTAL_H_TOTAL__MASK 0x00001fff -#define HDMI_TOTAL_H_TOTAL__SHIFT 0 -static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val) -{ - return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK; -} -#define HDMI_TOTAL_V_TOTAL__MASK 0x1fff0000 -#define HDMI_TOTAL_V_TOTAL__SHIFT 16 -static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val) -{ - return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) & HDMI_TOTAL_V_TOTAL__MASK; -} - -#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4 -#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00001fff -#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0 -static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val) -{ - return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) & HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK; -} - -#define REG_HDMI_FRAME_CTRL 0x000002c8 -#define HDMI_FRAME_CTRL_RGB_MUX_SEL_BGR 0x00001000 -#define HDMI_FRAME_CTRL_VSYNC_LOW 0x10000000 -#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000 -#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000 - -#define REG_HDMI_AUD_INT 0x000002cc -#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001 -#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002 -#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004 -#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008 - -#define REG_HDMI_PHY_CTRL 0x000002d4 -#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001 -#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002 -#define HDMI_PHY_CTRL_SW_RESET 0x00000004 -#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008 - -#define REG_HDMI_CEC_WR_RANGE 0x000002dc - -#define REG_HDMI_CEC_RD_RANGE 0x000002e0 - -#define REG_HDMI_VERSION 0x000002e4 - -#define REG_HDMI_CEC_COMPL_CTL 0x00000360 - -#define REG_HDMI_CEC_RD_START_RANGE 0x00000364 - -#define REG_HDMI_CEC_RD_TOTAL_RANGE 0x00000368 - -#define REG_HDMI_CEC_RD_ERR_RESP_LO 0x0000036c - -#define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370 - -#define REG_HDMI_8x60_PHY_REG0 0x00000000 -#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c -#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2 -static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val) -{ - return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK; -} - -#define REG_HDMI_8x60_PHY_REG1 0x00000004 -#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0 -#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4 -static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val) -{ - return ((val) << HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT) & HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK; -} -#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK 0x0000000f -#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT 0 -static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) -{ - return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK; -} - -#define REG_HDMI_8x60_PHY_REG2 0x00000008 -#define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001 -#define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002 -#define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004 -#define HDMI_8x60_PHY_REG2_PD_DRIVE_3 0x00000008 -#define HDMI_8x60_PHY_REG2_PD_DRIVE_4 0x00000010 -#define HDMI_8x60_PHY_REG2_PD_PLL 0x00000020 -#define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040 -#define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080 - -#define REG_HDMI_8x60_PHY_REG3 0x0000000c -#define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001 - -#define 
REG_HDMI_8x60_PHY_REG4 0x00000010 - -#define REG_HDMI_8x60_PHY_REG5 0x00000014 - -#define REG_HDMI_8x60_PHY_REG6 0x00000018 - -#define REG_HDMI_8x60_PHY_REG7 0x0000001c - -#define REG_HDMI_8x60_PHY_REG8 0x00000020 - -#define REG_HDMI_8x60_PHY_REG9 0x00000024 - -#define REG_HDMI_8x60_PHY_REG10 0x00000028 - -#define REG_HDMI_8x60_PHY_REG11 0x0000002c - -#define REG_HDMI_8x60_PHY_REG12 0x00000030 -#define HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001 -#define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 0x00000002 -#define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010 - -#define REG_HDMI_8960_PHY_REG0 0x00000000 - -#define REG_HDMI_8960_PHY_REG1 0x00000004 - -#define REG_HDMI_8960_PHY_REG2 0x00000008 - -#define REG_HDMI_8960_PHY_REG3 0x0000000c - -#define REG_HDMI_8960_PHY_REG4 0x00000010 - -#define REG_HDMI_8960_PHY_REG5 0x00000014 - -#define REG_HDMI_8960_PHY_REG6 0x00000018 - -#define REG_HDMI_8960_PHY_REG7 0x0000001c - -#define REG_HDMI_8960_PHY_REG8 0x00000020 - -#define REG_HDMI_8960_PHY_REG9 0x00000024 - -#define REG_HDMI_8960_PHY_REG10 0x00000028 - -#define REG_HDMI_8960_PHY_REG11 0x0000002c - -#define REG_HDMI_8960_PHY_REG12 0x00000030 -#define HDMI_8960_PHY_REG12_SW_RESET 0x00000020 -#define HDMI_8960_PHY_REG12_PWRDN_B 0x00000080 - -#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000034 - -#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000038 - -#define REG_HDMI_8960_PHY_REG_MISC0 0x0000003c - -#define REG_HDMI_8960_PHY_REG13 0x00000040 - -#define REG_HDMI_8960_PHY_REG14 0x00000044 - -#define REG_HDMI_8960_PHY_REG15 0x00000048 - -#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000000 - -#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000004 - -#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000008 - -#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000000c - -#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000010 - -#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000014 - -#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000018 -#define HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL 0x00000002 -#define HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B 0x00000008 - -#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000001c - -#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000020 - -#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000024 - -#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000028 - -#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000002c - -#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000030 - -#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000034 - -#define REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000038 - -#define REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000003c - -#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000040 - -#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000044 - -#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000048 - -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000004c - -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000050 - -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000054 - -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000058 - -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000005c - -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000060 - -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000064 - -#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000068 - -#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000006c - -#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000070 - -#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000074 - -#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000078 - -#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000007c - -#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000080 - -#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000084 - -#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000088 - 
-#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000008c - -#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000090 - -#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000094 - -#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000098 -#define HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK 0x00000001 - -#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000009c - -#define REG_HDMI_8x74_ANA_CFG0 0x00000000 - -#define REG_HDMI_8x74_ANA_CFG1 0x00000004 - -#define REG_HDMI_8x74_ANA_CFG2 0x00000008 - -#define REG_HDMI_8x74_ANA_CFG3 0x0000000c - -#define REG_HDMI_8x74_PD_CTRL0 0x00000010 - -#define REG_HDMI_8x74_PD_CTRL1 0x00000014 - -#define REG_HDMI_8x74_GLB_CFG 0x00000018 - -#define REG_HDMI_8x74_DCC_CFG0 0x0000001c - -#define REG_HDMI_8x74_DCC_CFG1 0x00000020 - -#define REG_HDMI_8x74_TXCAL_CFG0 0x00000024 - -#define REG_HDMI_8x74_TXCAL_CFG1 0x00000028 - -#define REG_HDMI_8x74_TXCAL_CFG2 0x0000002c - -#define REG_HDMI_8x74_TXCAL_CFG3 0x00000030 - -#define REG_HDMI_8x74_BIST_CFG0 0x00000034 - -#define REG_HDMI_8x74_BIST_PATN0 0x0000003c - -#define REG_HDMI_8x74_BIST_PATN1 0x00000040 - -#define REG_HDMI_8x74_BIST_PATN2 0x00000044 - -#define REG_HDMI_8x74_BIST_PATN3 0x00000048 - -#define REG_HDMI_8x74_STATUS 0x0000005c - -#define REG_HDMI_28nm_PHY_PLL_REFCLK_CFG 0x00000000 - -#define REG_HDMI_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004 - -#define REG_HDMI_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008 - -#define REG_HDMI_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c - -#define REG_HDMI_28nm_PHY_PLL_VREG_CFG 0x00000010 - -#define REG_HDMI_28nm_PHY_PLL_PWRGEN_CFG 0x00000014 - -#define REG_HDMI_28nm_PHY_PLL_DMUX_CFG 0x00000018 - -#define REG_HDMI_28nm_PHY_PLL_AMUX_CFG 0x0000001c - -#define REG_HDMI_28nm_PHY_PLL_GLB_CFG 0x00000020 -#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001 -#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002 -#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004 -#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008 - -#define REG_HDMI_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024 - -#define REG_HDMI_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028 - -#define REG_HDMI_28nm_PHY_PLL_LPFR_CFG 0x0000002c - -#define REG_HDMI_28nm_PHY_PLL_LPFC1_CFG 0x00000030 - -#define REG_HDMI_28nm_PHY_PLL_LPFC2_CFG 0x00000034 - -#define REG_HDMI_28nm_PHY_PLL_SDM_CFG0 0x00000038 - -#define REG_HDMI_28nm_PHY_PLL_SDM_CFG1 0x0000003c - -#define REG_HDMI_28nm_PHY_PLL_SDM_CFG2 0x00000040 - -#define REG_HDMI_28nm_PHY_PLL_SDM_CFG3 0x00000044 - -#define REG_HDMI_28nm_PHY_PLL_SDM_CFG4 0x00000048 - -#define REG_HDMI_28nm_PHY_PLL_SSC_CFG0 0x0000004c - -#define REG_HDMI_28nm_PHY_PLL_SSC_CFG1 0x00000050 - -#define REG_HDMI_28nm_PHY_PLL_SSC_CFG2 0x00000054 - -#define REG_HDMI_28nm_PHY_PLL_SSC_CFG3 0x00000058 - -#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG0 0x0000005c - -#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG1 0x00000060 - -#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG2 0x00000064 - -#define REG_HDMI_28nm_PHY_PLL_TEST_CFG 0x00000068 -#define HDMI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001 - -#define REG_HDMI_28nm_PHY_PLL_CAL_CFG0 0x0000006c - -#define REG_HDMI_28nm_PHY_PLL_CAL_CFG1 0x00000070 - -#define REG_HDMI_28nm_PHY_PLL_CAL_CFG2 0x00000074 - -#define REG_HDMI_28nm_PHY_PLL_CAL_CFG3 0x00000078 - -#define REG_HDMI_28nm_PHY_PLL_CAL_CFG4 0x0000007c - -#define REG_HDMI_28nm_PHY_PLL_CAL_CFG5 0x00000080 - -#define REG_HDMI_28nm_PHY_PLL_CAL_CFG6 0x00000084 - -#define REG_HDMI_28nm_PHY_PLL_CAL_CFG7 0x00000088 - -#define REG_HDMI_28nm_PHY_PLL_CAL_CFG8 0x0000008c - -#define REG_HDMI_28nm_PHY_PLL_CAL_CFG9 0x00000090 - -#define REG_HDMI_28nm_PHY_PLL_CAL_CFG10 0x00000094 - -#define 
REG_HDMI_28nm_PHY_PLL_CAL_CFG11 0x00000098 - -#define REG_HDMI_28nm_PHY_PLL_EFUSE_CFG 0x0000009c - -#define REG_HDMI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0 - -#define REG_HDMI_28nm_PHY_PLL_STATUS 0x000000c0 - -#define REG_HDMI_8996_PHY_CFG 0x00000000 - -#define REG_HDMI_8996_PHY_PD_CTL 0x00000004 - -#define REG_HDMI_8996_PHY_MODE 0x00000008 - -#define REG_HDMI_8996_PHY_MISR_CLEAR 0x0000000c - -#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG0 0x00000010 - -#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG1 0x00000014 - -#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE0 0x00000018 - -#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE1 0x0000001c - -#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN0 0x00000020 - -#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN1 0x00000024 - -#define REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG0 0x00000028 - -#define REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG1 0x0000002c - -#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE0 0x00000030 - -#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE1 0x00000034 - -#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN0 0x00000038 - -#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN1 0x0000003c - -#define REG_HDMI_8996_PHY_DEBUG_BUS_SEL 0x00000040 - -#define REG_HDMI_8996_PHY_TXCAL_CFG0 0x00000044 - -#define REG_HDMI_8996_PHY_TXCAL_CFG1 0x00000048 - -#define REG_HDMI_8996_PHY_TX0_TX1_LANE_CTL 0x0000004c - -#define REG_HDMI_8996_PHY_TX2_TX3_LANE_CTL 0x00000050 - -#define REG_HDMI_8996_PHY_LANE_BIST_CONFIG 0x00000054 - -#define REG_HDMI_8996_PHY_CLOCK 0x00000058 - -#define REG_HDMI_8996_PHY_MISC1 0x0000005c - -#define REG_HDMI_8996_PHY_MISC2 0x00000060 - -#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS0 0x00000064 - -#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS1 0x00000068 - -#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS2 0x0000006c - -#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS0 0x00000070 - -#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS1 0x00000074 - -#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS2 0x00000078 - -#define REG_HDMI_8996_PHY_PRE_MISR_STATUS0 0x0000007c - -#define REG_HDMI_8996_PHY_PRE_MISR_STATUS1 0x00000080 - -#define REG_HDMI_8996_PHY_PRE_MISR_STATUS2 0x00000084 - -#define REG_HDMI_8996_PHY_PRE_MISR_STATUS3 0x00000088 - -#define REG_HDMI_8996_PHY_POST_MISR_STATUS0 0x0000008c - -#define REG_HDMI_8996_PHY_POST_MISR_STATUS1 0x00000090 - -#define REG_HDMI_8996_PHY_POST_MISR_STATUS2 0x00000094 - -#define REG_HDMI_8996_PHY_POST_MISR_STATUS3 0x00000098 - -#define REG_HDMI_8996_PHY_STATUS 0x0000009c - -#define REG_HDMI_8996_PHY_MISC3_STATUS 0x000000a0 - -#define REG_HDMI_8996_PHY_MISC4_STATUS 0x000000a4 - -#define REG_HDMI_8996_PHY_DEBUG_BUS0 0x000000a8 - -#define REG_HDMI_8996_PHY_DEBUG_BUS1 0x000000ac - -#define REG_HDMI_8996_PHY_DEBUG_BUS2 0x000000b0 - -#define REG_HDMI_8996_PHY_DEBUG_BUS3 0x000000b4 - -#define REG_HDMI_8996_PHY_PHY_REVISION_ID0 0x000000b8 - -#define REG_HDMI_8996_PHY_PHY_REVISION_ID1 0x000000bc - -#define REG_HDMI_8996_PHY_PHY_REVISION_ID2 0x000000c0 - -#define REG_HDMI_8996_PHY_PHY_REVISION_ID3 0x000000c4 - -#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL1 0x00000000 - -#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL2 0x00000004 - -#define REG_HDMI_PHY_QSERDES_COM_FREQ_UPDATE 0x00000008 - -#define REG_HDMI_PHY_QSERDES_COM_BG_TIMER 0x0000000c - -#define REG_HDMI_PHY_QSERDES_COM_SSC_EN_CENTER 0x00000010 - -#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER1 0x00000014 - -#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER2 0x00000018 - -#define REG_HDMI_PHY_QSERDES_COM_SSC_PER1 0x0000001c - -#define REG_HDMI_PHY_QSERDES_COM_SSC_PER2 0x00000020 - -#define 
REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE1 0x00000024 - -#define REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE2 0x00000028 - -#define REG_HDMI_PHY_QSERDES_COM_POST_DIV 0x0000002c - -#define REG_HDMI_PHY_QSERDES_COM_POST_DIV_MUX 0x00000030 - -#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x00000034 - -#define REG_HDMI_PHY_QSERDES_COM_CLK_ENABLE1 0x00000038 - -#define REG_HDMI_PHY_QSERDES_COM_SYS_CLK_CTRL 0x0000003c - -#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE 0x00000040 - -#define REG_HDMI_PHY_QSERDES_COM_PLL_EN 0x00000044 - -#define REG_HDMI_PHY_QSERDES_COM_PLL_IVCO 0x00000048 - -#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0 0x0000004c - -#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0 0x00000050 - -#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0 0x00000054 - -#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE1 0x00000058 - -#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE1 0x0000005c - -#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE1 0x00000060 - -#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE2 0x00000064 - -#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD0 0x00000064 - -#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE2 0x00000068 - -#define REG_HDMI_PHY_QSERDES_COM_EP_CLOCK_DETECT_CTRL 0x00000068 - -#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE2 0x0000006c - -#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_DET_COMP_STATUS 0x0000006c - -#define REG_HDMI_PHY_QSERDES_COM_BG_TRIM 0x00000070 - -#define REG_HDMI_PHY_QSERDES_COM_CLK_EP_DIV 0x00000074 - -#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE0 0x00000078 - -#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE1 0x0000007c - -#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE2 0x00000080 - -#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD1 0x00000080 - -#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE0 0x00000084 - -#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE1 0x00000088 - -#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE2 0x0000008c - -#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD2 0x0000008c - -#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE0 0x00000090 - -#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE1 0x00000094 - -#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE2 0x00000098 - -#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD3 0x00000098 - -#define REG_HDMI_PHY_QSERDES_COM_PLL_CNTRL 0x0000009c - -#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_CTRL 0x000000a0 - -#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_DC 0x000000a4 - -#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_IN_SYNC_SEL 0x000000a8 - -#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x000000a8 - -#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_EN_SEL 0x000000ac - -#define REG_HDMI_PHY_QSERDES_COM_CML_SYSCLK_SEL 0x000000b0 - -#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL 0x000000b4 - -#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL2 0x000000b8 - -#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL 0x000000bc - -#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL2 0x000000c0 - -#define REG_HDMI_PHY_QSERDES_COM_RESCODE_DIV_NUM 0x000000c4 - -#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_EN 0x000000c8 - -#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_CFG 0x000000cc - -#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE0 0x000000d0 - -#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE1 0x000000d4 - -#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE2 0x000000d8 - -#define REG_HDMI_PHY_QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x000000d8 - -#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0 0x000000dc - -#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0 0x000000e0 - -#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0 0x000000e4 - -#define 
REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE1 0x000000e8 - -#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE1 0x000000ec - -#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE1 0x000000f0 - -#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE2 0x000000f4 - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL1 0x000000f4 - -#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE2 0x000000f8 - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL2 0x000000f8 - -#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE2 0x000000fc - -#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD4 0x000000fc - -#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_INITVAL 0x00000100 - -#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_EN 0x00000104 - -#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00000108 - -#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x0000010c - -#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x00000110 - -#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x00000114 - -#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE2 0x00000118 - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL1 0x00000118 - -#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE2 0x0000011c - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL2 0x0000011c - -#define REG_HDMI_PHY_QSERDES_COM_RES_TRIM_CONTROL2 0x00000120 - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_CTRL 0x00000124 - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAP 0x00000128 - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE0 0x0000012c - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE0 0x00000130 - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE1 0x00000134 - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE1 0x00000138 - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE2 0x0000013c - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL1 0x0000013c - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE2 0x00000140 - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL2 0x00000140 - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER1 0x00000144 - -#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER2 0x00000148 - -#define REG_HDMI_PHY_QSERDES_COM_SAR 0x0000014c - -#define REG_HDMI_PHY_QSERDES_COM_SAR_CLK 0x00000150 - -#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_OUT_STATUS 0x00000154 - -#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_READY_STATUS 0x00000158 - -#define REG_HDMI_PHY_QSERDES_COM_CMN_STATUS 0x0000015c - -#define REG_HDMI_PHY_QSERDES_COM_RESET_SM_STATUS 0x00000160 - -#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CODE_STATUS 0x00000164 - -#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE1_STATUS 0x00000168 - -#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE2_STATUS 0x0000016c - -#define REG_HDMI_PHY_QSERDES_COM_BG_CTRL 0x00000170 - -#define REG_HDMI_PHY_QSERDES_COM_CLK_SELECT 0x00000174 - -#define REG_HDMI_PHY_QSERDES_COM_HSCLK_SEL 0x00000178 - -#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_BINCODE_STATUS 0x0000017c - -#define REG_HDMI_PHY_QSERDES_COM_PLL_ANALOG 0x00000180 - -#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV 0x00000184 - -#define REG_HDMI_PHY_QSERDES_COM_SW_RESET 0x00000188 - -#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_EN 0x0000018c - -#define REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS 0x00000190 - -#define REG_HDMI_PHY_QSERDES_COM_CMN_CONFIG 0x00000194 - -#define REG_HDMI_PHY_QSERDES_COM_CMN_RATE_OVERRIDE 0x00000198 - -#define REG_HDMI_PHY_QSERDES_COM_SVS_MODE_CLK_SEL 0x0000019c - -#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS0 0x000001a0 - -#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS1 0x000001a4 - -#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS2 0x000001a8 - -#define 
REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS3 0x000001ac - -#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS_SEL 0x000001b0 - -#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC1 0x000001b4 - -#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC2 0x000001b8 - -#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE1 0x000001bc - -#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE2 0x000001c0 - -#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD5 0x000001c4 - -#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_MODE_LANENO 0x00000000 - -#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_INVERT 0x00000004 - -#define REG_HDMI_PHY_QSERDES_TX_LX_CLKBUF_ENABLE 0x00000008 - -#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_ONE 0x0000000c - -#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_TWO 0x00000010 - -#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_THREE 0x00000014 - -#define REG_HDMI_PHY_QSERDES_TX_LX_TX_EMP_POST1_LVL 0x00000018 - -#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POST2_EMPH 0x0000001c - -#define REG_HDMI_PHY_QSERDES_TX_LX_TX_BOOST_LVL_UP_DN 0x00000020 - -#define REG_HDMI_PHY_QSERDES_TX_LX_HP_PD_ENABLES 0x00000024 - -#define REG_HDMI_PHY_QSERDES_TX_LX_TX_IDLE_LVL_LARGE_AMP 0x00000028 - -#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL 0x0000002c - -#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL_OFFSET 0x00000030 - -#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_TSYNC_EN 0x00000034 - -#define REG_HDMI_PHY_QSERDES_TX_LX_PRE_STALL_LDO_BOOST_EN 0x00000038 - -#define REG_HDMI_PHY_QSERDES_TX_LX_TX_BAND 0x0000003c - -#define REG_HDMI_PHY_QSERDES_TX_LX_SLEW_CNTL 0x00000040 - -#define REG_HDMI_PHY_QSERDES_TX_LX_INTERFACE_SELECT 0x00000044 - -#define REG_HDMI_PHY_QSERDES_TX_LX_LPB_EN 0x00000048 - -#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_TX 0x0000004c - -#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_RX 0x00000050 - -#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_OFFSET 0x00000054 - -#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH1 0x00000058 - -#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH2 0x0000005c - -#define REG_HDMI_PHY_QSERDES_TX_LX_SERDES_BYP_EN_OUT 0x00000060 - -#define REG_HDMI_PHY_QSERDES_TX_LX_DEBUG_BUS_SEL 0x00000064 - -#define REG_HDMI_PHY_QSERDES_TX_LX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x00000068 - -#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POL_INV 0x0000006c - -#define REG_HDMI_PHY_QSERDES_TX_LX_PARRATE_REC_DETECT_IDLE_EN 0x00000070 - -#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN1 0x00000074 - -#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN2 0x00000078 - -#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN3 0x0000007c - -#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN4 0x00000080 - -#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN5 0x00000084 - -#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN6 0x00000088 - -#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN7 0x0000008c - -#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN8 0x00000090 - -#define REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE 0x00000094 - -#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE 0x00000098 - -#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE_CONFIGURATION 0x0000009c - -#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL1 0x000000a0 - -#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL2 0x000000a4 - -#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL 0x000000a8 - -#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL_2 0x000000ac - -#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED1 0x000000b0 - -#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED2 0x000000b4 - -#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED3 0x000000b8 - -#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED4 0x000000bc - -#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN 
0x000000c0 - -#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN_MUXES 0x000000c4 - -#define REG_HDMI_PHY_QSERDES_TX_LX_TRAN_DRVR_EMP_EN 0x000000c8 - -#define REG_HDMI_PHY_QSERDES_TX_LX_TX_INTERFACE_MODE 0x000000cc - -#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_CTRL 0x000000d0 - -#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_ENCODED_OR_DATA 0x000000d4 - -#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND2 0x000000d8 - -#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND2 0x000000dc - -#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND2 0x000000e0 - -#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND2 0x000000e4 - -#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND0_1 0x000000e8 - -#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND0_1 0x000000ec - -#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND0_1 0x000000f0 - -#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND0_1 0x000000f4 - -#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL1 0x000000f8 - -#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL2 0x000000fc - -#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV_CNTL 0x00000100 - -#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_STATUS 0x00000104 - -#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT1 0x00000108 - -#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT2 0x0000010c - -#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV 0x00000110 - - -#endif /* HDMI_XML */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c index 4dd0554166..8c8d80b595 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c @@ -86,18 +86,18 @@ static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8996 *pll) static inline void hdmi_pll_write(struct hdmi_pll_8996 *pll, int offset, u32 data) { - msm_writel(data, pll->mmio_qserdes_com + offset); + writel(data, pll->mmio_qserdes_com + offset); } static inline u32 hdmi_pll_read(struct hdmi_pll_8996 *pll, int offset) { - return msm_readl(pll->mmio_qserdes_com + offset); + return readl(pll->mmio_qserdes_com + offset); } static inline void hdmi_tx_chan_write(struct hdmi_pll_8996 *pll, int channel, int offset, int data) { - msm_writel(data, pll->mmio_qserdes_tx[channel] + offset); + writel(data, pll->mmio_qserdes_tx[channel] + offset); } static inline u32 pll_get_cpctrl(u64 frac_start, unsigned long ref_clk, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c index cb35a297af..83c8781fcc 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c @@ -236,12 +236,12 @@ static const struct pll_rate freqtbl[] = { static inline void pll_write(struct hdmi_pll_8960 *pll, u32 reg, u32 data) { - msm_writel(data, pll->mmio + reg); + writel(data, pll->mmio + reg); } static inline u32 pll_read(struct hdmi_pll_8960 *pll, u32 reg) { - return msm_readl(pll->mmio + reg); + return readl(pll->mmio + reg); } static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8960 *pll) diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h deleted file mode 100644 index 4988015266..0000000000 --- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h +++ /dev/null @@ -1,61 +0,0 @@ -#ifndef QFPROM_XML -#define QFPROM_XML - -/* Autogenerated file, DO NOT EDIT manually! 
- -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) - -Copyright (C) 2013-2022 by the following authors: -- Rob Clark (robclark) -- Ilia Mirkin (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ - - -#define REG_QFPROM_CONFIG_ROW0_LSB 0x00000238 -#define QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE 0x00200000 -#define QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE 0x00400000 - - -#endif /* QFPROM_XML */ diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 97790faffd..9c33f4e3f8 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -17,8 +17,9 @@ #include "msm_drv.h" #include "msm_debugfs.h" +#include "msm_gem.h" +#include "msm_gpu.h" #include "msm_kms.h" -#include "adreno/adreno_gpu.h" /* * MSM driver version: diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 65f2136604..912ebaa5df 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -239,9 +239,7 @@ struct msm_drm_private { bool disable_err_irq; }; -struct msm_format { - uint32_t pixel_format; -}; +const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier); struct msm_pending_timer; @@ -488,15 +486,12 @@ void __iomem *msm_ioremap_mdss(struct platform_device *mdss_pdev, struct icc_path *msm_icc_get(struct device *dev, const char *name); -#define msm_writel(data, addr) writel((data), (addr)) -#define msm_readl(addr) readl((addr)) - static inline void msm_rmw(void __iomem *addr, u32 mask, u32 or) { - u32 val = msm_readl(addr); + u32 val = readl(addr); val &= ~mask; - msm_writel(val | or, addr); + writel(val | or, addr); } /** diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 80166f702a..09268e4168 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -176,16 +176,16 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, const struct msm_format *format; int ret, i, n; - drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)\n", - mode_cmd, mode_cmd->width, mode_cmd->height, - (char *)&mode_cmd->pixel_format); + drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%p4cc)\n", + mode_cmd, mode_cmd->width, mode_cmd->height, + &mode_cmd->pixel_format); n = info->num_planes; - format = kms->funcs->get_format(kms, mode_cmd->pixel_format, + format = mdp_get_format(kms, mode_cmd->pixel_format, mode_cmd->modifier[0]); if (!format) { - DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %4.4s\n", - (char *)&mode_cmd->pixel_format); + DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %p4cc\n", + &mode_cmd->pixel_format); ret = -EINVAL; goto fail; } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 175ee4ab8a..a5c6498a43 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -219,7 +219,7 @@ static void put_pages(struct drm_gem_object *obj) } } -static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj, +static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv) { struct msm_gem_object *msm_obj = to_msm_bo(obj); @@ -257,24 +257,24 @@ static void pin_obj_locked(struct drm_gem_object *obj) mutex_unlock(&priv->lru.lock); } -struct page **msm_gem_pin_pages(struct drm_gem_object *obj) +struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj) { struct page **p; - msm_gem_lock(obj); - p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED); + msm_gem_assert_locked(obj); + + p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED); if (!IS_ERR(p)) pin_obj_locked(obj); - msm_gem_unlock(obj); return p; } -void msm_gem_unpin_pages(struct drm_gem_object *obj) +void msm_gem_unpin_pages_locked(struct drm_gem_object *obj) { - 
msm_gem_lock(obj); + msm_gem_assert_locked(obj); + msm_gem_unpin_locked(obj); - msm_gem_unlock(obj); } static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot) @@ -489,7 +489,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma) msm_gem_assert_locked(obj); - pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED); + pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED); if (IS_ERR(pages)) return PTR_ERR(pages); @@ -703,7 +703,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) if (obj->import_attach) return ERR_PTR(-ENODEV); - pages = msm_gem_pin_pages_locked(obj, madv); + pages = msm_gem_get_pages_locked(obj, madv); if (IS_ERR(pages)) return ERR_CAST(pages); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 8d414b072c..85f0257e83 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -140,8 +140,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, void msm_gem_unpin_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace); void msm_gem_pin_obj_locked(struct drm_gem_object *obj); -struct page **msm_gem_pin_pages(struct drm_gem_object *obj); -void msm_gem_unpin_pages(struct drm_gem_object *obj); +struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj); +void msm_gem_unpin_pages_locked(struct drm_gem_object *obj); int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 0915f3b687..ee267490c9 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -47,13 +47,23 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, int msm_gem_prime_pin(struct drm_gem_object *obj) { - if (!obj->import_attach) - msm_gem_pin_pages(obj); - return 0; + struct page **pages; + int ret = 0; + + if (obj->import_attach) + return 0; + + pages = msm_gem_pin_pages_locked(obj); + if (IS_ERR(pages)) + ret = PTR_ERR(pages); + + return ret; } void msm_gem_prime_unpin(struct drm_gem_object *obj) { - if (!obj->import_attach) - msm_gem_unpin_pages(obj); + if (obj->import_attach) + return; + + msm_gem_unpin_pages_locked(obj); } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 655002b21b..cd185b9636 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -11,7 +11,7 @@ #include "msm_mmu.h" #include "msm_fence.h" #include "msm_gpu_trace.h" -#include "adreno/adreno_gpu.h" +//#include "adreno/adreno_gpu.h" #include #include diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 2bfcb222e3..a0c1bd6d1d 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -555,12 +555,12 @@ struct msm_gpu_state { static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data) { - msm_writel(data, gpu->mmio + (reg << 2)); + writel(data, gpu->mmio + (reg << 2)); } static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg) { - return msm_readl(gpu->mmio + (reg << 2)); + return readl(gpu->mmio + (reg << 2)); } static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or) @@ -586,8 +586,8 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg) * when the lo is read, so make sure to read the lo first to trigger * that */ - val = (u64) msm_readl(gpu->mmio + (reg << 2)); - val |= ((u64) 
msm_readl(gpu->mmio + ((reg + 1) << 2)) << 32); + val = (u64) readl(gpu->mmio + (reg << 2)); + val |= ((u64) readl(gpu->mmio + ((reg + 1) << 2)) << 32); return val; } @@ -595,8 +595,8 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg) static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val) { /* Why not a writeq here? Read the screed above */ - msm_writel(lower_32_bits(val), gpu->mmio + (reg << 2)); - msm_writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2)); + writel(lower_32_bits(val), gpu->mmio + (reg << 2)); + writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2)); } int msm_gpu_pm_suspend(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c deleted file mode 100644 index f7d1945e0c..0000000000 --- a/drivers/gpu/drm/msm/msm_gpummu.c +++ /dev/null @@ -1,121 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */ - -#include - -#include "msm_drv.h" -#include "msm_mmu.h" -#include "adreno/adreno_gpu.h" -#include "adreno/a2xx.xml.h" - -struct msm_gpummu { - struct msm_mmu base; - struct msm_gpu *gpu; - dma_addr_t pt_base; - uint32_t *table; -}; -#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base) - -#define GPUMMU_VA_START SZ_16M -#define GPUMMU_VA_RANGE (0xfff * SZ_64K) -#define GPUMMU_PAGE_SIZE SZ_4K -#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE) - -static void msm_gpummu_detach(struct msm_mmu *mmu) -{ -} - -static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova, - struct sg_table *sgt, size_t len, int prot) -{ - struct msm_gpummu *gpummu = to_msm_gpummu(mmu); - unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE; - struct sg_dma_page_iter dma_iter; - unsigned prot_bits = 0; - - if (prot & IOMMU_WRITE) - prot_bits |= 1; - if (prot & IOMMU_READ) - prot_bits |= 2; - - for_each_sgtable_dma_page(sgt, &dma_iter, 0) { - dma_addr_t addr = sg_page_iter_dma_address(&dma_iter); - int i; - - for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE) - gpummu->table[idx++] = (addr + i) | prot_bits; - } - - /* we can improve by deferring flush for multiple map() */ - gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE, - A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL | - A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC); - return 0; -} - -static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len) -{ - struct msm_gpummu *gpummu = to_msm_gpummu(mmu); - unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE; - unsigned i; - - for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++) - gpummu->table[idx] = 0; - - gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE, - A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL | - A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC); - return 0; -} - -static void msm_gpummu_resume_translation(struct msm_mmu *mmu) -{ -} - -static void msm_gpummu_destroy(struct msm_mmu *mmu) -{ - struct msm_gpummu *gpummu = to_msm_gpummu(mmu); - - dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base, - DMA_ATTR_FORCE_CONTIGUOUS); - - kfree(gpummu); -} - -static const struct msm_mmu_funcs funcs = { - .detach = msm_gpummu_detach, - .map = msm_gpummu_map, - .unmap = msm_gpummu_unmap, - .destroy = msm_gpummu_destroy, - .resume_translation = msm_gpummu_resume_translation, -}; - -struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu) -{ - struct msm_gpummu *gpummu; - - gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL); - if (!gpummu) - return ERR_PTR(-ENOMEM); - - gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, 
&gpummu->pt_base, - GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS); - if (!gpummu->table) { - kfree(gpummu); - return ERR_PTR(-ENOMEM); - } - - gpummu->gpu = gpu; - msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU); - - return &gpummu->base; -} - -void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base, - dma_addr_t *tran_error) -{ - dma_addr_t base = to_msm_gpummu(mmu)->pt_base; - - *pt_base = base; - *tran_error = base + TABLE_SIZE; /* 32-byte aligned */ -} diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 0641f6111b..1e0c54de37 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -92,10 +92,6 @@ struct msm_kms_funcs { * Format handling: */ - /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */ - const struct msm_format *(*get_format)(struct msm_kms *kms, - const uint32_t format, - const uint64_t modifiers); /* do format checking on format modified through fb_cmd2 modifiers */ int (*check_modified_format)(const struct msm_kms *kms, const struct msm_format *msm_fmt, diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index eb72d3645c..88af4f4908 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -42,7 +42,6 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks); struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks); -struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu); static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, int (*handler)(void *arg, unsigned long iova, int flags, void *data)) @@ -53,10 +52,6 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent); -void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base, - dma_addr_t *tran_error); - - int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr, int *asid); struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu); diff --git a/drivers/gpu/drm/msm/registers/.gitignore b/drivers/gpu/drm/msm/registers/.gitignore new file mode 100644 index 0000000000..848e0e3efb --- /dev/null +++ b/drivers/gpu/drm/msm/registers/.gitignore @@ -0,0 +1,4 @@ +# ignore XML files present at Mesa but not used by the kernel +adreno/adreno_control_regs.xml +adreno/adreno_pipe_regs.xml +adreno/ocmem.xml diff --git a/drivers/gpu/drm/msm/registers/adreno/a2xx.xml b/drivers/gpu/drm/msm/registers/adreno/a2xx.xml new file mode 100644 index 0000000000..22caddaa0d --- /dev/null +++ b/drivers/gpu/drm/msm/registers/adreno/a2xx.xml @@ -0,0 +1,1865 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+	[... a2xx.xml register and bitfield definitions ...]
+		note: only 0x3f worth of valid register values for VS_REGS and
+		PS_REGS, but high bit is set to indicate '0 registers used':
+	[... a2xx.xml register definitions ...]
+	Texture state dwords
+	[... a2xx.xml register definitions ...]
diff --git a/drivers/gpu/drm/msm/registers/adreno/a3xx.xml b/drivers/gpu/drm/msm/registers/adreno/a3xx.xml
new file mode 100644
index 0000000000..6717abc0a8
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a3xx.xml
@@ -0,0 +1,1751 @@
+	[... a3xx.xml register and bitfield definitions ...]
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + The pair of MEM_SIZE/ADDR registers get programmed + in sequence with the size/addr of each buffer. + + + + + + + + + + + + + + + + + aka clip_halfz + + + + + + + + + + + + + + + + + + + + + + + + + + range of -8.0 to 8.0 + + + range of -512.0 to 512.0 + + + + + + + + + + + + + + + + + + + + + + + + + + RENDER_MODE is RB_RESOLVE_PASS for gmem->mem, otherwise RB_RENDER_PASS + + + + render targets - 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Pitch (actually, appears to be pitch in bytes, so really is a stride) + in GMEM, so pitch of the current tile. + + + + + offset into GMEM (or system memory address in bypass mode) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + actually, appears to be pitch in bytes, so really is a stride + + + + + + + + + + + + + + + + + + + + Z_READ_ENABLE bit is set for zfunc other than GL_ALWAYS or GL_NEVER + + + + seems to be always set to 0x00000000 + + + + + DEPTH_BASE is offset in GMEM to depth/stencil buffer, ie + bin_w * bin_h / 1024 (possible rounded up to multiple of + something?? ie. 39 becomes 40, 78 becomes 80.. 75 becomes + 80.. so maybe it needs to be multiple of 8?? + + + + + + Pitch of depth buffer or combined depth+stencil buffer + in z24s8 cases. + + + + + + + + + + + + + + + + + + seems to be always set to 0x00000000 + + + Base address for stencil when not using interleaved depth/stencil + + + + pitch of stencil buffer when not using interleaved depth/stencil + + + + + + seems to be set to 0x00000002 during binning pass + + + + X/Y offset of current bin + + + + + + + + + + + + + + + seems to be where firmware writes BIN_DATA_ADDR from + CP_SET_BIN_DATA packet.. probably should be called + PC_BIN_BASE (just using name from yamato for now) + + + + probably should be PC_BIN_SIZE + + + SIZE is current pipe width * height (in tiles) + + + N is some sort of slot # between 0..(SIZE-1). 
In case + multiple tiles use same pipe, each tile gets unique slot # + + + + + + + STRIDE_IN_VPC: ALIGN(next_outloc - 8, 4) / 4 + (but, in cases where you'd expect 1, the blob driver uses + 2, so possibly 0 (no varying) or minimum of 2) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + indexed by dimension + + + + + + + + indexed by dimension, global_size / local_size + + + + + + + + + + TOTALATTRTOVS is # of attributes to vertex shader, in register + slots (ie. vec4+vec3 -> 7) + + + + STRMDECINSTRCNT is # of VFD_DECODE_INSTR registers valid + + STRMFETCHINSTRCNT is # of VFD_FETCH_INSTR registers valid + + + + MAXSTORAGE could be # of attributes/vbo's + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SHIFTCNT appears to be size, ie. FLOAT_32_32_32 is 12, and BYTE_8 is 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From register spec: + SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET [16:24]: Constant object + start offset in on chip RAM, + 128bit aligned + + + + + + + + + + + + + + + + + + + + + + The full/half register footprint is in units of four components, + so if r0.x is used, that counts as all of r0.[xyzw] as used. + There are separate full/half register footprint values as the + full and half registers are independent (not overlapping). + Presumably the thread scheduler hardware allocates the full/half + register names from the actual physical register file and + handles the register renaming. + + + + + + + From regspec: + SP_FS_CTRL_REG0.FS_LENGTH [31:24]: FS length, unit = 256bits. + If bit31 is 1, it means overflow + or any long shader. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + These seem to be offsets for storage of the varyings. + Always seems to start from 8, possibly loc 0 and 4 + are for gl_Position and gl_PointSize? + + + + + + + + + + SP_VS_OBJ_START_REG contains pointer to the vertex shader program, + immediately followed by the binning shader program (although I + guess that is probably just re-using the same gpu buffer) + + + + + The size of memory that ldp/stp can address, in 128 byte increments. + + + + + + + + + + + + + + + + + + The full/half register footprint is in units of four components, + so if r0.x is used, that counts as all of r0.[xyzw] as used. + There are separate full/half register footprint values as the + full and half registers are independent (not overlapping). + Presumably the thread scheduler hardware allocates the full/half + register names from the actual physical register file and + handles the register renaming. + + + + + + + + + + + + From regspec: + SP_FS_CTRL_REG0.FS_LENGTH [31:24]: FS length, unit = 256bits. + If bit31 is 1, it means overflow + or any long shader. + + + + + + + + + + + SP_FS_OBJ_START_REG contains pointer to fragment shader program + + + + + + + + + + + + + seems to be one bit per scalar, '1' for flat, '0' for smooth + + + seems to be one bit per scalar, '1' for flat, '0' for smooth + + + + render targets - 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Configures the mapping between VSC_PIPE buffer and + bin, X/Y specify the bin index in the horiz/vert + direction (0,0 is upper left, 0,1 is leftmost bin + on second row, and so on). 
W/H specify the number + of bins assigned to this VSC_PIPE in the horiz/vert + dimension. + + + + + + + + + + + seems to be set to 0x00000001 during binning pass + + + + seems to be always set to 0x00000001 + + + + + + + seems to be always set to 0x00000001 + + + + + + + + + + + + + + + + + + + + + + + + + + + + seems to be always set to 0x00000001 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + seems to be always set to 0x00000003 + + + seems to be always set to 0x00000001 + + + + + + + + + + + + + + + + + + + Texture sampler dwords + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Texture constant dwords + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + INDX is index of texture address(es) in MIPMAP state block + + Pitch in bytes (so actually stride) + + SWAP bit is set for BGRA instead of RGBA + + + + + + + + + + + diff --git a/drivers/gpu/drm/msm/registers/adreno/a4xx.xml b/drivers/gpu/drm/msm/registers/adreno/a4xx.xml new file mode 100644 index 0000000000..69a9f9b02b --- /dev/null +++ b/drivers/gpu/drm/msm/registers/adreno/a4xx.xml @@ -0,0 +1,2409 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Pitch (actually, appears to be pitch in bytes, so really is a stride) + in GMEM, so pitch of the current tile. 
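
The recurring "pitch is really a byte stride" note above boils down to one line of arithmetic; here is a minimal sketch, assuming an invented 32-byte alignment (the comments above do not state one):

#include <stdint.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Byte stride of one GMEM tile: bin width in pixels times bytes per pixel,
 * rounded up to an assumed 32-byte boundary. */
uint32_t gmem_tile_stride(uint32_t bin_w, uint32_t cpp)
{
	return ALIGN_UP(bin_w * cpp, 32);
}
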
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + actually, appears to be pitch in bytes, so really is a stride + + + + + + + + + + + + + + + + + + + + + + + + + + Z_READ_ENABLE bit is set for zfunc other than GL_ALWAYS or GL_NEVER + + + + + + + DEPTH_BASE is offset in GMEM to depth/stencil buffer, ie + bin_w * bin_h / 1024 (possible rounded up to multiple of + something?? ie. 39 becomes 40, 78 becomes 80.. 75 becomes + 80.. so maybe it needs to be multiple of 8?? + + + + + stride of depth/stencil buffer + + + ??? + + + + + + + + + + + + + + + + + + + + + + Base address for stencil when not using interleaved depth/stencil + + + + pitch of stencil buffer when not using interleaved depth/stencil + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + The full/half register footprint is in units of four components, + so if r0.x is used, that counts as all of r0.[xyzw] as used. + There are separate full/half register footprint values as the + full and half registers are independent (not overlapping). + Presumably the thread scheduler hardware allocates the full/half + register names from the actual physical register file and + handles the register renaming. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + These seem to be offsets for storage of the varyings. + Always seems to start from 8, possibly loc 0 and 4 + are for gl_Position and gl_PointSize? + + + + + + + + + + + + From register spec: + SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET [16:24]: Constant object + start offset in on chip RAM, + 128bit aligned + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + These seem to be offsets for storage of the varyings. + Always seems to start from 8, possibly loc 0 and 4 + are for gl_Position and gl_PointSize? + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + These seem to be offsets for storage of the varyings. + Always seems to start from 8, possibly loc 0 and 4 + are for gl_Position and gl_PointSize? + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Configures the mapping between VSC_PIPE buffer and + bin, X/Y specify the bin index in the horiz/vert + direction (0,0 is upper left, 0,1 is leftmost bin + on second row, and so on). W/H specify the number + of bins assigned to this VSC_PIPE in the horiz/vert + dimension. 
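
The bin-to-pipe mapping described above (X/Y give the upper-left bin of the group, W/H how many bins the pipe covers) can be sketched as a small packing helper. The bit positions below are assumptions chosen for illustration, not values taken from this patch:

#include <stdio.h>

/* Illustrative field positions only -- not taken from the XML above. */
#define VSC_PIPE_X(v)	((unsigned)(v) << 0)	/* bin index, horizontal */
#define VSC_PIPE_Y(v)	((unsigned)(v) << 10)	/* bin index, vertical */
#define VSC_PIPE_W(v)	((unsigned)(v) << 20)	/* bins covered, horizontal */
#define VSC_PIPE_H(v)	((unsigned)(v) << 24)	/* bins covered, vertical */

/* Pack one pipe's config: upper-left bin (x, y) plus a w x h group of bins. */
static unsigned vsc_pipe_config(unsigned x, unsigned y, unsigned w, unsigned h)
{
	return VSC_PIPE_X(x) | VSC_PIPE_Y(y) | VSC_PIPE_W(w) | VSC_PIPE_H(h);
}

int main(void)
{
	/* A 4x2 bin grid split across two pipes, two columns of bins each. */
	printf("pipe0: 0x%08x\n", vsc_pipe_config(0, 0, 2, 2));
	printf("pipe1: 0x%08x\n", vsc_pipe_config(2, 0, 2, 2));
	return 0;
}
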
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TOTALATTRTOVS is # of attributes to vertex shader, in register + slots (ie. vec4+vec3 -> 7) + + + + BYPASSATTROVS seems to count varyings that are just directly + assigned from attributes (ie, "vFoo = aFoo;") + + + STRMDECINSTRCNT is # of VFD_DECODE_INSTR registers valid + + STRMFETCHINSTRCNT is # of VFD_FETCH_INSTR registers valid + + + + MAXSTORAGE could be # of attributes/vbo's + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SHIFTCNT appears to be size, ie. FLOAT_32_32_32 is 12, and BYTE_8 is 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SIZE is current pipe width * height (in tiles) + + + N is some sort of slot # between 0..(SIZE-1). In case + multiple tiles use same pipe, each tile gets unique slot # + + + + + + + in groups of 4x vec4, blob only uses values + 0, 1, 2, 4, 6, 8 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Texture sampler dwords + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Texture constant dwords + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Pitch in bytes (so actually stride) + + + + + + + + + + + + + + + + + + + + + + + Pitch in bytes (so actually stride) + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/drivers/gpu/drm/msm/registers/adreno/a5xx.xml b/drivers/gpu/drm/msm/registers/adreno/a5xx.xml new file mode 100644 index 0000000000..bd8df59451 --- /dev/null +++ b/drivers/gpu/drm/msm/registers/adreno/a5xx.xml @@ -0,0 +1,3039 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Configures the mapping between VSC_PIPE buffer and + bin, X/Y specify the bin index in the horiz/vert + direction (0,0 is upper left, 0,1 is leftmost bin + on second row, and so on). W/H specify the number + of bins assigned to this VSC_PIPE in the horiz/vert + dimension. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + LRZ: (Low Resolution Z ??) + ---- + + I think it serves two functions, early discard of primitives in binning + pass without needing full resolution depth buffer, and also functions as + a depth-prepass, used during the GMEM draws to discard primitives that + would not be visible due to later draws. + + The LRZ buffer always seems to be z16 format, regardless of actual + depth buffer format. + + Note that LRZ write should be disabled when blend/stencil/etc is enabled, + since the occluded primitive can still contribute to final color value + of a fragment. + + Only enabled for GL_LESS/GL_LEQUAL/GL_GREATER/GL_GEQUAL? + + + + LRZ write also disabled for blend/etc. + + update MAX instead of MIN value, ie. GL_GREATER/GL_GEQUAL + + + + + + + + Pitch is depth width (in pixels) / 8 (aligned to 32). Height + is also divided by 8 (ie. 
covers 8x8 pixels) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Z_READ_ENABLE bit is set for zfunc other than GL_ALWAYS or GL_NEVER + + + + + + + + + stride of depth/stencil buffer + + + size of layer + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Blits: + ------ + + Blits are triggered by CP_EVENT_WRITE:BLIT, compared to previous + generations where they shared most of the gl pipeline and were + triggered by CP_DRAW_INDX* + + For gmem->mem blob uses RB_BLIT_CNTL.BUF to specify src of + blit (ie MRTn, ZS, etc) and RB_BLIT_DST_LO/HI for destination + gpuaddr. The gmem offset is taken from RB_MRT[n].BASE_LO/HI + + For mem->gmem blob uses just MRT0 or ZS and RB_BLIT_DST_LO/HI + for the GMEM offset, and gpuaddr from RB_MRT[0].BASE_LO/HI + (I suppose this is just to avoid trashing RB_MRT[1..7]??) + + + + + + + + + + + + + + + + + + + + + + + + + For MASK, if RB_BLIT_CNTL.BUF=BLIT_ZS: + 1 - depth + 2 - stencil + 3 - depth+stencil + if RB_BLIT_CNTL.BUF=BLIT_MRTn + then probably a component mask, I always see 0xf + + + + + + Buffer Metadata (flag buffers): + ------------------------------- + + Blob seems to stick some metadata at the front of the buffer, + both z/s and MRT. I think this is same as UBWC (bandwidth + compression) metadata that mdp 1.7 and later supports. See + 1d3fae5698ce5358caab87a15383b690941697e8 in downstream kernel. + UBWC seems to stand for "universal bandwidth compression". + + Before glReadPixels() it does a pair of BYPASS blits (at least + if metadata is used) presumably to resolve metadata. + + NOTES: see: getUBwcBlockSize(), getUBwcMetaBufferSize() at + https://android.googlesource.com/platform/hardware/qcom/display/+/android-6.0.1_r40/msm8994/libgralloc/alloc_controller.cpp + (note that bpp in bytes, not bits, so really cpp) + + Example Layout 2d w/ mipmap levels: + + 100x2000, ifmt=GL_RG, fmt=GL_RG16F, type=GL_FLOAT, meta=64x512@0x8000 (7x500) + base=c072e000, offset=16384, size=1703936 + + color flags + 0 c073a000 c0732000 - level 0 flags is address + 1 c0838000 c0834000 programmed in texture state + 2 c0879000 c0877000 + 3 c089a000 c0899000 + 4 c08ab000 c08aa000 + 5 c08b4000 c08b3000 + 6 c08b9000 c08b8000 + 7 c08bc000 c08bb000 + 8 c08be000 c08bd000 + 9 c08c0000 c08bf000 + 10 c08c2000 c08c1000 + + ARRAY_PITCH is the combined size of all the levels plus flags, + so 0xc08c3000 - 0xc0732000 = 0x00191000 (1642496); each level + takes up a minimum of 2 pages (since color and flags parts are + each page aligned. + + { TILE_MODE = TILE5_3 | SWIZ_X = A5XX_TEX_X | SWIZ_Y = A5XX_TEX_Y | SWIZ_Z = A5XX_TEX_ZERO | SWIZ_W = A5XX_TEX_ONE | MIPLVLS = 0 | FMT = TFMT5_16_16_FLOAT | SWAP = WZYX } + { WIDTH = 100 | HEIGHT = 2000 } + { FETCHSIZE = TFETCH5_4_BYTE | PITCH = 512 | TYPE = A5XX_TEX_2D } + { ARRAY_PITCH = 1642496 | 0x18800000 } - NOTE c2dc always has 0x18800000 but + { BASE_LO = 0xc0732000 } this varies for blob gles driver.. 
+ { BASE_HI = 0 | DEPTH = 1 } not sure what it is + + + + + + + + + + + + + + + + + + + + + + + + + + num of varyings plus four for gl_Position (plus one if gl_PointSize) + plus # of transform-feedback (streamout) varyings if using the + hw streamout (rather than stg instructions in shader) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Stream-Out: + ----------- + + VPC_SO[0..3] registers setup details about streamout buffers, and + number of components to write to each. + + VPC_SO_PROG provides the mapping between output varyings and the SO + buffers. It is written multiple times (via a CP_CONTEXT_REG_BUNCH + packet, not sure if that matters), each write can handle up to two + components of stream-out output. Order matches up to OUTLOC, + including padding. So, if outputting first 3 varyings: + + SP_VS_OUT[0].REG: { A_REGID = r0.w | A_COMPMASK = 0xf | B_REGID = r0.x | B_COMPMASK = 0x7 } + SP_VS_OUT[0x1].REG: { A_REGID = r1.w | A_COMPMASK = 0x3 | B_REGID = r2.y | B_COMPMASK = 0xf } + SP_VS_VPC_DST[0].REG: { OUTLOC0 = 0 | OUTLOC1 = 4 | OUTLOC2 = 8 | OUTLOC3 = 12 } + + Then: + + VPC_SO_PROG: { A_BUF = 0 | A_OFF = 0 | A_EN | A_BUF = 0 | B_OFF = 4 | B_EN } + VPC_SO_PROG: { A_BUF = 0 | A_OFF = 8 | A_EN | A_BUF = 0 | B_OFF = 12 | B_EN } + VPC_SO_PROG: { A_BUF = 2 | A_OFF = 0 | A_EN | A_BUF = 2 | B_OFF = 4 | B_EN } + VPC_SO_PROG: { A_BUF = 2 | A_OFF = 8 | A_EN | A_BUF = 0 | B_OFF = 0 } + VPC_SO_PROG: { A_BUF = 1 | A_OFF = 0 | A_EN | A_BUF = 1 | B_OFF = 4 | B_EN } + + Note that varying order is OUTLOC0, OUTLOC2, OUTLOC1, and note + the padding between OUTLOC1 and OUTLOC2. + + The BUF bitfield indicates which of the four streamout buffers + to write into at the specified offset. + + The VPC_SO[n].FLUSH_BASE_LO/HI is used for hw to write back next + offset which gets loaded back into VPC_SO[n].BUFFER_OFFSET via a + CP_MEM_TO_REG. Probably can be ignored until we have GS/etc, at + which point we can't calculate the offset on the CPU. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + The size of memory that ldp/stp can address. + + + + Guessing that this is the same as a3xx/a6xx. 
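
The stream-out setup described above is easier to follow as code: components are walked in OUTLOC order, padding included, and each VPC_SO_PROG write carries two of them. The struct and emit helper below are invented for illustration; only the pairing scheme and the example values come from the comment:

#include <stdbool.h>
#include <stdio.h>

/* One stream-out component slot, in OUTLOC order (padding slots disabled). */
struct so_slot {
	unsigned buf;	/* which of the four stream-out buffers */
	unsigned off;	/* offset within that buffer */
	bool en;	/* false for padding */
};

/* Each VPC_SO_PROG write carries two slots (the A and B halves). */
static void emit_so_prog(struct so_slot a, struct so_slot b)
{
	printf("VPC_SO_PROG: { A_BUF = %u | A_OFF = %u%s | B_BUF = %u | B_OFF = %u%s }\n",
	       a.buf, a.off, a.en ? " | A_EN" : "",
	       b.buf, b.off, b.en ? " | B_EN" : "");
}

int main(void)
{
	/* Slots for the example above: a vec4 in buffer 0, a vec3 in buffer 2
	 * followed by one padding slot, then a vec2 in buffer 1. */
	const struct so_slot slots[] = {
		{0, 0, true}, {0, 4, true}, {0, 8, true}, {0, 12, true},
		{2, 0, true}, {2, 4, true}, {2, 8, true}, {0, 0, false},
		{1, 0, true}, {1, 4, true},
	};
	const unsigned n = sizeof(slots) / sizeof(slots[0]);

	for (unsigned i = 0; i + 1 < n; i += 2)
		emit_so_prog(slots[i], slots[i + 1]);
	return 0;
}
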
+ per MRT
+ Texture sampler dwords
+ Texture constant dwords
+ Pitch in bytes (so actually stride)
+ Pitch in bytes (so actually stride)
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
new file mode 100644
index 0000000000..2dfe6913ab
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -0,0 +1,5011 @@
+ Allow early z-test and early-lrz (if applicable)
+ Disable early z-test and
early-lrz test (if applicable) + + + A special mode that allows early-lrz test but disables + early-z test. Which might sound a bit funny, since + lrz-test happens before z-test. But as long as a couple + conditions are maintained this allows using lrz-test in + cases where fragment shader has kill/discard: + + 1) Disable lrz-write in cases where it is uncertain during + binning pass that a fragment will pass. Ie. if frag + shader has-kill, writes-z, or alpha/stencil test is + enabled. (For correctness, lrz-write must be disabled + when blend is enabled.) This is analogous to how a + z-prepass works. + + 2) Disable lrz-write and test if a depth-test direction + reversal is detected. Due to condition (1), the contents + of the lrz buffer are a conservative estimation of the + depth buffer during the draw pass. Meaning that geometry + that we know for certain will not be visible will not pass + lrz-test. But geometry which may be (or contributes to + blend) will pass the lrz-test. + + This allows us to keep early-lrz-test in cases where the frag + shader does not write-z (ie. we know the z-value before FS) + and does not have side-effects (image/ssbo writes, etc), but + does have kill/discard. Which turns out to be a common + enough case that it is useful to keep early-lrz test against + the conservative lrz buffer to discard fragments that we + know will definitely not be visible. + + + Not a real hw value, used internally by mesa + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + b0..7 identifies where MRB data starts (and RB data ends) + b8.15 identifies where VSD data starts (and MRB data ends) + b16..23 identifies where IB1 data starts (and RB data ends) + b24..31 identifies where IB2 data starts (and IB1 data ends) + + + + + + + + + low bits identify where CP_SET_DRAW_STATE stateobj + processing starts (and IB2 data ends). I'm guessing + b8 is part of this since (from downstream kgsl): + + /* ROQ sizes are twice as big on a640/a680 than on a630 */ + if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev)) { + kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140); + kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C); + } ... + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + number of remaining dwords incl current dword being consumed? + + + + number of remaining dwords incl current dword being consumed? + + + + number of remaining dwords incl current dword being consumed? + + + + number of remaining dwords incl current dword being consumed? + + + + number of dwords that have already been read but haven't been consumed by $addr + + + + number of remaining dwords incl current dword being consumed? 
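The early-LRZ rules spelled out above roughly amount to the following driver-side decision (illustrative Python sketch of the listed conditions only; the names are invented and this is not the actual mesa/blob logic):

    def lrz_state(frag_has_kill, frag_writes_z, frag_has_side_effects,
                  alpha_or_stencil_test, blend_enabled, dir_reversal_detected):
        # (1) lrz-write only when it is certain at binning time that a
        #     fragment which passes will also pass in the draw pass
        lrz_write = not (frag_has_kill or frag_writes_z or
                         alpha_or_stencil_test or blend_enabled)
        # early-lrz-test survives kill/discard as long as the FS neither
        # writes z nor has side effects (image/ssbo writes, etc)
        lrz_test = not (frag_writes_z or frag_has_side_effects)
        # (2) a depth-test direction reversal disables both write and test
        if dir_reversal_detected:
            lrz_write = lrz_test = False
        return lrz_test, lrz_write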
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Set to true when binning, isn't changed afterwards + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Configures the mapping between VSC_PIPE buffer and + bin, X/Y specify the bin index in the horiz/vert + direction (0,0 is upper left, 0,1 is leftmost bin + on second row, and so on). W/H specify the number + of bins assigned to this VSC_PIPE in the horiz/vert + dimension. + + + + + + + + + + + + + + + + + + Seems to be a bitmap of which tiles mapped to the VSC + pipe contain geometry. + + I suppose we can connect a maximum of 32 tiles to a + single VSC pipe. + + + + + + + Has the size of data written to corresponding VSC_PRIM_STRM + buffer. + + + + + + + Has the size of data written to corresponding VSC pipe, ie. + same thing that is written out to VSC_DRAW_STRM_SIZE_ADDRESS_LO/HI + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + In addition to FLUSH_PER_OVERLAP, guarantee that UCHE + and CCU don't get out of sync when fetching the previous + value for the current pixel. With NO_FLUSH, there's the + possibility that the flags for the current pixel are + flushed before the data or vice-versa, leading to + texture fetches via UCHE getting out of sync values. + This mode should eliminate that. It's used in bypass + mode for coherent blending + (GL_KHR_blend_equation_advanced_coherent) as well as + non-coherent blending. + + + + Invalidate UCHE and wait for any pending work to finish + if there was possibly an overlapping primitive prior to + the current one. This is similar to a combination of + GRAS_SC_CONTROL::INJECT_L2_INVALIDATE_EVENT and + WAIT_RB_IDLE_ALL_TRI on a3xx. It's used in GMEM mode for + coherent blending + (GL_KHR_blend_equation_advanced_coherent). + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + LRZ write also disabled for blend/etc. + + update MAX instead of MIN value, ie. GL_GREATER/GL_GEQUAL + + + Clears the LRZ block being touched to: + - 0.0 if GREATER + - 1.0 if LESS + + + + + + + + If DISABLE_ON_WRONG_DIR enabled - write new LRZ direction into + buffer, in case of mismatched direction writes 0 (disables LRZ). + + + + Disable LRZ based on previous direction and the current one. 
+ If DIR_WRITE is not enabled - there is no write to direction buffer. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Z_READ_ENABLE bit is set for zfunc other than GL_ALWAYS or GL_NEVER + also set when Z_BOUNDS_ENABLE is set + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + For clearing depth/stencil + 1 - depth + 2 - stencil + 3 - depth+stencil + For clearing color buffer: + then probably a component mask, I always see 0xf + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + RB_SAMPLE_COUNT_ADDR register is used up to (and including) a730. After that + the address is specified through CP_EVENT_WRITE7::WRITE_SAMPLE_COUNT. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Swaps TESS_CW_TRIS/TESS_CCW_TRIS, and also makes + triangle fans and triangle strips use the D3D + order instead of the OpenGL order. + + + + + + + + geometry shader + + + + + + + + + + + + + + Multi-position output lets the last geometry + stage shader write multiple copies of + gl_Position. If disabled then the VS is run once + for each view, and ViewID is passed as a + register to the VS. + + + + + + + + + + + + + + + + + + + + + + + + + + + Packed array of a6xx_varying_interp_mode + + + + Packed array of a6xx_varying_ps_repl_mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + num of varyings plus four for gl_Position (plus one if gl_PointSize) + plus # of transform-feedback (streamout) varyings if using the + hw streamout (rather than stg instructions in shader) + + + + + + + The number of extra copies of POSITION, i.e. + number of views minus one when multi-position + output is enabled, otherwise 0. + + + + + + + + + + + + + + + This VPC location will be overwritten with + ViewID when multiview is enabled. It's used when + fragment shaders read ViewID. It's only + strictly required for multi-position output, + where the same VS invocation is used for all the + views at once, but it can be used when multi-pos + output is disabled too, to avoid having to pass + ViewID through the VS. 
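The "num of varyings plus four for gl_Position ..." note above reduces to a small counting helper (sketch only, in the units the comment uses; not taken from the driver):

    def total_varying_count(num_varyings, writes_pointsize,
                            hw_streamout_varyings=0):
        total = num_varyings + 4      # four for gl_Position
        if writes_pointsize:
            total += 1                # plus one for gl_PointSize
        # streamout varyings only count when hw streamout is used
        # (rather than stg instructions in the shader)
        return total + hw_streamout_varyings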
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + num of varyings plus four for gl_Position (plus one if gl_PointSize) + plus # of transform-feedback (streamout) varyings if using the + hw streamout (rather than stg instructions in shader) + + + + + + + + + + + + + + + + + + + + + + size in vec4s of per-primitive storage for gs. TODO: not actually in VPC + + + + + + + + + + + + + + + + + + + + + + + + + + Possibly not really "initiating" the draw but the layout is similar + to VGT_DRAW_INITIATOR on older gens + + + + + + + + + + + + + + + + Written by CP_SET_VISIBILITY_OVERRIDE handler + + + + + + + + + + + + + + + + + + + + + + + + + + This is the ID of the current patch within the + subdraw, used to calculate the offset of the + patch within the HS->DS buffers. When a draw is + split into multiple subdraws then this differs + from gl_PrimitiveID on the second, third, etc. + subdraws. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + The size of memory that ldp/stp can address. + + + + Seems to be the same as a3xx. The maximum stack + size in units of 4 calls, so a call depth of 7 + would result in a value of 2. + TODO: What's the actual size per call, i.e. the + size of the PC? a3xx docs say it's 16 bits + there, but the length register now takes 28 bits + so it's probably been bumped to 32 bits. + + + + + + + + + There are four indices used to compute the + private memory location for an access: + + - stp/ldp offset + - fiber id + - wavefront id (a swizzled version of what "getwid" returns) + - SP ID (the same as what "getspid" returns) + + The stride for the SP ID is always set by + TOTALPVTMEMSIZE. In the per-wave layout, the + indices are used in this order: + + - offset % 4 (offset within dword) + - fiber id + - offset / 4 + - wavefront id + - SP ID + + and the stride for the wavefront ID is + MEMSIZEPERITEM, multiplied by 128 (fibers per + wavefront). In the per-fiber layout, the indices + are used in this order: + + - offset + - fiber id % 4 + - wavefront id + - fiber id / 4 + - SP ID + + and the stride for the fiber id/wavefront id + combo is MEMSIZEPERITEM. + + Note: Accesses of more than 1 dword do not work + with per-fiber layout. The blob will fall back + to per-wave instead. + + + + + + + This seems to be be the equivalent of HWSTACKOFFSET in + a3xx. The ldp/stp offset formula above isn't affected by + HWSTACKSIZEPERTHREAD at all, so the HW return address + stack seems to be after all the normal per-SP private + memory. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Normally the size of the output of the last stage in + dwords. It should be programmed as follows: + + size less than 63 - size + size of 63 (?) or 64 - 63 + size greater than 64 - 64 + + What to program when the size is 61-63 is a guess, but + both the blob and ir3 align the size to 4 dword's so it + doesn't matter in practice. 
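One plausible reading of the per-wave private memory layout described above, written out as a byte-address calculation (assumption-level sketch: the byte units and argument names are guesses, only the index order and strides come from the comment):

    FIBERS_PER_WAVE = 128

    def pvtmem_addr_per_wave(offset, fiber_id, wave_id, sp_id,
                             memsizeperitem, totalpvtmemsize):
        addr = offset % 4                                   # byte within dword
        addr += fiber_id * 4                                # one dword per fiber
        addr += (offset // 4) * 4 * FIBERS_PER_WAVE         # next dword slot
        addr += wave_id * memsizeperitem * FIBERS_PER_WAVE  # wavefront stride
        addr += sp_id * totalpvtmemsize                     # per-SP stride
        return addr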
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Enable ALL helper invocations in a quad. Necessary for + fine derivatives and quad subgroup ops. + + + + + + + + Enable helper invocations. Enables 3 out of 4 fragments, + because the coarse derivatives only use half of the quad + and so one pixel's value is always unused. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + per MRT + + + + + + + + + + + + + + + + + + + + Similar to "(eq)" flag but disables helper invocations + after the texture prefetch. + + + + Bypass writing to regs and overwrite output with color from + CONSTSLOTID const regs. + + + + + + + + + + + + + + + Results in color being zero + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + If 0 - all 32k of shared storage is enabled, otherwise + (SHARED_SIZE + 1) * 1k is enabled. + The ldl/stl offset seems to be rewritten to 0 when it is beyond + this limit. This is different from ldlw/stlw, which wraps at + 64k (and has 36k of storage on A640 - reads between 36k-64k + always return 0) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This can alternatively be interpreted as a pitch shift, ie, the + descriptor size is 2 << N dwords + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Specify for which components the output color should be read + from alias, e.g. for: + + alias.1.b32.0 r3.x, c8.x + alias.1.b32.0 r2.x, c4.x + alias.1.b32.0 r1.x, c4.x + alias.1.b32.0 r0.x, c0.x + + the SP_PS_ALIASED_COMPONENTS would be 0x00001111 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This register clears pending loads queued up by + CP_LOAD_STATE6. Each bit resets a particular kind(s) of + CP_LOAD_STATE6. + + + + + + + + + + + + + + + + + + + + + + + + + This register clears pending loads queued up by + CP_LOAD_STATE6. Each bit resets a particular kind(s) of + CP_LOAD_STATE6. + + + + + + + + + + + + + + + + + + + + + + + + + + Shared constants are intended to be used for Vulkan push + constants. When enabled, 8 vec4's are reserved in the FS + const pool and 16 in the geometry const pool although + only 8 are actually used (why?) and they are mapped to + c504-c511 in each stage. Both VS and FS shared consts + are written using ST6_CONSTANTS/SB6_IBO, so that both + the geometry and FS shared consts can be written at once + by using CP_LOAD_STATE6 rather than + CP_LOAD_STATE6_FRAG/CP_LOAD_STATE6_GEOM. In addition + DST_OFF and NUM_UNIT are in units of dwords instead of + vec4's. 
+ + There is also a separate shared constant pool for CS, + which is loaded through CP_LOAD_STATE6_FRAG with + ST6_UBO/ST6_IBO. However the only real difference for CS + is the dword units. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Texture sampler dwords + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + clamp result to [0, 1] if the format is unorm or + [-1, 1] if the format is snorm, *after* + filtering. Has no effect for other formats. + + + + + + + + + + + + + + + + + + + Texture constant dwords + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + probably for D3D structured UAVs, normally set to 1 + + + + + + Pitch in bytes (so actually stride) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml new file mode 100644 index 0000000000..6531749d30 --- /dev/null +++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml @@ -0,0 +1,228 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml new file mode 100644 index 0000000000..218ec8bb96 --- /dev/null +++ b/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml @@ -0,0 +1,400 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Registers in common between a2xx and a3xx + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Address mode for a5xx+ + + + + + + + Line mode for a5xx+ + Note that Bresenham lines are only supported with MSAA disabled. + + + + + + + + Blob (v615) seem to only use SAM and I wasn't able to coerce + it to produce any other command. + Probably valid for a4xx+ but not enabled or tested on anything + but a6xx. 
+ + + Produces garbage + + + + + + + Causes reads from an invalid address + + Results in color being zero + + + + diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml new file mode 100644 index 0000000000..cab01af55d --- /dev/null +++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml @@ -0,0 +1,2268 @@ + + + + + + + + + + + + Flushes dirty data from UCHE, and also writes a GPU timestamp to + the address if one is provided. + + + + + + + + + + + + + + + + + + + If A6XX_RB_SAMPLE_COUNT_CONTROL.copy is true, writes OQ Z passed + sample counts to RB_SAMPLE_COUNT_ADDR. This writes to main + memory, skipping UCHE. + + + + + + Writes the GPU timestamp to the address that follows, once RB + access and flushes are complete. + + + + + + + + + + + + + + + + + + + + + Invalidates depth attachment data from the CCU. We assume this + happens in the last stage. + + + + + Invalidates color attachment data from the CCU. We assume this + happens in the last stage. + + + + + Flushes the small cache used by CP_EVENT_WRITE::BLIT (which, + along with its registers, would be better named RESOLVE). + + + + + Flushes depth attachment data from the CCU. We assume this + happens in the last stage. + + + + + Flushes color attachment data from the CCU. We assume this + happens in the last stage. + + + + + 2D blit to resolve GMEM to system memory (skipping CCU) at the + end of a render pass. Compare to CP_BLIT's BLIT_OP_SCALE for + more general blitting. + + + + + Clears based on GRAS_LRZ_CNTL configuration, could clear + fast-clear buffer or LRZ direction. + LRZ direction is stored at lrz_fc_offset + 0x200, has 1 byte which + could be expressed by enum: + CUR_DIR_DISABLED = 0x0 + CUR_DIR_GE = 0x1 + CUR_DIR_LE = 0x2 + CUR_DIR_UNSET = 0x3 + Clear of direction means setting the direction to CUR_DIR_UNSET. + + + + + + + + + + + + + + + Invalidates UCHE. + + + + + + + + Doesn't seem to do anything + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + initialize CP's micro-engine + + skip N 32-bit words to get to the next packet + + + indirect buffer dispatch. prefetch parser uses this packet + type to determine whether to pre-fetch the IB + + + + + + Takes the same arguments as CP_INDIRECT_BUFFER, but jumps to + another buffer at the same level. Must be at the end of IB, and + doesn't work with draw state IB's. + + + indirect buffer dispatch. same as IB, but init is pipelined + + + Waits for the IDLE state of the engine before further drawing. + This is pipelined, so the CP may continue. 
+ + + wait until a register or memory location is a specific value + + wait until a register location is equal to a specific value + + wait until a register location is >= a specific value + + wait until a read completes + + wait until all base/size writes from an IB_PFD packet have completed + + + register read/modify/write + + Set binning configuration registers + + + reads register in chip and writes to memory + + write N 32-bit words to memory + + write CP_PROG_COUNTER value to memory + + conditional execution of a sequence of packets + + conditional write to memory or register + + + generate an event that creates a write to memory when completed + + + generate a VS|PS_done event + + generate a cache flush done event + + generate a z_pass done event + + + not sure the real name, but this seems to be what is used for + opencl, instead of CP_DRAW_INDX.. + + + initiate fetch of index buffer and draw + + draw using supplied indices in packet + + initiate fetch of index buffer and binIDs and draw + + initiate fetch of bin IDs and draw using supplied indices + + begin/end initiator for viz query extent processing + + fetch state sub-blocks and initiate shader code DMAs + + load constant into chip and to memory + + load sequencer instruction memory (pointer-based) + + load sequencer instruction memory (code embedded in packet) + + load constants from a location in memory + + selective invalidation of state pointers + + dynamically changes shader instruction memory partition + + sets the 64-bit BIN_MASK register in the PFP + + sets the 64-bit BIN_SELECT register in the PFP + + updates the current context, if needed + + generate interrupt from the command stream + + copy sequencer instruction memory to system memory + + + + + + + + sets draw initiator flags register in PFP, gets bitwise-ORed into + every draw initiator + + + sets the register protection mode + + + + + + load high level sequencer command + + + Conditionally load a IB based on a flag, prefetch enabled + + Conditionally load a IB based on a flag, prefetch disabled + + Load a buffer with pre-fetch enabled + + Set bin (?) + + + test 2 memory locations to dword values specified + + + Write register, ignoring context state for context sensitive registers + + + Record the real-time when this packet is processed by PFP + + + + + + PFP waits until the FIFO between the PFP and the ME is empty + + + + + Used a bit like CP_SET_CONSTANT on a2xx, but can write multiple + groups of registers. Looks like it can be used to create state + objects in GPU memory, and on state change only emit pointer + (via CP_SET_DRAW_STATE), which should be nice for reducing CPU + overhead: + + (A4x) save PM4 stream pointers to execute upon a visible draw + + + + + + + + + + Enable or disable predication globally. Also resets the + predicate to "passing" and the local bit to enabled when + enabling global predication. + + + + + Enable or disable predication locally. Unlike globally enabling + predication, this packet doesn't touch any other state. + Predication only happens when enabled globally and locally and a + predicate has been set. This should be used for internal draws + which aren't supposed to use the predication state: + + CP_DRAW_PRED_ENABLE_LOCAL(0) + ... do draw... + CP_DRAW_PRED_ENABLE_LOCAL(1) + + + + + Latch a draw predicate into the internal register. 
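Following the CP_DRAW_PRED_ENABLE_LOCAL description above, an internal draw that must ignore the application's predication state would be bracketed like this (illustrative pseudo-emission in Python, not a real driver API):

    def emit_internal_draw(cs, emit_draw):
        cs.append(("CP_DRAW_PRED_ENABLE_LOCAL", 0))  # ignore current predicate
        emit_draw(cs)                                # ... do draw ...
        cs.append(("CP_DRAW_PRED_ENABLE_LOCAL", 1))  # predication back on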
+ + + + + for A4xx + Write to register with address that does not fit into type-0 pkt + + + + copy from ME scratch RAM to a register + + + Copy from REG to ME scratch RAM + + + Wait for memory writes to complete + + + Conditional execution based on register comparison + + + Memory to REG copy + + + + + + + for a5xx + + + + + + Tells CP the current mode of GPU operation + + Instruct CP to set a few internal CP registers + + + + + + + Enables IB2 skipping. If both GLOBAL and LOCAL are 1 and + nothing is left in the visibility stream, then + CP_INDIRECT_BUFFER will be skipped, and draws will early return + from their IB. + + + + + + + + + + + + + + + + + + + + General purpose 2D blit engine for image transfers and mipmap + generation. Reads through UCHE, writes through the CCU cache in + the PS stage. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Write CP_CONTEXT_SWITCH_*_INFO from CP to the following dwords, + and forcibly switch to the indicated context. + + + + + + + + + + These first appear in a650_sqe.bin. They can in theory be used + to loop any sequence of IB1 commands, but in practice they are + used to loop over bins. There is a fixed-size per-iteration + prefix, used to set per-bin state, and then the following IB1 + commands are executed until CP_END_BIN which are always the same + for each iteration and usually contain a list of + CP_INDIRECT_BUFFER calls to IB2 commands which setup state and + execute restore/draw/save commands. This replaces the previous + technique of just repeating the CP_INDIRECT_BUFFER calls and + "unrolling" the loop. + + + + + Make next dword 1 to disable preemption, 0 to re-enable it. + + + + + + + + + Can clear BV/BR counters, or wait until one catches up to another + + Clears, adds to local, or adds to global timestamp + + + + + Write to a scratch memory that is read by CP_REG_TEST with + SOURCE_SCRATCH_MEM set. It's not the same scratch as scratch registers. + However it uses the same memory space. + + + + + Executes an array of fixed-size command buffers where each + buffer is assumed to have one draw call, skipping buffers with + non-visible draw calls. + + + + Reset various on-chip state used for synchronization + + + + + + Load state, a3xx (and later?) 
+ + + + + + + + + + + + + + + + + inline with the CP_LOAD_STATE packet + + + + + in buffer pointed to by EXT_SRC_ADDR + + + + + + + + + + + + + + + + + + Load state, a4xx+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Load state, a6xx+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + SS6_UBO used by the a6xx vulkan blob with tesselation constants + in this case, EXT_SRC_ADDR is (ubo_id shl 16 | offset) + to load constants from a UBO loaded with DST_OFF = 14 and offset 0, + EXT_SRC_ADDR = 0xe0000 + (offset is a guess, should be in bytes given that maxUniformBufferRange=64k) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + DST_OFF same as in CP_LOAD_STATE6 - vec4 VS const at this offset will + be updated for each draw to {draw_id, first_vertex, first_instance, 0} + value of 0 disables it + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Read a 64-bit value at the given address and + test if it equals/doesn't equal 0. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + value at offset 0 always seems to be 0x00000000.. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Like CP_SET_BIN_DATA5, but set the pointers as offsets from the + pointers stored in VSC_PIPE_{DATA,DATA2,SIZE}_ADDRESS. Useful + for Vulkan where these values aren't known when the command + stream is recorded. + + + + + + + + + + + + + + + + + + + + + + + + Modifies DST_REG using two sources that can either be registers + or immediates. If SRC1_ADD is set, then do the following: + + $dst = (($dst & $src0) rot $rotate) + $src1 + + Otherwise: + + $dst = (($dst & $src0) rot $rotate) | $src1 + + Here "rot" means rotate left. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Like CP_REG_TO_MEM, but the memory address to write to can be + offsetted using either one or two registers or scratch + registers. + + + + + + + + + + + + + + + + + + + + + + + + Like CP_REG_TO_MEM, but the memory address to write to can be + offsetted using a DWORD in memory. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Wait until a memory value is greater than or equal to the + reference, using signed comparison. + + + + + + + + + + + + + + + + + + + This uses the same internal comparison as CP_COND_WRITE, + but waits until the comparison is true instead. It busy-loops in + the CP for the given number of cycles before trying again. 
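The CP_REG_RMW update described above can be modelled directly (Python sketch of the documented formula, assuming 32-bit register values and a left rotate):

    def rotl32(v, n):
        n &= 31
        return ((v << n) | (v >> (32 - n))) & 0xffffffff

    def cp_reg_rmw(dst, src0, src1, rotate, src1_add):
        v = rotl32(dst & src0, rotate)
        if src1_add:
            return (v + src1) & 0xffffffff  # $dst = (($dst & $src0) rot r) + $src1
        return v | src1                     # $dst = (($dst & $src0) rot r) | $src1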
+ + + + + + + + + + + + + + + + + + + + + + + + + + + Waits for REG0 to not be 0 or REG1 to not equal REF + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Tell CP the current operation mode, indicates save and restore procedure + + + + + + + + + + + + + + + + + + + + + + + + + + Set internal CP registers, used to indicate context save data addresses + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Tests bit in specified register and sets predicate for CP_COND_REG_EXEC. + So: + + opcode: CP_REG_TEST (39) (2 dwords) + { REG = 0xc10 | BIT = 0 } + 0000: 70b90001 00000c10 + opcode: CP_COND_REG_EXEC (47) (3 dwords) + 0000: 70c70002 10000000 00000004 + opcode: CP_INDIRECT_BUFFER (3f) (4 dwords) + + Will execute the CP_INDIRECT_BUFFER only if b0 in the register at + offset 0x0c10 is 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Executes the following DWORDs of commands if the dword at ADDR0 + is not equal to 0 and the dword at ADDR1 is less than REF + (signed comparison). + + + + + + + + + + + + + + + + + + + + + + + + Used by the userspace driver to set various IB's which are + executed during context save/restore for handling + state that isn't restored by the + context switch routine itself. + + + + Executed unconditionally when switching back to the context. + + + + Executed when switching back after switching + away during execution of + a CP_SET_MARKER packet with RM6_YIELD as the + payload *and* the normal save routine was + bypassed for a shorter one. I think this is + connected to the "skipsaverestore" bit set by + the kernel when preempting. + + + + + Executed when switching away from the context, + except for context switches initiated via + CP_YIELD. + + + + + This can only be set by the RB (i.e. the kernel) + and executes with protected mode off, but + is otherwise similar to SAVE_IB. + + Note, kgsl calls this CP_KMD_AMBLE_TYPE + + + + + + + + + + + + + + + + + + + Keep shadow copies of these registers and only set them + when drawing, avoiding redundant writes: + - VPC_CNTL_0 + - HLSQ_CONTROL_1_REG + - HLSQ_UNKNOWN_B980 + + + + Track RB_RENDER_CNTL, and insert a WFI in the following + situation: + - There is a write that disables binning + - There was a draw with binning left enabled, but in + BYPASS mode + Presumably this is a hang workaround? + + + + Do a mysterious CP_EVENT_WRITE 0x3f when the low bit of + the data to write is 0. Used by the Vulkan blob with + PC_MULTIVIEW_CNTL, but this isn't predicated on particular + register(s) like the others. 
+ + + + Tracks GRAS_LRZ_CNTL::GREATER, GRAS_LRZ_CNTL::DIR, and + GRAS_LRZ_DEPTH_VIEW with previous values, and if one of + the following is true: + - GRAS_LRZ_CNTL::GREATER has changed + - GRAS_LRZ_CNTL::DIR has changed, the old value is not + CUR_DIR_GE, and the new value is not CUR_DIR_DISABLED + - GRAS_LRZ_DEPTH_VIEW has changed + then it does a LRZ_FLUSH with GRAS_LRZ_CNTL::ENABLE + forced to 1. + Only exists in a650_sqe.fw. + + + + + + + + + + + + + Note that the SMMU's definition of TTBRn can take different forms + depending on the pgtable format. But a5xx+ only uses aarch64 + format. + + + + + + + + + + Unused, does not apply to aarch64 pgtable format + + + + + + + + + + + + + Size of prefix for each bin. For each bin index i, the + prefix commands at PREFIX_ADDR + i * PREFIX_DWORDS are + executed in an IB2 before the IB1 commands following + this packet. + + + + Number of dwords after this packet until CP_END_BIN + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Best guess is that it is a faster way to fetch all the VSC_STATE registers + and keep them in a local scratch memory instead of fetching every time + when skipping IBs. + + + + + + Scratch memory size is 48 dwords` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/drivers/gpu/drm/msm/registers/display/dsi.xml b/drivers/gpu/drm/msm/registers/display/dsi.xml new file mode 100644 index 0000000000..501ffc585a --- /dev/null +++ b/drivers/gpu/drm/msm/registers/display/dsi.xml @@ -0,0 +1,390 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_10nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_10nm.xml new file mode 100644 index 0000000000..874c3db3e1 --- /dev/null +++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_10nm.xml @@ -0,0 +1,102 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_14nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_14nm.xml new file mode 100644 index 0000000000..314b74489d --- /dev/null +++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_14nm.xml @@ -0,0 +1,135 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_20nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_20nm.xml new file mode 100644 index 
0000000000..99e9deb361
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_20nm.xml
@@ -0,0 +1,100 @@
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_28nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_28nm.xml
new file mode 100644
index 0000000000..81d5b96f18
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_28nm.xml
@@ -0,0 +1,180 @@
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_28nm_8960.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_28nm_8960.xml
new file mode 100644
index 0000000000..4c4de4dda6
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_28nm_8960.xml
@@ -0,0 +1,134 @@
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
new file mode 100644
index 0000000000..d54b72f924
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -0,0 +1,230 @@
diff --git a/drivers/gpu/drm/msm/registers/display/edp.xml b/drivers/gpu/drm/msm/registers/display/edp.xml
new file mode 100644
index 0000000000..354f90eb6d
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/edp.xml
@@ -0,0 +1,239 @@
diff --git a/drivers/gpu/drm/msm/registers/display/hdmi.xml b/drivers/gpu/drm/msm/registers/display/hdmi.xml
new file mode 100644
index 0000000000..6c81581016
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/hdmi.xml
@@ -0,0 +1,1015 @@
diff --git a/drivers/gpu/drm/msm/registers/display/mdp4.xml b/drivers/gpu/drm/msm/registers/display/mdp4.xml
new file mode 100644
index 0000000000..6abb4a3c04
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/mdp4.xml
@@ -0,0 +1,504 @@
+ pipe names, index into PIPE[]
+ appears to map pipe to mixer stage
+ 8bit characters per pixel minus 1
diff --git a/drivers/gpu/drm/msm/registers/display/mdp5.xml b/drivers/gpu/drm/msm/registers/display/mdp5.xml
new file mode 100644
index 0000000000..92f3263af1
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/mdp5.xml
@@ -0,0 +1,806 @@
+ 8bit characters per pixel minus 1
diff --git a/drivers/gpu/drm/msm/registers/display/mdp_common.xml b/drivers/gpu/drm/msm/registers/display/mdp_common.xml
new file mode 100644
index 0000000000..f1b6345c13
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/mdp_common.xml
@@ -0,0 +1,90 @@
+ bits per component (non-alpha channel)
+ bits per component (alpha channel)
diff --git a/drivers/gpu/drm/msm/registers/display/msm.xml b/drivers/gpu/drm/msm/registers/display/msm.xml
new file mode 100644
index 0000000000..429c35b73b
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/msm.xml
@@ -0,0 +1,32 @@
+ Register definitions for the display related hw blocks on
+ msm/snapdragon
diff --git a/drivers/gpu/drm/msm/registers/display/sfpb.xml b/drivers/gpu/drm/msm/registers/display/sfpb.xml
new file mode 100644
index 0000000000..de1cf43c13
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/sfpb.xml
@@ -0,0 +1,17 @@
diff --git a/drivers/gpu/drm/msm/registers/freedreno_copyright.xml b/drivers/gpu/drm/msm/registers/freedreno_copyright.xml
new file mode 100644
index 0000000000..854efdd2e5
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/freedreno_copyright.xml
@@ -0,0 +1,40 @@
+ Initial Author.
+ many a3xx/a4xx contributions
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ + + + + diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py new file mode 100644 index 0000000000..3926485bb1 --- /dev/null +++ b/drivers/gpu/drm/msm/registers/gen_header.py @@ -0,0 +1,971 @@ +#!/usr/bin/python3 +# +# Copyright © 2019-2024 Google, Inc. +# +# SPDX-License-Identifier: MIT + +import xml.parsers.expat +import sys +import os +import collections +import argparse +import time +import datetime + +class Error(Exception): + def __init__(self, message): + self.message = message + +class Enum(object): + def __init__(self, name): + self.name = name + self.values = [] + + def has_name(self, name): + for (n, value) in self.values: + if n == name: + return True + return False + + def names(self): + return [n for (n, value) in self.values] + + def dump(self): + use_hex = False + for (name, value) in self.values: + if value > 0x1000: + use_hex = True + + print("enum %s {" % self.name) + for (name, value) in self.values: + if use_hex: + print("\t%s = 0x%08x," % (name, value)) + else: + print("\t%s = %d," % (name, value)) + print("};\n") + + def dump_pack_struct(self): + pass + +class Field(object): + def __init__(self, name, low, high, shr, type, parser): + self.name = name + self.low = low + self.high = high + self.shr = shr + self.type = type + + builtin_types = [ None, "a3xx_regid", "boolean", "uint", "hex", "int", "fixed", "ufixed", "float", "address", "waddress" ] + + maxpos = parser.current_bitsize - 1 + + if low < 0 or low > maxpos: + raise parser.error("low attribute out of range: %d" % low) + if high < 0 or high > maxpos: + raise parser.error("high attribute out of range: %d" % high) + if high < low: + raise parser.error("low is greater than high: low=%d, high=%d" % (low, high)) + if self.type == "boolean" and not low == high: + raise parser.error("booleans should be 1 bit fields") + elif self.type == "float" and not (high - low == 31 or high - low == 15): + raise parser.error("floats should be 16 or 32 bit fields") + elif not self.type in builtin_types and not self.type in parser.enums: + raise parser.error("unknown type '%s'" % self.type) + + def ctype(self, var_name): + if self.type == None: + type = "uint32_t" + val = var_name + elif self.type == "boolean": + type = "bool" + val = var_name + elif self.type == "uint" or self.type == "hex" or self.type == "a3xx_regid": + type = "uint32_t" + val = var_name + elif self.type == "int": + type = "int32_t" + val = var_name + elif self.type == "fixed": + type = "float" + val = "((int32_t)(%s * %d.0))" % (var_name, 1 << self.radix) + elif self.type == "ufixed": + type = "float" + val = "((uint32_t)(%s * %d.0))" % (var_name, 1 << self.radix) + elif self.type == "float" and self.high - self.low == 31: + type = "float" + val = "fui(%s)" % var_name + elif self.type == "float" and self.high - self.low == 15: + type = "float" + val = "_mesa_float_to_half(%s)" % var_name + elif self.type in [ "address", "waddress" ]: + type = "uint64_t" + val = var_name + else: + type = "enum %s" % self.type + val = var_name + + if self.shr > 0: + val = "(%s >> %d)" % (val, self.shr) + + return (type, val) + +def tab_to(name, value): + tab_count = (68 - (len(name) & ~7)) // 8 + if tab_count <= 0: + tab_count = 1 + print(name + ('\t' * tab_count) + value) + +def mask(low, high): + return ((0xffffffffffffffff >> (64 - (high + 1 - low))) << low) + +def field_name(reg, f): + if f.name: + name = f.name.lower() + else: + # We hit this path when a reg is defined with no bitset fields, ie. 
+ # + name = reg.name.lower() + + if (name in [ "double", "float", "int" ]) or not (name[0].isalpha()): + name = "_" + name + + return name + +# indices - array of (ctype, stride, __offsets_NAME) +def indices_varlist(indices): + return ", ".join(["i%d" % i for i in range(len(indices))]) + +def indices_prototype(indices): + return ", ".join(["%s i%d" % (ctype, idx) + for (idx, (ctype, stride, offset)) in enumerate(indices)]) + +def indices_strides(indices): + return " + ".join(["0x%x*i%d" % (stride, idx) + if stride else + "%s(i%d)" % (offset, idx) + for (idx, (ctype, stride, offset)) in enumerate(indices)]) + +class Bitset(object): + def __init__(self, name, template): + self.name = name + self.inline = False + if template: + self.fields = template.fields[:] + else: + self.fields = [] + + # Get address field if there is one in the bitset, else return None: + def get_address_field(self): + for f in self.fields: + if f.type in [ "address", "waddress" ]: + return f + return None + + def dump_regpair_builder(self, reg): + print("#ifndef NDEBUG") + known_mask = 0 + for f in self.fields: + known_mask |= mask(f.low, f.high) + if f.type in [ "boolean", "address", "waddress" ]: + continue + type, val = f.ctype("fields.%s" % field_name(reg, f)) + print(" assert((%-40s & 0x%08x) == 0);" % (val, 0xffffffff ^ mask(0 , f.high - f.low))) + print(" assert((%-40s & 0x%08x) == 0);" % ("fields.unknown", known_mask)) + print("#endif\n") + + print(" return (struct fd_reg_pair) {") + if reg.array: + print(" .reg = REG_%s(__i)," % reg.full_name) + else: + print(" .reg = REG_%s," % reg.full_name) + + print(" .value =") + for f in self.fields: + if f.type in [ "address", "waddress" ]: + continue + else: + type, val = f.ctype("fields.%s" % field_name(reg, f)) + print(" (%-40s << %2d) |" % (val, f.low)) + value_name = "dword" + if reg.bit_size == 64: + value_name = "qword" + print(" fields.unknown | fields.%s," % (value_name,)) + + address = self.get_address_field() + if address: + print(" .bo = fields.bo,") + print(" .is_address = true,") + if f.type == "waddress": + print(" .bo_write = true,") + print(" .bo_offset = fields.bo_offset,") + print(" .bo_shift = %d," % address.shr) + print(" .bo_low = %d," % address.low) + + print(" };") + + def dump_pack_struct(self, reg=None): + if not reg: + return + + prefix = reg.full_name + + print("struct %s {" % prefix) + for f in self.fields: + if f.type in [ "address", "waddress" ]: + tab_to(" __bo_type", "bo;") + tab_to(" uint32_t", "bo_offset;") + continue + name = field_name(reg, f) + + type, val = f.ctype("var") + + tab_to(" %s" % type, "%s;" % name) + if reg.bit_size == 64: + tab_to(" uint64_t", "unknown;") + tab_to(" uint64_t", "qword;") + else: + tab_to(" uint32_t", "unknown;") + tab_to(" uint32_t", "dword;") + print("};\n") + + if reg.array: + print("static inline struct fd_reg_pair\npack_%s(uint32_t __i, struct %s fields)\n{" % + (prefix, prefix)) + else: + print("static inline struct fd_reg_pair\npack_%s(struct %s fields)\n{" % + (prefix, prefix)) + + self.dump_regpair_builder(reg) + + print("\n}\n") + + if self.get_address_field(): + skip = ", { .reg = 0 }" + else: + skip = "" + + if reg.array: + print("#define %s(__i, ...) pack_%s(__i, __struct_cast(%s) { __VA_ARGS__ })%s\n" % + (prefix, prefix, prefix, skip)) + else: + print("#define %s(...) 
pack_%s(__struct_cast(%s) { __VA_ARGS__ })%s\n" % + (prefix, prefix, prefix, skip)) + + + def dump(self, prefix=None): + if prefix == None: + prefix = self.name + for f in self.fields: + if f.name: + name = prefix + "_" + f.name + else: + name = prefix + + if not f.name and f.low == 0 and f.shr == 0 and not f.type in ["float", "fixed", "ufixed"]: + pass + elif f.type == "boolean" or (f.type == None and f.low == f.high): + tab_to("#define %s" % name, "0x%08x" % (1 << f.low)) + else: + tab_to("#define %s__MASK" % name, "0x%08x" % mask(f.low, f.high)) + tab_to("#define %s__SHIFT" % name, "%d" % f.low) + type, val = f.ctype("val") + + print("static inline uint32_t %s(%s val)\n{" % (name, type)) + if f.shr > 0: + print("\tassert(!(val & 0x%x));" % mask(0, f.shr - 1)) + print("\treturn ((%s) << %s__SHIFT) & %s__MASK;\n}" % (val, name, name)) + print() + +class Array(object): + def __init__(self, attrs, domain, variant, parent, index_type): + if "name" in attrs: + self.local_name = attrs["name"] + else: + self.local_name = "" + self.domain = domain + self.variant = variant + self.parent = parent + if self.parent: + self.name = self.parent.name + "_" + self.local_name + else: + self.name = self.local_name + if "offsets" in attrs: + self.offsets = map(lambda i: "0x%08x" % int(i, 0), attrs["offsets"].split(",")) + self.fixed_offsets = True + elif "doffsets" in attrs: + self.offsets = map(lambda s: "(%s)" % s , attrs["doffsets"].split(",")) + self.fixed_offsets = True + else: + self.offset = int(attrs["offset"], 0) + self.stride = int(attrs["stride"], 0) + self.fixed_offsets = False + if "index" in attrs: + self.index_type = index_type + else: + self.index_type = None + self.length = int(attrs["length"], 0) + if "usage" in attrs: + self.usages = attrs["usage"].split(',') + else: + self.usages = None + + def index_ctype(self): + if not self.index_type: + return "uint32_t" + else: + return "enum %s" % self.index_type.name + + # Generate array of (ctype, stride, __offsets_NAME) + def indices(self): + if self.parent: + indices = self.parent.indices() + else: + indices = [] + if self.length != 1: + if self.fixed_offsets: + indices.append((self.index_ctype(), None, "__offset_%s" % self.local_name)) + else: + indices.append((self.index_ctype(), self.stride, None)) + return indices + + def total_offset(self): + offset = 0 + if not self.fixed_offsets: + offset += self.offset + if self.parent: + offset += self.parent.total_offset() + return offset + + def dump(self): + proto = indices_varlist(self.indices()) + strides = indices_strides(self.indices()) + array_offset = self.total_offset() + if self.fixed_offsets: + print("static inline uint32_t __offset_%s(%s idx)" % (self.local_name, self.index_ctype())) + print("{\n\tswitch (idx) {") + if self.index_type: + for val, offset in zip(self.index_type.names(), self.offsets): + print("\t\tcase %s: return %s;" % (val, offset)) + else: + for idx, offset in enumerate(self.offsets): + print("\t\tcase %d: return %s;" % (idx, offset)) + print("\t\tdefault: return INVALID_IDX(idx);") + print("\t}\n}") + if proto == '': + tab_to("#define REG_%s_%s" % (self.domain, self.name), "0x%08x\n" % array_offset) + else: + tab_to("#define REG_%s_%s(%s)" % (self.domain, self.name, proto), "(0x%08x + %s )\n" % (array_offset, strides)) + + def dump_pack_struct(self): + pass + + def dump_regpair_builder(self): + pass + +class Reg(object): + def __init__(self, attrs, domain, array, bit_size): + self.name = attrs["name"] + self.domain = domain + self.array = array + self.offset = 
int(attrs["offset"], 0) + self.type = None + self.bit_size = bit_size + if array: + self.name = array.name + "_" + self.name + self.full_name = self.domain + "_" + self.name + if "stride" in attrs: + self.stride = int(attrs["stride"], 0) + self.length = int(attrs["length"], 0) + else: + self.stride = None + self.length = None + + # Generate array of (ctype, stride, __offsets_NAME) + def indices(self): + if self.array: + indices = self.array.indices() + else: + indices = [] + if self.stride: + indices.append(("uint32_t", self.stride, None)) + return indices + + def total_offset(self): + if self.array: + return self.array.total_offset() + self.offset + else: + return self.offset + + def dump(self): + proto = indices_prototype(self.indices()) + strides = indices_strides(self.indices()) + offset = self.total_offset() + if proto == '': + tab_to("#define REG_%s" % self.full_name, "0x%08x" % offset) + else: + print("static inline uint32_t REG_%s(%s) { return 0x%08x + %s; }" % (self.full_name, proto, offset, strides)) + + if self.bitset.inline: + self.bitset.dump(self.full_name) + + def dump_pack_struct(self): + if self.bitset.inline: + self.bitset.dump_pack_struct(self) + + def dump_regpair_builder(self): + if self.bitset.inline: + self.bitset.dump_regpair_builder(self) + + def dump_py(self): + print("\tREG_%s = 0x%08x" % (self.full_name, self.offset)) + + +class Parser(object): + def __init__(self): + self.current_array = None + self.current_domain = None + self.current_prefix = None + self.current_prefix_type = None + self.current_stripe = None + self.current_bitset = None + self.current_bitsize = 32 + # The varset attribute on the domain specifies the enum which + # specifies all possible hw variants: + self.current_varset = None + # Regs that have multiple variants.. 
we only generated the C++ + # template based struct-packers for these + self.variant_regs = {} + # Information in which contexts regs are used, to be used in + # debug options + self.usage_regs = collections.defaultdict(list) + self.bitsets = {} + self.enums = {} + self.variants = set() + self.file = [] + self.xml_files = [] + self.copyright_year = None + self.authors = [] + self.license = None + + def error(self, message): + parser, filename = self.stack[-1] + return Error("%s:%d:%d: %s" % (filename, parser.CurrentLineNumber, parser.CurrentColumnNumber, message)) + + def prefix(self, variant=None): + if self.current_prefix_type == "variant" and variant: + return variant + elif self.current_stripe: + return self.current_stripe + "_" + self.current_domain + elif self.current_prefix: + return self.current_prefix + "_" + self.current_domain + else: + return self.current_domain + + def parse_field(self, name, attrs): + try: + if "pos" in attrs: + high = low = int(attrs["pos"], 0) + elif "high" in attrs and "low" in attrs: + high = int(attrs["high"], 0) + low = int(attrs["low"], 0) + else: + low = 0 + high = self.current_bitsize - 1 + + if "type" in attrs: + type = attrs["type"] + else: + type = None + + if "shr" in attrs: + shr = int(attrs["shr"], 0) + else: + shr = 0 + + b = Field(name, low, high, shr, type, self) + + if type == "fixed" or type == "ufixed": + b.radix = int(attrs["radix"], 0) + + self.current_bitset.fields.append(b) + except ValueError as e: + raise self.error(e) + + def parse_varset(self, attrs): + # Inherit the varset from the enclosing domain if not overriden: + varset = self.current_varset + if "varset" in attrs: + varset = self.enums[attrs["varset"]] + return varset + + def parse_variants(self, attrs): + if not "variants" in attrs: + return None + variant = attrs["variants"].split(",")[0] + if "-" in variant: + variant = variant[:variant.index("-")] + + varset = self.parse_varset(attrs) + + assert varset.has_name(variant) + + return variant + + def add_all_variants(self, reg, attrs, parent_variant): + # TODO this should really handle *all* variants, including dealing + # with open ended ranges (ie. "A2XX,A4XX-") (we have the varset + # enum now to make that possible) + variant = self.parse_variants(attrs) + if not variant: + variant = parent_variant + + if reg.name not in self.variant_regs: + self.variant_regs[reg.name] = {} + else: + # All variants must be same size: + v = next(iter(self.variant_regs[reg.name])) + assert self.variant_regs[reg.name][v].bit_size == reg.bit_size + + self.variant_regs[reg.name][variant] = reg + + def add_all_usages(self, reg, usages): + if not usages: + return + + for usage in usages: + self.usage_regs[usage].append(reg) + + self.variants.add(reg.domain) + + def do_validate(self, schemafile): + if not self.validate: + return + + try: + from lxml import etree + + parser, filename = self.stack[-1] + dirname = os.path.dirname(filename) + + # we expect this to look like schema.xsd.. I think + # technically it is supposed to be just a URL, but that doesn't + # quite match up to what we do.. Just skip over everything up to + # and including the first whitespace character: + schemafile = schemafile[schemafile.rindex(" ")+1:] + + # this is a bit cheezy, but the xml file to validate could be + # in a child director, ie. we don't really know where the schema + # file is, the way the rnn C code does. 
So if it doesn't exist + # just look one level up + if not os.path.exists(dirname + "/" + schemafile): + schemafile = "../" + schemafile + + if not os.path.exists(dirname + "/" + schemafile): + raise self.error("Cannot find schema for: " + filename) + + xmlschema_doc = etree.parse(dirname + "/" + schemafile) + xmlschema = etree.XMLSchema(xmlschema_doc) + + xml_doc = etree.parse(filename) + if not xmlschema.validate(xml_doc): + error_str = str(xmlschema.error_log.filter_from_errors()[0]) + raise self.error("Schema validation failed for: " + filename + "\n" + error_str) + except ImportError as e: + if self.validate: + raise e + + print("lxml not found, skipping validation", file=sys.stderr) + + def do_parse(self, filename): + filepath = os.path.abspath(filename) + if filepath in self.xml_files: + return + self.xml_files.append(filepath) + file = open(filename, "rb") + parser = xml.parsers.expat.ParserCreate() + self.stack.append((parser, filename)) + parser.StartElementHandler = self.start_element + parser.EndElementHandler = self.end_element + parser.CharacterDataHandler = self.character_data + parser.buffer_text = True + parser.ParseFile(file) + self.stack.pop() + file.close() + + def parse(self, rnn_path, filename, validate): + self.path = rnn_path + self.stack = [] + self.validate = validate + self.do_parse(filename) + + def parse_reg(self, attrs, bit_size): + self.current_bitsize = bit_size + if "type" in attrs and attrs["type"] in self.bitsets: + bitset = self.bitsets[attrs["type"]] + if bitset.inline: + self.current_bitset = Bitset(attrs["name"], bitset) + self.current_bitset.inline = True + else: + self.current_bitset = bitset + else: + self.current_bitset = Bitset(attrs["name"], None) + self.current_bitset.inline = True + if "type" in attrs: + self.parse_field(None, attrs) + + variant = self.parse_variants(attrs) + if not variant and self.current_array: + variant = self.current_array.variant + + self.current_reg = Reg(attrs, self.prefix(variant), self.current_array, bit_size) + self.current_reg.bitset = self.current_bitset + + if len(self.stack) == 1: + self.file.append(self.current_reg) + + if variant is not None: + self.add_all_variants(self.current_reg, attrs, variant) + + usages = None + if "usage" in attrs: + usages = attrs["usage"].split(',') + elif self.current_array: + usages = self.current_array.usages + + self.add_all_usages(self.current_reg, usages) + + def start_element(self, name, attrs): + self.cdata = "" + if name == "import": + filename = attrs["file"] + self.do_parse(os.path.join(self.path, filename)) + elif name == "domain": + self.current_domain = attrs["name"] + if "prefix" in attrs: + self.current_prefix = self.parse_variants(attrs) + self.current_prefix_type = attrs["prefix"] + else: + self.current_prefix = None + self.current_prefix_type = None + if "varset" in attrs: + self.current_varset = self.enums[attrs["varset"]] + elif name == "stripe": + self.current_stripe = self.parse_variants(attrs) + elif name == "enum": + self.current_enum_value = 0 + self.current_enum = Enum(attrs["name"]) + self.enums[attrs["name"]] = self.current_enum + if len(self.stack) == 1: + self.file.append(self.current_enum) + elif name == "value": + if "value" in attrs: + value = int(attrs["value"], 0) + else: + value = self.current_enum_value + self.current_enum.values.append((attrs["name"], value)) + elif name == "reg32": + self.parse_reg(attrs, 32) + elif name == "reg64": + self.parse_reg(attrs, 64) + elif name == "array": + self.current_bitsize = 32 + variant = 
self.parse_variants(attrs) + index_type = self.enums[attrs["index"]] if "index" in attrs else None + self.current_array = Array(attrs, self.prefix(variant), variant, self.current_array, index_type) + if len(self.stack) == 1: + self.file.append(self.current_array) + elif name == "bitset": + self.current_bitset = Bitset(attrs["name"], None) + if "inline" in attrs and attrs["inline"] == "yes": + self.current_bitset.inline = True + self.bitsets[self.current_bitset.name] = self.current_bitset + if len(self.stack) == 1 and not self.current_bitset.inline: + self.file.append(self.current_bitset) + elif name == "bitfield" and self.current_bitset: + self.parse_field(attrs["name"], attrs) + elif name == "database": + self.do_validate(attrs["xsi:schemaLocation"]) + elif name == "copyright": + self.copyright_year = attrs["year"] + elif name == "author": + self.authors.append(attrs["name"] + " <" + attrs["email"] + "> " + attrs["name"]) + + def end_element(self, name): + if name == "domain": + self.current_domain = None + self.current_prefix = None + self.current_prefix_type = None + elif name == "stripe": + self.current_stripe = None + elif name == "bitset": + self.current_bitset = None + elif name == "reg32": + self.current_reg = None + elif name == "array": + self.current_array = self.current_array.parent + elif name == "enum": + self.current_enum = None + elif name == "license": + self.license = self.cdata + + def character_data(self, data): + self.cdata += data + + def dump_reg_usages(self): + d = collections.defaultdict(list) + for usage, regs in self.usage_regs.items(): + for reg in regs: + variants = self.variant_regs.get(reg.name) + if variants: + for variant, vreg in variants.items(): + if reg == vreg: + d[(usage, variant)].append(reg) + else: + for variant in self.variants: + d[(usage, variant)].append(reg) + + print("#ifdef __cplusplus") + + for usage, regs in self.usage_regs.items(): + print("template constexpr inline uint16_t %s_REGS[] = {};" % (usage.upper())) + + for (usage, variant), regs in d.items(): + offsets = [] + + for reg in regs: + if reg.array: + for i in range(reg.array.length): + offsets.append(reg.array.offset + reg.offset + i * reg.array.stride) + if reg.bit_size == 64: + offsets.append(offsets[-1] + 1) + else: + offsets.append(reg.offset) + if reg.bit_size == 64: + offsets.append(offsets[-1] + 1) + + offsets.sort() + + print("template<> constexpr inline uint16_t %s_REGS<%s>[] = {" % (usage.upper(), variant)) + for offset in offsets: + print("\t%s," % hex(offset)) + print("};") + + print("#endif") + + def dump(self): + enums = [] + bitsets = [] + regs = [] + for e in self.file: + if isinstance(e, Enum): + enums.append(e) + elif isinstance(e, Bitset): + bitsets.append(e) + else: + regs.append(e) + + for e in enums + bitsets + regs: + e.dump() + + self.dump_reg_usages() + + + def dump_regs_py(self): + regs = [] + for e in self.file: + if isinstance(e, Reg): + regs.append(e) + + for e in regs: + e.dump_py() + + + def dump_reg_variants(self, regname, variants): + # Don't bother for things that only have a single variant: + if len(variants) == 1: + return + print("#ifdef __cplusplus") + print("struct __%s {" % regname) + # TODO be more clever.. we should probably figure out which + # fields have the same type in all variants (in which they + # appear) and stuff everything else in a variant specific + # sub-structure. 
+ seen_fields = [] + bit_size = 32 + array = False + address = None + for variant in variants.keys(): + print(" /* %s fields: */" % variant) + reg = variants[variant] + bit_size = reg.bit_size + array = reg.array + for f in reg.bitset.fields: + fld_name = field_name(reg, f) + if fld_name in seen_fields: + continue + seen_fields.append(fld_name) + name = fld_name.lower() + if f.type in [ "address", "waddress" ]: + if address: + continue + address = f + tab_to(" __bo_type", "bo;") + tab_to(" uint32_t", "bo_offset;") + continue + type, val = f.ctype("var") + tab_to(" %s" %type, "%s;" %name) + print(" /* fallback fields: */") + if bit_size == 64: + tab_to(" uint64_t", "unknown;") + tab_to(" uint64_t", "qword;") + else: + tab_to(" uint32_t", "unknown;") + tab_to(" uint32_t", "dword;") + print("};") + # TODO don't hardcode the varset enum name + varenum = "chip" + print("template <%s %s>" % (varenum, varenum.upper())) + print("static inline struct fd_reg_pair") + xtra = "" + xtravar = "" + if array: + xtra = "int __i, " + xtravar = "__i, " + print("__%s(%sstruct __%s fields) {" % (regname, xtra, regname)) + for variant in variants.keys(): + print(" if (%s == %s) {" % (varenum.upper(), variant)) + reg = variants[variant] + reg.dump_regpair_builder() + print(" } else") + print(" assert(!\"invalid variant\");") + print("}") + + if bit_size == 64: + skip = ", { .reg = 0 }" + else: + skip = "" + + print("#define %s(VARIANT, %s...) __%s(%s{__VA_ARGS__})%s" % (regname, xtravar, regname, xtravar, skip)) + print("#endif /* __cplusplus */") + + def dump_structs(self): + for e in self.file: + e.dump_pack_struct() + + for regname in self.variant_regs: + self.dump_reg_variants(regname, self.variant_regs[regname]) + + +def dump_c(args, guard, func): + p = Parser() + + try: + p.parse(args.rnn, args.xml, args.validate) + except Error as e: + print(e, file=sys.stderr) + exit(1) + + print("#ifndef %s\n#define %s\n" % (guard, guard)) + + print("""/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng gen_header.py tool in this git repository: +http://gitlab.freedesktop.org/mesa/mesa/ +git clone https://gitlab.freedesktop.org/mesa/mesa.git + +The rules-ng-ng source files this header was generated from are: +""") + maxlen = 0 + for filepath in p.xml_files: + maxlen = max(maxlen, len(filepath)) + for filepath in p.xml_files: + pad = " " * (maxlen - len(filepath)) + filesize = str(os.path.getsize(filepath)) + filesize = " " * (7 - len(filesize)) + filesize + filetime = time.ctime(os.path.getmtime(filepath)) + print("- " + filepath + pad + " (" + filesize + " bytes, from " + filetime + ")") + if p.copyright_year: + current_year = str(datetime.date.today().year) + print() + print("Copyright (C) %s-%s by the following authors:" % (p.copyright_year, current_year)) + for author in p.authors: + print("- " + author) + if p.license: + print(p.license) + print("*/") + + print() + print("#ifdef __KERNEL__") + print("#include ") + print("#define assert(x) BUG_ON(!(x))") + print("#else") + print("#include ") + print("#endif") + print() + + print("#ifdef __cplusplus") + print("#define __struct_cast(X)") + print("#else") + print("#define __struct_cast(X) (struct X)") + print("#endif") + print() + + func(p) + + print("\n#endif /* %s */" % guard) + + +def dump_c_defines(args): + guard = str.replace(os.path.basename(args.xml), '.', '_').upper() + dump_c(args, guard, lambda p: p.dump()) + + +def dump_c_pack_structs(args): + guard = str.replace(os.path.basename(args.xml), '.', '_').upper() + '_STRUCTS' + dump_c(args, guard, lambda p: p.dump_structs()) + + +def dump_py_defines(args): + p = Parser() + + try: + p.parse(args.rnn, args.xml) + except Error as e: + print(e, file=sys.stderr) + exit(1) + + file_name = os.path.splitext(os.path.basename(args.xml))[0] + + print("from enum import IntEnum") + print("class %sRegs(IntEnum):" % file_name.upper()) + + os.path.basename(args.xml) + + p.dump_regs_py() + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--rnn', type=str, required=True) + parser.add_argument('--xml', type=str, required=True) + parser.add_argument('--validate', default=False, action='store_true') + parser.add_argument('--no-validate', dest='validate', action='store_false') + + subparsers = parser.add_subparsers() + subparsers.required = True + + parser_c_defines = subparsers.add_parser('c-defines') + parser_c_defines.set_defaults(func=dump_c_defines) + + parser_c_pack_structs = subparsers.add_parser('c-pack-structs') + parser_c_pack_structs.set_defaults(func=dump_c_pack_structs) + + parser_py_defines = subparsers.add_parser('py-defines') + parser_py_defines.set_defaults(func=dump_py_defines) + + args = parser.parse_args() + args.func(args) + + +if __name__ == '__main__': + main() diff --git a/drivers/gpu/drm/msm/registers/rules-fd.xsd b/drivers/gpu/drm/msm/registers/rules-fd.xsd new file mode 100644 index 0000000000..2eedb099a4 --- /dev/null +++ b/drivers/gpu/drm/msm/registers/rules-fd.xsd @@ -0,0 +1,404 @@ + + + + + + An updated version of the old rules.xml file from the + RivaTV project. Specifications by Pekka Paalanen, + preliminary attempt by KoalaBR, + first working version by Jakob Bornecrantz. + For specifications, see the file rules-ng-format.txt + in Nouveau CVS module 'rules-ng'. 
+ + Version 0.1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + register database author + + + + + + + + + + + + nickType + + + + + + + + + databaseType + + + + + + + + + + importType + + + + + + + copyrightType + + + + + + + + + + + + + domainType + + + + + + + + + + + + + + + + arrayType + + + + + + + + + + + + + + + + + + + + + stripeType + + + + + + + + + + + + + + + registerType used by reg32, reg64 + + + + + + + + + + + + + + + + + + + + + + + + + + + bitsetType + + + + + + + + + + + + + + bitfieldType + + + + + + + + + + + + + + + + + + + + + enumType + + + + + + + + + + + + + + + valueType + + + + + + + + + + + + + + + + + brief documentation, no markup + + + + + + + + + + + root element of documentation sub-tree + + + + + + + + + + + + + for bold, underline, italics + + + + + + + + + + + + + + + + + + + definition of a list, ordered or unordered + + + + + + + + + + + items of a list + + + + + + + + + + + + + + + + + + + + + + + + HexOrNumber + + + + + + + + + + + + + + + + + + DomainWidth + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild index cf6b3a80c0..c32c01827c 100644 --- a/drivers/gpu/drm/nouveau/Kbuild +++ b/drivers/gpu/drm/nouveau/Kbuild @@ -1,10 +1,8 @@ -NOUVEAU_PATH ?= $(srctree) - # SPDX-License-Identifier: MIT -ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include -ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include/nvkm -ccflags-y += -I $(NOUVEAU_PATH)/$(src)/nvkm -ccflags-y += -I $(NOUVEAU_PATH)/$(src) +ccflags-y += -I $(src)/include +ccflags-y += -I $(src)/include/nvkm +ccflags-y += -I $(src)/nvkm +ccflags-y += -I $(src) # NVKM - HW resource manager #- code also used by various userspace tools/tests diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c index 9c942fbd83..5936b6b3b1 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crc.c +++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: MIT +#include #include + #include #include #include diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 47e53e17b4..d56909071d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -315,11 +315,21 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { if (init->fb_ctxdma_handle == ~0) { switch (init->tt_ctxdma_handle) { - case 0x01: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR ; break; - case 0x02: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; break; - case 0x04: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP ; break; - case 0x08: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD ; break; - case 0x30: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE ; break; + case NOUVEAU_FIFO_ENGINE_GR: + engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR; + break; + case NOUVEAU_FIFO_ENGINE_VP: + engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; + break; + case NOUVEAU_FIFO_ENGINE_PPP: + engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP; + break; + case NOUVEAU_FIFO_ENGINE_BSP: + engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD; + break; + case NOUVEAU_FIFO_ENGINE_CE: + engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE; + break; default: return nouveau_abi16_put(abi16, -ENOSYS); } diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h index 11c8c4a800..661b901d8e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.h +++ 
b/drivers/gpu/drm/nouveau/nouveau_abi16.h @@ -50,18 +50,6 @@ struct drm_nouveau_grobj_alloc { int class; }; -struct drm_nouveau_notifierobj_alloc { - uint32_t channel; - uint32_t handle; - uint32_t size; - uint32_t offset; -}; - -struct drm_nouveau_gpuobj_free { - int channel; - uint32_t handle; -}; - struct drm_nouveau_setparam { uint64_t param; uint64_t value; diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 79cfab53f8..8c3c1f1e01 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -43,11 +43,6 @@ #define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg) #define LOG_OLD_VALUE(x) -struct init_exec { - bool execute; - bool repeat; -}; - static bool nv_cksum(const uint8_t *data, unsigned int length) { /* diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 186add400e..70fb003a66 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -461,17 +461,14 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain, set_placement_range(nvbo, domain); } -int -nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig) +int nouveau_bo_pin_locked(struct nouveau_bo *nvbo, uint32_t domain, bool contig) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; bool force = false, evict = false; - int ret; + int ret = 0; - ret = ttm_bo_reserve(bo, false, false, NULL); - if (ret) - return ret; + dma_resv_assert_held(bo->base.resv); if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) { @@ -534,20 +531,15 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig) out: if (force && ret) nvbo->contig = false; - ttm_bo_unreserve(bo); return ret; } -int -nouveau_bo_unpin(struct nouveau_bo *nvbo) +void nouveau_bo_unpin_locked(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; - int ret; - ret = ttm_bo_reserve(bo, false, false, NULL); - if (ret) - return ret; + dma_resv_assert_held(bo->base.resv); ttm_bo_unpin(&nvbo->bo); if (!nvbo->bo.pin_count) { @@ -562,8 +554,33 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) break; } } +} + +int nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig) +{ + struct ttm_buffer_object *bo = &nvbo->bo; + int ret; + ret = ttm_bo_reserve(bo, false, false, NULL); + if (ret) + return ret; + ret = nouveau_bo_pin_locked(nvbo, domain, contig); + ttm_bo_unreserve(bo); + + return ret; +} + +int nouveau_bo_unpin(struct nouveau_bo *nvbo) +{ + struct ttm_buffer_object *bo = &nvbo->bo; + int ret; + + ret = ttm_bo_reserve(bo, false, false, NULL); + if (ret) + return ret; + nouveau_bo_unpin_locked(nvbo); ttm_bo_unreserve(bo); + return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index e9dfab6a81..4e891752c2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -85,6 +85,8 @@ int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain, u32 tile_mode, u32 tile_flags, struct sg_table *sg, struct dma_resv *robj, struct nouveau_bo **); +int nouveau_bo_pin_locked(struct nouveau_bo *nvbo, uint32_t domain, bool contig); +void nouveau_bo_unpin_locked(struct nouveau_bo *nvbo); int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig); int nouveau_bo_unpin(struct nouveau_bo *); int nouveau_bo_map(struct 
nouveau_bo *); diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 60c3224421..d4725a9688 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -83,7 +83,7 @@ static bool nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos, ktime_t *stime, ktime_t *etime) { - struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)]; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); struct nvif_head *head = &nouveau_crtc(crtc)->head; struct nvif_head_scanoutpos_v0 args; int retry = 20; diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index a72c458094..bcda010516 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -181,7 +181,7 @@ nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector, if (nouveau_mst) { mstm = outp->dp.mstm; if (mstm) - mstm->can_mst = drm_dp_read_mst_cap(aux, dpcd); + mstm->can_mst = drm_dp_read_mst_cap(aux, dpcd) == DRM_DP_MST; } if (nouveau_dp_has_sink_count(connector, outp)) { diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index 1b2ff0c40f..b58ab595fa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -89,18 +89,18 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj) int ret; /* pin buffer into GTT */ - ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false); + ret = nouveau_bo_pin_locked(nvbo, NOUVEAU_GEM_DOMAIN_GART, false); if (ret) - return -EINVAL; + ret = -EINVAL; - return 0; + return ret; } void nouveau_gem_prime_unpin(struct drm_gem_object *obj) { struct nouveau_bo *nvbo = nouveau_gem_object(obj); - nouveau_bo_unpin(nvbo); + nouveau_bo_unpin_locked(nvbo); } struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj, diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c index 4d1aaee8fe..1d19c87eae 100644 --- a/drivers/gpu/drm/nouveau/nvif/object.c +++ b/drivers/gpu/drm/nouveau/nvif/object.c @@ -142,11 +142,16 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size) struct nvif_ioctl_v0 ioctl; struct nvif_ioctl_mthd_v0 mthd; } *args; + u32 args_size; u8 stack[128]; int ret; - if (sizeof(*args) + size > sizeof(stack)) { - if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) + if (check_add_overflow(sizeof(*args), size, &args_size)) + return -ENOMEM; + + if (args_size > sizeof(stack)) { + args = kmalloc(args_size, GFP_KERNEL); + if (!args) return -ENOMEM; } else { args = (void *)stack; @@ -157,7 +162,7 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size) args->mthd.method = mthd; memcpy(args->mthd.data, data, size); - ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL); + ret = nvif_object_ioctl(object, args, args_size, NULL); memcpy(data, args->mthd.data, size); if (args != (void *)stack) kfree(args); @@ -276,7 +281,15 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle, object->map.size = 0; if (parent) { - if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) { + u32 args_size; + + if (check_add_overflow(sizeof(*args), size, &args_size)) { + nvif_object_dtor(object); + return -ENOMEM; + } + + args = kmalloc(args_size, GFP_KERNEL); + if (!args) { nvif_object_dtor(object); return -ENOMEM; } @@ -293,8 +306,7 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle, 
args->new.oclass = oclass; memcpy(args->new.data, data, size); - ret = nvif_object_ioctl(parent, args, sizeof(*args) + size, - &object->priv); + ret = nvif_object_ioctl(parent, args, args_size, &object->priv); memcpy(data, args->new.data, size); kfree(args); if (ret == 0) diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 9753c1e1f9..1aca306033 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -1212,7 +1212,6 @@ struct platform_driver omap_dmm_driver = { .probe = omap_dmm_probe, .remove_new = omap_dmm_remove, .driver = { - .owner = THIS_MODULE, .name = DMM_DRIVER_NAME, .of_match_table = of_match_ptr(dmm_of_match), .pm = &omap_dmm_pm_ops, diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 1d414b33fe..449d521c78 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -5,6 +5,7 @@ */ #include +#include #include #include diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 3421e83892..9ea0c64c26 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 5b15d02948..2ae0eb0638 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -335,6 +335,19 @@ config DRM_PANEL_LG_LG4573 Say Y here if you want to enable support for LG4573 RGB panel. To compile this driver as a module, choose M here. +config DRM_PANEL_LG_SW43408 + tristate "LG SW43408 panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_HELPER + help + Say Y here if you want to enable support for LG sw43408 panel. + The panel has a 1080x2160@60Hz resolution and uses 24 bit RGB per + pixel. It provides a MIPI DSI interface to the host and has a + built-in LED backlight. + config DRM_PANEL_MAGNACHIP_D53E6EA8966 tristate "Magnachip D53E6EA8966 DSI panel" depends on OF && SPI @@ -542,6 +555,18 @@ config DRM_PANEL_RAYDIUM_RM692E5 Say Y here if you want to enable support for Raydium RM692E5-based display panels, such as the one found in the Fairphone 5 smartphone. +config DRM_PANEL_RAYDIUM_RM69380 + tristate "Raydium RM69380-based DSI panel" + depends on OF && GPIOLIB + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Raydium RM69380-based + display panels. + + This panel controller can be found in the Lenovo Xiaoxin Pad Pro 2021 + in combination with an EDO OLED panel. + config DRM_PANEL_RONBO_RB070D30 tristate "Ronbo Electronics RB070D30 panel" depends on OF @@ -564,7 +589,7 @@ config DRM_PANEL_SAMSUNG_ATNA33XC20 depends on PM select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER - select DRM_DP_AUX_BUS + select DRM_DISPLAY_DP_AUX_BUS help DRM panel driver for the Samsung ATNA33XC20 panel. This panel can't be handled by the DRM_PANEL_SIMPLE driver because its power @@ -586,6 +611,15 @@ config DRM_PANEL_SAMSUNG_LD9040 depends on BACKLIGHT_CLASS_DEVICE select VIDEOMODE_HELPERS +config DRM_PANEL_SAMSUNG_S6E3FA7 + tristate "Samsung S6E3FA7 panel driver" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for the Samsung S6E3FA7 + 1920x2220 panel. 
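Editorial aside: the Kconfig entries added just above (DRM_PANEL_LG_SW43408, DRM_PANEL_RAYDIUM_RM69380, DRM_PANEL_SAMSUNG_S6E3FA7) only declare the new panel drivers; the Makefile hunks further down wire each CONFIG_DRM_PANEL_* symbol to an object file. For orientation, the sketch below shows the boilerplate a MIPI-DSI panel module of this kind typically registers. It is an illustrative assumption only — the structure names, compatible string, and callbacks are not taken from the drivers this patch adds.

/* Minimal, illustrative MIPI-DSI panel module skeleton (not from this patch). */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/of.h>

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>

struct example_panel {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
};

/* prepare/enable/get_modes callbacks omitted in this sketch. */
static const struct drm_panel_funcs example_panel_funcs = { };

static int example_panel_probe(struct mipi_dsi_device *dsi)
{
	struct example_panel *ctx;
	int ret;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	/* Hook the panel into DRM and attach to the DSI host. */
	drm_panel_init(&ctx->panel, &dsi->dev, &example_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret)
		drm_panel_remove(&ctx->panel);

	return ret;
}

static void example_panel_remove(struct mipi_dsi_device *dsi)
{
	struct example_panel *ctx = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(dsi);
	drm_panel_remove(&ctx->panel);
}

static const struct of_device_id example_panel_of_match[] = {
	{ .compatible = "vendor,example-panel" },	/* assumed, illustrative */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_panel_of_match);

static struct mipi_dsi_driver example_panel_driver = {
	.probe = example_panel_probe,
	.remove = example_panel_remove,
	.driver = {
		.name = "panel-example",
		.of_match_table = example_panel_of_match,
	},
};
module_mipi_dsi_driver(example_panel_driver);

MODULE_DESCRIPTION("Illustrative DSI panel skeleton");
MODULE_LICENSE("GPL");

A real driver additionally fills in drm_panel_funcs with the panel's init sequence and mode, as the panel hunks later in this patch do.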
+ config DRM_PANEL_SAMSUNG_S6D16D0 tristate "Samsung S6D16D0 DSI video mode panel" depends on OF @@ -796,7 +830,7 @@ config DRM_PANEL_EDP select VIDEOMODE_HELPERS select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER - select DRM_DP_AUX_BUS + select DRM_DISPLAY_DP_AUX_BUS select DRM_KMS_HELPER help DRM panel driver for dumb eDP panels that need at most a regulator and diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index f156d7fa0b..f0203f6e02 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o +obj-$(CONFIG_DRM_PANEL_LG_SW43408) += panel-lg-sw43408.o obj-$(CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966) += panel-magnachip-d53e6ea8966.o obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3051D) += panel-newvision-nv3051d.o @@ -55,6 +56,7 @@ obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67191) += panel-raydium-rm67191.o obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o +obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM69380) += panel-raydium-rm69380.o obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o @@ -62,6 +64,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0) += panel-samsung-s6d7aa0.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3FA7) += panel-samsung-s6e3fa7.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index 0ffe8f8c01..83c604ba3e 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -1507,7 +1507,11 @@ static int boe_panel_prepare(struct drm_panel *panel) usleep_range(10000, 11000); if (boe->desc->lp11_before_reset) { - mipi_dsi_dcs_nop(boe->dsi); + ret = mipi_dsi_dcs_nop(boe->dsi); + if (ret < 0) { + dev_err(&boe->dsi->dev, "Failed to send NOP: %d\n", ret); + goto poweroff; + } usleep_range(1000, 2000); } gpiod_set_value(boe->enable_gpio, 1); @@ -1528,13 +1532,13 @@ static int boe_panel_prepare(struct drm_panel *panel) return 0; poweroff: + gpiod_set_value(boe->enable_gpio, 0); regulator_disable(boe->avee); poweroffavdd: regulator_disable(boe->avdd); poweroff1v8: usleep_range(5000, 7000); regulator_disable(boe->pp1800); - gpiod_set_value(boe->enable_gpio, 0); return ret; } diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 745f3e48f0..6db277efcb 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -210,15 +210,12 @@ struct panel_desc { * struct edp_panel_entry - Maps panel ID to delay / panel name. 
*/ struct edp_panel_entry { - /** @panel_id: 32-bit ID for panel, encoded with drm_edid_encode_panel_id(). */ - u32 panel_id; + /** @ident: edid identity used for panel matching. */ + const struct drm_edid_ident ident; /** @delay: The power sequencing delays needed for this panel. */ const struct panel_delay *delay; - /** @name: Name of this panel (for printing to logs). */ - const char *name; - /** @override_edid_mode: Override the mode obtained by edid. */ const struct drm_display_mode *override_edid_mode; }; @@ -245,7 +242,7 @@ struct panel_edp { const struct edp_panel_entry *detected_panel; - struct edid *edid; + const struct drm_edid *drm_edid; struct drm_display_mode override_mode; @@ -620,13 +617,16 @@ static int panel_edp_get_modes(struct drm_panel *panel, if (p->ddc) { pm_runtime_get_sync(panel->dev); - if (!p->edid) - p->edid = drm_get_edid(connector, p->ddc); + if (!p->drm_edid) + p->drm_edid = drm_edid_read_ddc(connector, p->ddc); + + drm_edid_connector_update(connector, p->drm_edid); + /* * If both edid and hard-coded modes exists, skip edid modes to * avoid multiple preferred modes. */ - if (p->edid && !has_hard_coded_modes) { + if (p->drm_edid && !has_hard_coded_modes) { if (has_override_edid_mode) { /* * override_edid_mode is specified. Use @@ -635,7 +635,7 @@ static int panel_edp_get_modes(struct drm_panel *panel, num += panel_edp_override_edid_mode(p, connector, p->detected_panel->override_edid_mode); } else { - num += drm_add_edid_modes(connector, p->edid); + num += drm_edid_connector_add_modes(connector); } } @@ -691,7 +691,7 @@ static int detected_panel_show(struct seq_file *s, void *data) else if (!p->detected_panel) seq_puts(s, "HARDCODED\n"); else - seq_printf(s, "%s\n", p->detected_panel->name); + seq_printf(s, "%s\n", p->detected_panel->ident.name); return 0; } @@ -761,11 +761,31 @@ static void panel_edp_parse_panel_timing_node(struct device *dev, dev_err(dev, "Reject override mode: No display_timing found\n"); } -static const struct edp_panel_entry *find_edp_panel(u32 panel_id); +static const struct edp_panel_entry *find_edp_panel(u32 panel_id, const struct drm_edid *edid); + +static void panel_edp_set_conservative_timings(struct panel_edp *panel, struct panel_desc *desc) +{ + /* + * It's highly likely that the panel will work if we use very + * conservative timings, so let's do that. + * + * Nearly all panels have a "unprepare" delay of 500 ms though + * there are a few with 1000. Let's stick 2000 in just to be + * super conservative. + * + * An "enable" delay of 80 ms seems the most common, but we'll + * throw in 200 ms to be safe. 
+ */ + desc->delay.unprepare = 2000; + desc->delay.enable = 200; + + panel->detected_panel = ERR_PTR(-EINVAL); +} static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) { struct panel_desc *desc; + const struct drm_edid *base_block; u32 panel_id; char vend[4]; u16 product_id; @@ -791,19 +811,26 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) /* Power the panel on so we can read the EDID */ ret = pm_runtime_get_sync(dev); if (ret < 0) { - dev_err(dev, "Couldn't power on panel to read EDID: %d\n", ret); + dev_err(dev, + "Couldn't power on panel to ID it; using conservative timings: %d\n", + ret); + panel_edp_set_conservative_timings(panel, desc); goto exit; } - panel_id = drm_edid_get_panel_id(panel->ddc); - if (!panel_id) { - dev_err(dev, "Couldn't identify panel via EDID\n"); - ret = -EIO; + base_block = drm_edid_read_base_block(panel->ddc); + if (base_block) { + panel_id = drm_edid_get_panel_id(base_block); + } else { + dev_err(dev, "Couldn't read EDID for ID; using conservative timings\n"); + panel_edp_set_conservative_timings(panel, desc); goto exit; } drm_edid_decode_panel_id(panel_id, vend, &product_id); - panel->detected_panel = find_edp_panel(panel_id); + panel->detected_panel = find_edp_panel(panel_id, base_block); + + drm_edid_free(base_block); /* * We're using non-optimized timings and want it really obvious that @@ -814,40 +841,20 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) dev_warn(dev, "Unknown panel %s %#06x, using conservative timings\n", vend, product_id); - - /* - * It's highly likely that the panel will work if we use very - * conservative timings, so let's do that. We already know that - * the HPD-related delays must have worked since we got this - * far, so we really just need the "unprepare" / "enable" - * delays. We don't need "prepare_to_enable" since that - * overlaps the "enable" delay anyway. - * - * Nearly all panels have a "unprepare" delay of 500 ms though - * there are a few with 1000. Let's stick 2000 in just to be - * super conservative. - * - * An "enable" delay of 80 ms seems the most common, but we'll - * throw in 200 ms to be safe. - */ - desc->delay.unprepare = 2000; - desc->delay.enable = 200; - - panel->detected_panel = ERR_PTR(-EINVAL); + panel_edp_set_conservative_timings(panel, desc); } else { dev_info(dev, "Detected %s %s (%#06x)\n", - vend, panel->detected_panel->name, product_id); + vend, panel->detected_panel->ident.name, product_id); /* Update the delay; everything else comes from EDID */ desc->delay = *panel->detected_panel->delay; } - ret = 0; exit: pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); - return ret; + return 0; } static int panel_edp_probe(struct device *dev, const struct panel_desc *desc, @@ -940,8 +947,14 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc, err = drm_panel_dp_aux_backlight(&panel->base, panel->aux); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); + + /* + * Warn if we get an error, but don't consider it fatal. Having + * a panel where we can't control the backlight is better than + * no panel. 
+ */ if (err) - goto err_finished_pm_runtime; + dev_warn(dev, "failed to register dp aux backlight: %d\n", err); } drm_panel_add(&panel->base); @@ -971,8 +984,8 @@ static void panel_edp_remove(struct device *dev) if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc)) put_device(&panel->ddc->dev); - kfree(panel->edid); - panel->edid = NULL; + drm_edid_free(panel->drm_edid); + panel->drm_edid = NULL; } static void panel_edp_shutdown(struct device *dev) @@ -1005,6 +1018,19 @@ static const struct panel_desc auo_b101ean01 = { }, }; +static const struct drm_display_mode auo_b116xa3_mode = { + .clock = 70589, + .hdisplay = 1366, + .hsync_start = 1366 + 40, + .hsync_end = 1366 + 40 + 40, + .htotal = 1366 + 40 + 40 + 32, + .vdisplay = 768, + .vsync_start = 768 + 10, + .vsync_end = 768 + 10 + 12, + .vtotal = 768 + 10 + 12 + 6, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + static const struct drm_display_mode auo_b116xak01_mode = { .clock = 69300, .hdisplay = 1366, @@ -1926,17 +1952,21 @@ static const struct panel_delay delay_200_500_e50_po2e200 = { #define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \ { \ - .name = _name, \ - .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \ - product_id), \ + .ident = { \ + .name = _name, \ + .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \ + product_id), \ + }, \ .delay = _delay \ } #define EDP_PANEL_ENTRY2(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name, _mode) \ { \ - .name = _name, \ - .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \ - product_id), \ + .ident = { \ + .name = _name, \ + .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \ + product_id), \ + }, \ .delay = _delay, \ .override_edid_mode = _mode \ } @@ -1960,7 +1990,9 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"), - EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAN04.0"), + EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0", + &auo_b116xa3_mode), EDP_PANEL_ENTRY('A', 'U', 'O', 0x435c, &delay_200_500_e50, "Unknown"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"), @@ -1968,6 +2000,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0607, &delay_200_500_e200, "Unknown"), @@ -2017,6 +2050,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"), EDP_PANEL_ENTRY('C', 
'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"), @@ -2032,6 +2066,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'M', 'N', 0x1156, &delay_200_500_e80_d50, "Unknown"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1157, &delay_200_500_e80_d50, "N116BGE-EA2"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"), @@ -2043,6 +2078,8 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"), + EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"), + EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"), EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "Unknown"), EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"), @@ -2083,15 +2120,25 @@ static const struct edp_panel_entry edp_panels[] = { { /* sentinal */ } }; -static const struct edp_panel_entry *find_edp_panel(u32 panel_id) +static const struct edp_panel_entry *find_edp_panel(u32 panel_id, const struct drm_edid *edid) { const struct edp_panel_entry *panel; if (!panel_id) return NULL; - for (panel = edp_panels; panel->panel_id; panel++) - if (panel->panel_id == panel_id) + /* + * Match with identity first. This allows handling the case where + * vendors incorrectly reused the same panel ID for multiple panels that + * need different settings. If there's no match, try again with panel + * ID, which should be unique. + */ + for (panel = edp_panels; panel->ident.panel_id; panel++) + if (drm_edid_match(edid, &panel->ident)) + return panel; + + for (panel = edp_panels; panel->ident.panel_id; panel++) + if (panel->ident.panel_id == panel_id) return panel; return NULL; diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c index ff0dc08b98..cb9f46e853 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx8394.c +++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c @@ -370,8 +370,7 @@ static int hx8394_enable(struct drm_panel *panel) sleep_in: /* This will probably fail, but let's try orderly power off anyway. 
*/ - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (!ret) + if (!mipi_dsi_dcs_enter_sleep_mode(dsi)) msleep(50); return ret; diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 4c8c317191..084c37fa73 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -455,6 +455,202 @@ static const struct ili9881c_instr k101_im2byl02_init[] = { ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */ }; +static const struct ili9881c_instr kd050hdfia020_init[] = { + ILI9881C_SWITCH_PAGE_INSTR(3), + ILI9881C_COMMAND_INSTR(0x01, 0x00), + ILI9881C_COMMAND_INSTR(0x02, 0x00), + ILI9881C_COMMAND_INSTR(0x03, 0x72), + ILI9881C_COMMAND_INSTR(0x04, 0x00), + ILI9881C_COMMAND_INSTR(0x05, 0x00), + ILI9881C_COMMAND_INSTR(0x06, 0x09), + ILI9881C_COMMAND_INSTR(0x07, 0x00), + ILI9881C_COMMAND_INSTR(0x08, 0x00), + ILI9881C_COMMAND_INSTR(0x09, 0x01), + ILI9881C_COMMAND_INSTR(0x0a, 0x00), + ILI9881C_COMMAND_INSTR(0x0b, 0x00), + ILI9881C_COMMAND_INSTR(0x0c, 0x01), + ILI9881C_COMMAND_INSTR(0x0d, 0x00), + ILI9881C_COMMAND_INSTR(0x0e, 0x00), + ILI9881C_COMMAND_INSTR(0x0f, 0x00), + ILI9881C_COMMAND_INSTR(0x10, 0x00), + ILI9881C_COMMAND_INSTR(0x11, 0x00), + ILI9881C_COMMAND_INSTR(0x12, 0x00), + ILI9881C_COMMAND_INSTR(0x13, 0x00), + ILI9881C_COMMAND_INSTR(0x14, 0x00), + ILI9881C_COMMAND_INSTR(0x15, 0x00), + ILI9881C_COMMAND_INSTR(0x16, 0x00), + ILI9881C_COMMAND_INSTR(0x17, 0x00), + ILI9881C_COMMAND_INSTR(0x18, 0x00), + ILI9881C_COMMAND_INSTR(0x19, 0x00), + ILI9881C_COMMAND_INSTR(0x1a, 0x00), + ILI9881C_COMMAND_INSTR(0x1b, 0x00), + ILI9881C_COMMAND_INSTR(0x1c, 0x00), + ILI9881C_COMMAND_INSTR(0x1d, 0x00), + ILI9881C_COMMAND_INSTR(0x1e, 0x40), + ILI9881C_COMMAND_INSTR(0x1f, 0x80), + ILI9881C_COMMAND_INSTR(0x20, 0x05), + ILI9881C_COMMAND_INSTR(0x20, 0x05), + ILI9881C_COMMAND_INSTR(0x21, 0x02), + ILI9881C_COMMAND_INSTR(0x22, 0x00), + ILI9881C_COMMAND_INSTR(0x23, 0x00), + ILI9881C_COMMAND_INSTR(0x24, 0x00), + ILI9881C_COMMAND_INSTR(0x25, 0x00), + ILI9881C_COMMAND_INSTR(0x26, 0x00), + ILI9881C_COMMAND_INSTR(0x27, 0x00), + ILI9881C_COMMAND_INSTR(0x28, 0x33), + ILI9881C_COMMAND_INSTR(0x29, 0x02), + ILI9881C_COMMAND_INSTR(0x2a, 0x00), + ILI9881C_COMMAND_INSTR(0x2b, 0x00), + ILI9881C_COMMAND_INSTR(0x2c, 0x00), + ILI9881C_COMMAND_INSTR(0x2d, 0x00), + ILI9881C_COMMAND_INSTR(0x2e, 0x00), + ILI9881C_COMMAND_INSTR(0x2f, 0x00), + ILI9881C_COMMAND_INSTR(0x30, 0x00), + ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x32, 0x00), + ILI9881C_COMMAND_INSTR(0x32, 0x00), + ILI9881C_COMMAND_INSTR(0x33, 0x00), + ILI9881C_COMMAND_INSTR(0x34, 0x04), + ILI9881C_COMMAND_INSTR(0x35, 0x00), + ILI9881C_COMMAND_INSTR(0x36, 0x00), + ILI9881C_COMMAND_INSTR(0x37, 0x00), + ILI9881C_COMMAND_INSTR(0x38, 0x3C), + ILI9881C_COMMAND_INSTR(0x39, 0x00), + ILI9881C_COMMAND_INSTR(0x3a, 0x40), + ILI9881C_COMMAND_INSTR(0x3b, 0x40), + ILI9881C_COMMAND_INSTR(0x3c, 0x00), + ILI9881C_COMMAND_INSTR(0x3d, 0x00), + ILI9881C_COMMAND_INSTR(0x3e, 0x00), + ILI9881C_COMMAND_INSTR(0x3f, 0x00), + ILI9881C_COMMAND_INSTR(0x40, 0x00), + ILI9881C_COMMAND_INSTR(0x41, 0x00), + ILI9881C_COMMAND_INSTR(0x42, 0x00), + ILI9881C_COMMAND_INSTR(0x43, 0x00), + ILI9881C_COMMAND_INSTR(0x44, 0x00), + ILI9881C_COMMAND_INSTR(0x50, 0x01), + ILI9881C_COMMAND_INSTR(0x51, 0x23), + ILI9881C_COMMAND_INSTR(0x52, 0x45), + ILI9881C_COMMAND_INSTR(0x53, 0x67), + ILI9881C_COMMAND_INSTR(0x54, 0x89), + ILI9881C_COMMAND_INSTR(0x55, 0xab), + ILI9881C_COMMAND_INSTR(0x56, 0x01), + ILI9881C_COMMAND_INSTR(0x57, 
0x23), + ILI9881C_COMMAND_INSTR(0x58, 0x45), + ILI9881C_COMMAND_INSTR(0x59, 0x67), + ILI9881C_COMMAND_INSTR(0x5a, 0x89), + ILI9881C_COMMAND_INSTR(0x5b, 0xab), + ILI9881C_COMMAND_INSTR(0x5c, 0xcd), + ILI9881C_COMMAND_INSTR(0x5d, 0xef), + ILI9881C_COMMAND_INSTR(0x5e, 0x11), + ILI9881C_COMMAND_INSTR(0x5f, 0x01), + ILI9881C_COMMAND_INSTR(0x60, 0x00), + ILI9881C_COMMAND_INSTR(0x61, 0x15), + ILI9881C_COMMAND_INSTR(0x62, 0x14), + ILI9881C_COMMAND_INSTR(0x63, 0x0E), + ILI9881C_COMMAND_INSTR(0x64, 0x0F), + ILI9881C_COMMAND_INSTR(0x65, 0x0C), + ILI9881C_COMMAND_INSTR(0x66, 0x0D), + ILI9881C_COMMAND_INSTR(0x67, 0x06), + ILI9881C_COMMAND_INSTR(0x68, 0x02), + ILI9881C_COMMAND_INSTR(0x69, 0x07), + ILI9881C_COMMAND_INSTR(0x6a, 0x02), + ILI9881C_COMMAND_INSTR(0x6b, 0x02), + ILI9881C_COMMAND_INSTR(0x6c, 0x02), + ILI9881C_COMMAND_INSTR(0x6d, 0x02), + ILI9881C_COMMAND_INSTR(0x6e, 0x02), + ILI9881C_COMMAND_INSTR(0x6f, 0x02), + ILI9881C_COMMAND_INSTR(0x70, 0x02), + ILI9881C_COMMAND_INSTR(0x71, 0x02), + ILI9881C_COMMAND_INSTR(0x72, 0x02), + ILI9881C_COMMAND_INSTR(0x73, 0x02), + ILI9881C_COMMAND_INSTR(0x74, 0x02), + ILI9881C_COMMAND_INSTR(0x75, 0x01), + ILI9881C_COMMAND_INSTR(0x76, 0x00), + ILI9881C_COMMAND_INSTR(0x77, 0x14), + ILI9881C_COMMAND_INSTR(0x78, 0x15), + ILI9881C_COMMAND_INSTR(0x79, 0x0E), + ILI9881C_COMMAND_INSTR(0x7a, 0x0F), + ILI9881C_COMMAND_INSTR(0x7b, 0x0C), + ILI9881C_COMMAND_INSTR(0x7c, 0x0D), + ILI9881C_COMMAND_INSTR(0x7d, 0x06), + ILI9881C_COMMAND_INSTR(0x7e, 0x02), + ILI9881C_COMMAND_INSTR(0x7f, 0x07), + ILI9881C_COMMAND_INSTR(0x80, 0x02), + ILI9881C_COMMAND_INSTR(0x81, 0x02), + ILI9881C_COMMAND_INSTR(0x83, 0x02), + ILI9881C_COMMAND_INSTR(0x84, 0x02), + ILI9881C_COMMAND_INSTR(0x85, 0x02), + ILI9881C_COMMAND_INSTR(0x86, 0x02), + ILI9881C_COMMAND_INSTR(0x87, 0x02), + ILI9881C_COMMAND_INSTR(0x88, 0x02), + ILI9881C_COMMAND_INSTR(0x89, 0x02), + ILI9881C_COMMAND_INSTR(0x8A, 0x02), + ILI9881C_SWITCH_PAGE_INSTR(0x4), + ILI9881C_COMMAND_INSTR(0x6C, 0x15), + ILI9881C_COMMAND_INSTR(0x6E, 0x2A), + ILI9881C_COMMAND_INSTR(0x6F, 0x33), + ILI9881C_COMMAND_INSTR(0x3A, 0x94), + ILI9881C_COMMAND_INSTR(0x8D, 0x15), + ILI9881C_COMMAND_INSTR(0x87, 0xBA), + ILI9881C_COMMAND_INSTR(0x26, 0x76), + ILI9881C_COMMAND_INSTR(0xB2, 0xD1), + ILI9881C_COMMAND_INSTR(0xB5, 0x06), + ILI9881C_SWITCH_PAGE_INSTR(0x1), + ILI9881C_COMMAND_INSTR(0x22, 0x0A), + ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x53, 0x90), + ILI9881C_COMMAND_INSTR(0x55, 0xA2), + ILI9881C_COMMAND_INSTR(0x50, 0xB7), + ILI9881C_COMMAND_INSTR(0x51, 0xB7), + ILI9881C_COMMAND_INSTR(0x60, 0x22), + ILI9881C_COMMAND_INSTR(0x61, 0x00), + ILI9881C_COMMAND_INSTR(0x62, 0x19), + ILI9881C_COMMAND_INSTR(0x63, 0x10), + ILI9881C_COMMAND_INSTR(0xA0, 0x08), + ILI9881C_COMMAND_INSTR(0xA1, 0x1A), + ILI9881C_COMMAND_INSTR(0xA2, 0x27), + ILI9881C_COMMAND_INSTR(0xA3, 0x15), + ILI9881C_COMMAND_INSTR(0xA4, 0x17), + ILI9881C_COMMAND_INSTR(0xA5, 0x2A), + ILI9881C_COMMAND_INSTR(0xA6, 0x1E), + ILI9881C_COMMAND_INSTR(0xA7, 0x1F), + ILI9881C_COMMAND_INSTR(0xA8, 0x8B), + ILI9881C_COMMAND_INSTR(0xA9, 0x1B), + ILI9881C_COMMAND_INSTR(0xAA, 0x27), + ILI9881C_COMMAND_INSTR(0xAB, 0x78), + ILI9881C_COMMAND_INSTR(0xAC, 0x18), + ILI9881C_COMMAND_INSTR(0xAD, 0x18), + ILI9881C_COMMAND_INSTR(0xAE, 0x4C), + ILI9881C_COMMAND_INSTR(0xAF, 0x21), + ILI9881C_COMMAND_INSTR(0xB0, 0x27), + ILI9881C_COMMAND_INSTR(0xB1, 0x54), + ILI9881C_COMMAND_INSTR(0xB2, 0x67), + ILI9881C_COMMAND_INSTR(0xB3, 0x39), + ILI9881C_COMMAND_INSTR(0xC0, 0x08), + ILI9881C_COMMAND_INSTR(0xC1, 0x1A), + 
ILI9881C_COMMAND_INSTR(0xC2, 0x27), + ILI9881C_COMMAND_INSTR(0xC3, 0x15), + ILI9881C_COMMAND_INSTR(0xC4, 0x17), + ILI9881C_COMMAND_INSTR(0xC5, 0x2A), + ILI9881C_COMMAND_INSTR(0xC6, 0x1E), + ILI9881C_COMMAND_INSTR(0xC7, 0x1F), + ILI9881C_COMMAND_INSTR(0xC8, 0x8B), + ILI9881C_COMMAND_INSTR(0xC9, 0x1B), + ILI9881C_COMMAND_INSTR(0xCA, 0x27), + ILI9881C_COMMAND_INSTR(0xCB, 0x78), + ILI9881C_COMMAND_INSTR(0xCC, 0x18), + ILI9881C_COMMAND_INSTR(0xCD, 0x18), + ILI9881C_COMMAND_INSTR(0xCE, 0x4C), + ILI9881C_COMMAND_INSTR(0xCF, 0x21), + ILI9881C_COMMAND_INSTR(0xD0, 0x27), + ILI9881C_COMMAND_INSTR(0xD1, 0x54), + ILI9881C_COMMAND_INSTR(0xD2, 0x67), + ILI9881C_COMMAND_INSTR(0xD3, 0x39), + ILI9881C_SWITCH_PAGE_INSTR(0), + ILI9881C_COMMAND_INSTR(0x35, 0x00), + ILI9881C_COMMAND_INSTR(0x3A, 0x7), +}; + static const struct ili9881c_instr tl050hdv35_init[] = { ILI9881C_SWITCH_PAGE_INSTR(3), ILI9881C_COMMAND_INSTR(0x01, 0x00), @@ -1177,6 +1373,23 @@ static const struct drm_display_mode k101_im2byl02_default_mode = { .height_mm = 217, }; +static const struct drm_display_mode kd050hdfia020_default_mode = { + .clock = 62000, + + .hdisplay = 720, + .hsync_start = 720 + 10, + .hsync_end = 720 + 10 + 20, + .htotal = 720 + 10 + 20 + 30, + + .vdisplay = 1280, + .vsync_start = 1280 + 10, + .vsync_end = 1280 + 10 + 10, + .vtotal = 1280 + 10 + 10 + 20, + + .width_mm = 62, + .height_mm = 110, +}; + static const struct drm_display_mode tl050hdv35_default_mode = { .clock = 59400, @@ -1345,6 +1558,14 @@ static const struct ili9881c_desc k101_im2byl02_desc = { .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE, }; +static const struct ili9881c_desc kd050hdfia020_desc = { + .init = kd050hdfia020_init, + .init_length = ARRAY_SIZE(kd050hdfia020_init), + .mode = &kd050hdfia020_default_mode, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_MODE_LPM, +}; + static const struct ili9881c_desc tl050hdv35_desc = { .init = tl050hdv35_init, .init_length = ARRAY_SIZE(tl050hdv35_init), @@ -1372,6 +1593,7 @@ static const struct ili9881c_desc am8001280g_desc = { static const struct of_device_id ili9881c_of_match[] = { { .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc }, { .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc }, + { .compatible = "startek,kd050hdfia020", .data = &kd050hdfia020_desc }, { .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc }, { .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc }, { .compatible = "ampire,am8001280g", .data = &am8001280g_desc }, diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c index 267a530704..35ea5494e0 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c @@ -560,7 +560,11 @@ static int ili9882t_prepare(struct drm_panel *panel) usleep_range(10000, 11000); // MIPI needs to keep the LP11 state before the lcm_reset pin is pulled high - mipi_dsi_dcs_nop(ili->dsi); + ret = mipi_dsi_dcs_nop(ili->dsi); + if (ret < 0) { + dev_err(&ili->dsi->dev, "Failed to send NOP: %d\n", ret); + goto poweroff; + } usleep_range(1000, 2000); gpiod_set_value(ili->enable_gpio, 1); @@ -579,13 +583,13 @@ static int ili9882t_prepare(struct drm_panel *panel) return 0; poweroff: + gpiod_set_value(ili->enable_gpio, 0); regulator_disable(ili->avee); poweroffavdd: regulator_disable(ili->avdd); poweroff1v8: usleep_range(5000, 7000); regulator_disable(ili->pp1800); - gpiod_set_value(ili->enable_gpio, 0); return ret; } diff --git 
a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c index 3e0a8e0d58..483dc88d16 100644 --- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c +++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c @@ -247,6 +247,7 @@ static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi) drm_panel_init(&ctx->panel, dev, &jdi_fhd_r63452_panel_funcs, DRM_MODE_CONNECTOR_DSI); + ctx->panel.prepare_prev_first = true; ret = drm_panel_of_backlight(&ctx->panel); if (ret) diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c index b942a01622..c54be0cc3f 100644 --- a/drivers/gpu/drm/panel/panel-khadas-ts050.c +++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c @@ -25,6 +25,7 @@ struct khadas_ts050_panel { struct regulator *supply; struct gpio_desc *reset_gpio; struct gpio_desc *enable_gpio; + struct khadas_ts050_panel_data *panel_data; bool prepared; bool enabled; @@ -32,544 +33,601 @@ struct khadas_ts050_panel { struct khadas_ts050_panel_cmd { u8 cmd; - u8 data; + u8 data[55]; + u8 size; +}; + +struct khadas_ts050_panel_data { + struct khadas_ts050_panel_cmd *init_code; + int len; +}; + +static const struct khadas_ts050_panel_cmd ts050v2_init_code[] = { + {0xB9, {0xFF, 0x83, 0x99}, 0x03}, + {0xBA, {0x63, 0x23, 0x68, 0xCF}, 0x04}, + {0xD2, {0x55}, 0x01}, + {0xB1, {0x02, 0x04, 0x70, 0x90, 0x01, 0x32, 0x33, + 0x11, 0x11, 0x4D, 0x57, 0x56, 0x73, 0x02, 0x02}, 0x0f}, + {0xB2, {0x00, 0x80, 0x80, 0xAE, 0x0A, 0x0E, 0x75, 0x11, 0x00, 0x00, 0x00}, 0x0b}, + {0xB4, {0x00, 0xFF, 0x04, 0xA4, 0x02, 0xA0, 0x00, 0x00, 0x10, 0x00, 0x00, 0x02, + 0x00, 0x24, 0x02, 0x04, 0x0A, 0x21, 0x03, 0x00, 0x00, 0x08, 0xA6, 0x88, + 0x04, 0xA4, 0x02, 0xA0, 0x00, 0x00, 0x10, 0x00, 0x00, 0x02, 0x00, 0x24, + 0x02, 0x04, 0x0A, 0x00, 0x00, 0x08, 0xA6, 0x00, 0x08, 0x11}, 0x2e}, + {0xD3, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, + 0x18, 0x32, 0x10, 0x09, 0x00, 0x09, 0x32, + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x11, 0x00, 0x02, 0x02, 0x03, 0x00, 0x00, 0x00, 0x0A, + 0x40}, 0x21}, + {0xD5, {0x18, 0x18, 0x18, 0x18, 0x21, 0x20, 0x18, 0x18, 0x19, 0x19, 0x19, + 0x19, 0x18, 0x18, 0x18, 0x18, 0x03, 0x02, 0x01, 0x00, 0x2F, 0x2F, + 0x30, 0x30, 0x31, 0x31, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18}, 0x20}, + {0xD6, {0x18, 0x18, 0x18, 0x18, 0x20, 0x21, 0x19, 0x19, 0x18, 0x18, 0x19, + 0x19, 0x18, 0x18, 0x18, 0x18, 0x00, 0x01, 0x02, 0x03, 0x2F, 0x2F, + 0x30, 0x30, 0x31, 0x31, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18}, 0x20}, + {0xD8, {0x0A, 0xBE, 0xFA, 0xA0, 0x0A, 0xBE, 0xFA, 0xA0}, 0x08}, + {0xBD, {0x01}, 0x01}, + {0xD8, {0x0F, 0xFF, 0xFF, 0xE0, 0x0F, 0xFF, 0xFF, 0xE0}, 0x08}, + {0xBD, {0x02}, 0x01}, + {0xD8, {0x0F, 0xFF, 0xFF, 0xE0, 0x0F, 0xFF, 0xFF, 0xE0}, 0x08}, + {0xBD, {0x00}, 0x01}, + {0xE0, {0x01, 0x35, 0x41, 0x3B, 0x79, 0x81, 0x8C, 0x85, 0x8E, + 0x95, 0x9B, 0xA0, 0xA4, 0xAB, 0xB1, 0xB3, 0xB7, 0xC5, 0xBD, 0xC5, + 0xB6, 0xC2, 0xC2, 0x62, 0x5D, 0x66, 0x73, 0x01, 0x35, 0x41, 0x3B, + 0x79, 0x81, 0x8C, 0x85, 0x8E, 0x95, 0x9B, 0xA0, 0xA4, 0xAB, 0xB1, + 0xB3, 0xB7, 0xB5, 0xBD, 0xC5, 0xB6, 0xC2, 0xC2, 0x62, 0x5D, 0x66, + 0x73}, 0x36}, + {0xB6, {0x97, 0x97}, 0x02}, + {0xCC, {0xC8}, 0x02}, + {0xBF, {0x40, 0x41, 0x50, 0x19}, 0x04}, + {0xC6, {0xFF, 0xF9}, 0x02}, + {0xC0, {0x25, 0x5A}, 0x02}, }; /* Only the CMD1 User Command set is documented */ -static const struct khadas_ts050_panel_cmd init_code[] = { +static const struct khadas_ts050_panel_cmd ts050_init_code[] = { /* Select Unknown CMD Page (Undocumented) */ - {0xff, 0xee}, + {0xff, {0xee}, 0x01}, /* Reload CMD1: Don't reload 
default value to register */ - {0xfb, 0x01}, - {0x1f, 0x45}, - {0x24, 0x4f}, - {0x38, 0xc8}, - {0x39, 0x27}, - {0x1e, 0x77}, - {0x1d, 0x0f}, - {0x7e, 0x71}, - {0x7c, 0x03}, - {0xff, 0x00}, - {0xfb, 0x01}, - {0x35, 0x01}, + {0xfb, {0x01}, 0x01}, + {0x1f, {0x45}, 0x01}, + {0x24, {0x4f}, 0x01}, + {0x38, {0xc8}, 0x01}, + {0x39, {0x27}, 0x01}, + {0x1e, {0x77}, 0x01}, + {0x1d, {0x0f}, 0x01}, + {0x7e, {0x71}, 0x01}, + {0x7c, {0x03}, 0x01}, + {0xff, {0x00}, 0x01}, + {0xfb, {0x01}, 0x01}, + {0x35, {0x01}, 0x01}, /* Select CMD2 Page0 (Undocumented) */ - {0xff, 0x01}, + {0xff, {0x01}, 0x01}, /* Reload CMD1: Don't reload default value to register */ - {0xfb, 0x01}, - {0x00, 0x01}, - {0x01, 0x55}, - {0x02, 0x40}, - {0x05, 0x40}, - {0x06, 0x4a}, - {0x07, 0x24}, - {0x08, 0x0c}, - {0x0b, 0x7d}, - {0x0c, 0x7d}, - {0x0e, 0xb0}, - {0x0f, 0xae}, - {0x11, 0x10}, - {0x12, 0x10}, - {0x13, 0x03}, - {0x14, 0x4a}, - {0x15, 0x12}, - {0x16, 0x12}, - {0x18, 0x00}, - {0x19, 0x77}, - {0x1a, 0x55}, - {0x1b, 0x13}, - {0x1c, 0x00}, - {0x1d, 0x00}, - {0x1e, 0x13}, - {0x1f, 0x00}, - {0x23, 0x00}, - {0x24, 0x00}, - {0x25, 0x00}, - {0x26, 0x00}, - {0x27, 0x00}, - {0x28, 0x00}, - {0x35, 0x00}, - {0x66, 0x00}, - {0x58, 0x82}, - {0x59, 0x02}, - {0x5a, 0x02}, - {0x5b, 0x02}, - {0x5c, 0x82}, - {0x5d, 0x82}, - {0x5e, 0x02}, - {0x5f, 0x02}, - {0x72, 0x31}, + {0xfb, {0x01}, 0x01}, + {0x00, {0x01}, 0x01}, + {0x01, {0x55}, 0x01}, + {0x02, {0x40}, 0x01}, + {0x05, {0x40}, 0x01}, + {0x06, {0x4a}, 0x01}, + {0x07, {0x24}, 0x01}, + {0x08, {0x0c}, 0x01}, + {0x0b, {0x7d}, 0x01}, + {0x0c, {0x7d}, 0x01}, + {0x0e, {0xb0}, 0x01}, + {0x0f, {0xae}, 0x01}, + {0x11, {0x10}, 0x01}, + {0x12, {0x10}, 0x01}, + {0x13, {0x03}, 0x01}, + {0x14, {0x4a}, 0x01}, + {0x15, {0x12}, 0x01}, + {0x16, {0x12}, 0x01}, + {0x18, {0x00}, 0x01}, + {0x19, {0x77}, 0x01}, + {0x1a, {0x55}, 0x01}, + {0x1b, {0x13}, 0x01}, + {0x1c, {0x00}, 0x01}, + {0x1d, {0x00}, 0x01}, + {0x1e, {0x13}, 0x01}, + {0x1f, {0x00}, 0x01}, + {0x23, {0x00}, 0x01}, + {0x24, {0x00}, 0x01}, + {0x25, {0x00}, 0x01}, + {0x26, {0x00}, 0x01}, + {0x27, {0x00}, 0x01}, + {0x28, {0x00}, 0x01}, + {0x35, {0x00}, 0x01}, + {0x66, {0x00}, 0x01}, + {0x58, {0x82}, 0x01}, + {0x59, {0x02}, 0x01}, + {0x5a, {0x02}, 0x01}, + {0x5b, {0x02}, 0x01}, + {0x5c, {0x82}, 0x01}, + {0x5d, {0x82}, 0x01}, + {0x5e, {0x02}, 0x01}, + {0x5f, {0x02}, 0x01}, + {0x72, {0x31}, 0x01}, /* Select CMD2 Page4 (Undocumented) */ - {0xff, 0x05}, + {0xff, {0x05}, 0x01}, /* Reload CMD1: Don't reload default value to register */ - {0xfb, 0x01}, - {0x00, 0x01}, - {0x01, 0x0b}, - {0x02, 0x0c}, - {0x03, 0x09}, - {0x04, 0x0a}, - {0x05, 0x00}, - {0x06, 0x0f}, - {0x07, 0x10}, - {0x08, 0x00}, - {0x09, 0x00}, - {0x0a, 0x00}, - {0x0b, 0x00}, - {0x0c, 0x00}, - {0x0d, 0x13}, - {0x0e, 0x15}, - {0x0f, 0x17}, - {0x10, 0x01}, - {0x11, 0x0b}, - {0x12, 0x0c}, - {0x13, 0x09}, - {0x14, 0x0a}, - {0x15, 0x00}, - {0x16, 0x0f}, - {0x17, 0x10}, - {0x18, 0x00}, - {0x19, 0x00}, - {0x1a, 0x00}, - {0x1b, 0x00}, - {0x1c, 0x00}, - {0x1d, 0x13}, - {0x1e, 0x15}, - {0x1f, 0x17}, - {0x20, 0x00}, - {0x21, 0x03}, - {0x22, 0x01}, - {0x23, 0x40}, - {0x24, 0x40}, - {0x25, 0xed}, - {0x29, 0x58}, - {0x2a, 0x12}, - {0x2b, 0x01}, - {0x4b, 0x06}, - {0x4c, 0x11}, - {0x4d, 0x20}, - {0x4e, 0x02}, - {0x4f, 0x02}, - {0x50, 0x20}, - {0x51, 0x61}, - {0x52, 0x01}, - {0x53, 0x63}, - {0x54, 0x77}, - {0x55, 0xed}, - {0x5b, 0x00}, - {0x5c, 0x00}, - {0x5d, 0x00}, - {0x5e, 0x00}, - {0x5f, 0x15}, - {0x60, 0x75}, - {0x61, 0x00}, - {0x62, 0x00}, - {0x63, 0x00}, - {0x64, 0x00}, - {0x65, 0x00}, - {0x66, 0x00}, - {0x67, 
0x00}, - {0x68, 0x04}, - {0x69, 0x00}, - {0x6a, 0x00}, - {0x6c, 0x40}, - {0x75, 0x01}, - {0x76, 0x01}, - {0x7a, 0x80}, - {0x7b, 0xa3}, - {0x7c, 0xd8}, - {0x7d, 0x60}, - {0x7f, 0x15}, - {0x80, 0x81}, - {0x83, 0x05}, - {0x93, 0x08}, - {0x94, 0x10}, - {0x8a, 0x00}, - {0x9b, 0x0f}, - {0xea, 0xff}, - {0xec, 0x00}, + {0xfb, {0x01}, 0x01}, + {0x00, {0x01}, 0x01}, + {0x01, {0x0b}, 0x01}, + {0x02, {0x0c}, 0x01}, + {0x03, {0x09}, 0x01}, + {0x04, {0x0a}, 0x01}, + {0x05, {0x00}, 0x01}, + {0x06, {0x0f}, 0x01}, + {0x07, {0x10}, 0x01}, + {0x08, {0x00}, 0x01}, + {0x09, {0x00}, 0x01}, + {0x0a, {0x00}, 0x01}, + {0x0b, {0x00}, 0x01}, + {0x0c, {0x00}, 0x01}, + {0x0d, {0x13}, 0x01}, + {0x0e, {0x15}, 0x01}, + {0x0f, {0x17}, 0x01}, + {0x10, {0x01}, 0x01}, + {0x11, {0x0b}, 0x01}, + {0x12, {0x0c}, 0x01}, + {0x13, {0x09}, 0x01}, + {0x14, {0x0a}, 0x01}, + {0x15, {0x00}, 0x01}, + {0x16, {0x0f}, 0x01}, + {0x17, {0x10}, 0x01}, + {0x18, {0x00}, 0x01}, + {0x19, {0x00}, 0x01}, + {0x1a, {0x00}, 0x01}, + {0x1b, {0x00}, 0x01}, + {0x1c, {0x00}, 0x01}, + {0x1d, {0x13}, 0x01}, + {0x1e, {0x15}, 0x01}, + {0x1f, {0x17}, 0x01}, + {0x20, {0x00}, 0x01}, + {0x21, {0x03}, 0x01}, + {0x22, {0x01}, 0x01}, + {0x23, {0x40}, 0x01}, + {0x24, {0x40}, 0x01}, + {0x25, {0xed}, 0x01}, + {0x29, {0x58}, 0x01}, + {0x2a, {0x12}, 0x01}, + {0x2b, {0x01}, 0x01}, + {0x4b, {0x06}, 0x01}, + {0x4c, {0x11}, 0x01}, + {0x4d, {0x20}, 0x01}, + {0x4e, {0x02}, 0x01}, + {0x4f, {0x02}, 0x01}, + {0x50, {0x20}, 0x01}, + {0x51, {0x61}, 0x01}, + {0x52, {0x01}, 0x01}, + {0x53, {0x63}, 0x01}, + {0x54, {0x77}, 0x01}, + {0x55, {0xed}, 0x01}, + {0x5b, {0x00}, 0x01}, + {0x5c, {0x00}, 0x01}, + {0x5d, {0x00}, 0x01}, + {0x5e, {0x00}, 0x01}, + {0x5f, {0x15}, 0x01}, + {0x60, {0x75}, 0x01}, + {0x61, {0x00}, 0x01}, + {0x62, {0x00}, 0x01}, + {0x63, {0x00}, 0x01}, + {0x64, {0x00}, 0x01}, + {0x65, {0x00}, 0x01}, + {0x66, {0x00}, 0x01}, + {0x67, {0x00}, 0x01}, + {0x68, {0x04}, 0x01}, + {0x69, {0x00}, 0x01}, + {0x6a, {0x00}, 0x01}, + {0x6c, {0x40}, 0x01}, + {0x75, {0x01}, 0x01}, + {0x76, {0x01}, 0x01}, + {0x7a, {0x80}, 0x01}, + {0x7b, {0xa3}, 0x01}, + {0x7c, {0xd8}, 0x01}, + {0x7d, {0x60}, 0x01}, + {0x7f, {0x15}, 0x01}, + {0x80, {0x81}, 0x01}, + {0x83, {0x05}, 0x01}, + {0x93, {0x08}, 0x01}, + {0x94, {0x10}, 0x01}, + {0x8a, {0x00}, 0x01}, + {0x9b, {0x0f}, 0x01}, + {0xea, {0xff}, 0x01}, + {0xec, {0x00}, 0x01}, /* Select CMD2 Page0 (Undocumented) */ - {0xff, 0x01}, + {0xff, {0x01}, 0x01}, /* Reload CMD1: Don't reload default value to register */ - {0xfb, 0x01}, - {0x75, 0x00}, - {0x76, 0xdf}, - {0x77, 0x00}, - {0x78, 0xe4}, - {0x79, 0x00}, - {0x7a, 0xed}, - {0x7b, 0x00}, - {0x7c, 0xf6}, - {0x7d, 0x00}, - {0x7e, 0xff}, - {0x7f, 0x01}, - {0x80, 0x07}, - {0x81, 0x01}, - {0x82, 0x10}, - {0x83, 0x01}, - {0x84, 0x18}, - {0x85, 0x01}, - {0x86, 0x20}, - {0x87, 0x01}, - {0x88, 0x3d}, - {0x89, 0x01}, - {0x8a, 0x56}, - {0x8b, 0x01}, - {0x8c, 0x84}, - {0x8d, 0x01}, - {0x8e, 0xab}, - {0x8f, 0x01}, - {0x90, 0xec}, - {0x91, 0x02}, - {0x92, 0x22}, - {0x93, 0x02}, - {0x94, 0x23}, - {0x95, 0x02}, - {0x96, 0x55}, - {0x97, 0x02}, - {0x98, 0x8b}, - {0x99, 0x02}, - {0x9a, 0xaf}, - {0x9b, 0x02}, - {0x9c, 0xdf}, - {0x9d, 0x03}, - {0x9e, 0x01}, - {0x9f, 0x03}, - {0xa0, 0x2c}, - {0xa2, 0x03}, - {0xa3, 0x39}, - {0xa4, 0x03}, - {0xa5, 0x47}, - {0xa6, 0x03}, - {0xa7, 0x56}, - {0xa9, 0x03}, - {0xaa, 0x66}, - {0xab, 0x03}, - {0xac, 0x76}, - {0xad, 0x03}, - {0xae, 0x85}, - {0xaf, 0x03}, - {0xb0, 0x90}, - {0xb1, 0x03}, - {0xb2, 0xcb}, - {0xb3, 0x00}, - {0xb4, 0xdf}, - {0xb5, 0x00}, - {0xb6, 0xe4}, - {0xb7, 0x00}, - {0xb8, 
0xed}, - {0xb9, 0x00}, - {0xba, 0xf6}, - {0xbb, 0x00}, - {0xbc, 0xff}, - {0xbd, 0x01}, - {0xbe, 0x07}, - {0xbf, 0x01}, - {0xc0, 0x10}, - {0xc1, 0x01}, - {0xc2, 0x18}, - {0xc3, 0x01}, - {0xc4, 0x20}, - {0xc5, 0x01}, - {0xc6, 0x3d}, - {0xc7, 0x01}, - {0xc8, 0x56}, - {0xc9, 0x01}, - {0xca, 0x84}, - {0xcb, 0x01}, - {0xcc, 0xab}, - {0xcd, 0x01}, - {0xce, 0xec}, - {0xcf, 0x02}, - {0xd0, 0x22}, - {0xd1, 0x02}, - {0xd2, 0x23}, - {0xd3, 0x02}, - {0xd4, 0x55}, - {0xd5, 0x02}, - {0xd6, 0x8b}, - {0xd7, 0x02}, - {0xd8, 0xaf}, - {0xd9, 0x02}, - {0xda, 0xdf}, - {0xdb, 0x03}, - {0xdc, 0x01}, - {0xdd, 0x03}, - {0xde, 0x2c}, - {0xdf, 0x03}, - {0xe0, 0x39}, - {0xe1, 0x03}, - {0xe2, 0x47}, - {0xe3, 0x03}, - {0xe4, 0x56}, - {0xe5, 0x03}, - {0xe6, 0x66}, - {0xe7, 0x03}, - {0xe8, 0x76}, - {0xe9, 0x03}, - {0xea, 0x85}, - {0xeb, 0x03}, - {0xec, 0x90}, - {0xed, 0x03}, - {0xee, 0xcb}, - {0xef, 0x00}, - {0xf0, 0xbb}, - {0xf1, 0x00}, - {0xf2, 0xc0}, - {0xf3, 0x00}, - {0xf4, 0xcc}, - {0xf5, 0x00}, - {0xf6, 0xd6}, - {0xf7, 0x00}, - {0xf8, 0xe1}, - {0xf9, 0x00}, - {0xfa, 0xea}, + {0xfb, {0x01}, 0x01}, + {0x75, {0x00}, 0x01}, + {0x76, {0xdf}, 0x01}, + {0x77, {0x00}, 0x01}, + {0x78, {0xe4}, 0x01}, + {0x79, {0x00}, 0x01}, + {0x7a, {0xed}, 0x01}, + {0x7b, {0x00}, 0x01}, + {0x7c, {0xf6}, 0x01}, + {0x7d, {0x00}, 0x01}, + {0x7e, {0xff}, 0x01}, + {0x7f, {0x01}, 0x01}, + {0x80, {0x07}, 0x01}, + {0x81, {0x01}, 0x01}, + {0x82, {0x10}, 0x01}, + {0x83, {0x01}, 0x01}, + {0x84, {0x18}, 0x01}, + {0x85, {0x01}, 0x01}, + {0x86, {0x20}, 0x01}, + {0x87, {0x01}, 0x01}, + {0x88, {0x3d}, 0x01}, + {0x89, {0x01}, 0x01}, + {0x8a, {0x56}, 0x01}, + {0x8b, {0x01}, 0x01}, + {0x8c, {0x84}, 0x01}, + {0x8d, {0x01}, 0x01}, + {0x8e, {0xab}, 0x01}, + {0x8f, {0x01}, 0x01}, + {0x90, {0xec}, 0x01}, + {0x91, {0x02}, 0x01}, + {0x92, {0x22}, 0x01}, + {0x93, {0x02}, 0x01}, + {0x94, {0x23}, 0x01}, + {0x95, {0x02}, 0x01}, + {0x96, {0x55}, 0x01}, + {0x97, {0x02}, 0x01}, + {0x98, {0x8b}, 0x01}, + {0x99, {0x02}, 0x01}, + {0x9a, {0xaf}, 0x01}, + {0x9b, {0x02}, 0x01}, + {0x9c, {0xdf}, 0x01}, + {0x9d, {0x03}, 0x01}, + {0x9e, {0x01}, 0x01}, + {0x9f, {0x03}, 0x01}, + {0xa0, {0x2c}, 0x01}, + {0xa2, {0x03}, 0x01}, + {0xa3, {0x39}, 0x01}, + {0xa4, {0x03}, 0x01}, + {0xa5, {0x47}, 0x01}, + {0xa6, {0x03}, 0x01}, + {0xa7, {0x56}, 0x01}, + {0xa9, {0x03}, 0x01}, + {0xaa, {0x66}, 0x01}, + {0xab, {0x03}, 0x01}, + {0xac, {0x76}, 0x01}, + {0xad, {0x03}, 0x01}, + {0xae, {0x85}, 0x01}, + {0xaf, {0x03}, 0x01}, + {0xb0, {0x90}, 0x01}, + {0xb1, {0x03}, 0x01}, + {0xb2, {0xcb}, 0x01}, + {0xb3, {0x00}, 0x01}, + {0xb4, {0xdf}, 0x01}, + {0xb5, {0x00}, 0x01}, + {0xb6, {0xe4}, 0x01}, + {0xb7, {0x00}, 0x01}, + {0xb8, {0xed}, 0x01}, + {0xb9, {0x00}, 0x01}, + {0xba, {0xf6}, 0x01}, + {0xbb, {0x00}, 0x01}, + {0xbc, {0xff}, 0x01}, + {0xbd, {0x01}, 0x01}, + {0xbe, {0x07}, 0x01}, + {0xbf, {0x01}, 0x01}, + {0xc0, {0x10}, 0x01}, + {0xc1, {0x01}, 0x01}, + {0xc2, {0x18}, 0x01}, + {0xc3, {0x01}, 0x01}, + {0xc4, {0x20}, 0x01}, + {0xc5, {0x01}, 0x01}, + {0xc6, {0x3d}, 0x01}, + {0xc7, {0x01}, 0x01}, + {0xc8, {0x56}, 0x01}, + {0xc9, {0x01}, 0x01}, + {0xca, {0x84}, 0x01}, + {0xcb, {0x01}, 0x01}, + {0xcc, {0xab}, 0x01}, + {0xcd, {0x01}, 0x01}, + {0xce, {0xec}, 0x01}, + {0xcf, {0x02}, 0x01}, + {0xd0, {0x22}, 0x01}, + {0xd1, {0x02}, 0x01}, + {0xd2, {0x23}, 0x01}, + {0xd3, {0x02}, 0x01}, + {0xd4, {0x55}, 0x01}, + {0xd5, {0x02}, 0x01}, + {0xd6, {0x8b}, 0x01}, + {0xd7, {0x02}, 0x01}, + {0xd8, {0xaf}, 0x01}, + {0xd9, {0x02}, 0x01}, + {0xda, {0xdf}, 0x01}, + {0xdb, {0x03}, 0x01}, + {0xdc, {0x01}, 0x01}, + {0xdd, {0x03}, 
0x01}, + {0xde, {0x2c}, 0x01}, + {0xdf, {0x03}, 0x01}, + {0xe0, {0x39}, 0x01}, + {0xe1, {0x03}, 0x01}, + {0xe2, {0x47}, 0x01}, + {0xe3, {0x03}, 0x01}, + {0xe4, {0x56}, 0x01}, + {0xe5, {0x03}, 0x01}, + {0xe6, {0x66}, 0x01}, + {0xe7, {0x03}, 0x01}, + {0xe8, {0x76}, 0x01}, + {0xe9, {0x03}, 0x01}, + {0xea, {0x85}, 0x01}, + {0xeb, {0x03}, 0x01}, + {0xec, {0x90}, 0x01}, + {0xed, {0x03}, 0x01}, + {0xee, {0xcb}, 0x01}, + {0xef, {0x00}, 0x01}, + {0xf0, {0xbb}, 0x01}, + {0xf1, {0x00}, 0x01}, + {0xf2, {0xc0}, 0x01}, + {0xf3, {0x00}, 0x01}, + {0xf4, {0xcc}, 0x01}, + {0xf5, {0x00}, 0x01}, + {0xf6, {0xd6}, 0x01}, + {0xf7, {0x00}, 0x01}, + {0xf8, {0xe1}, 0x01}, + {0xf9, {0x00}, 0x01}, + {0xfa, {0xea}, 0x01}, /* Select CMD2 Page2 (Undocumented) */ - {0xff, 0x02}, + {0xff, {0x02}, 0x01}, /* Reload CMD1: Don't reload default value to register */ - {0xfb, 0x01}, - {0x00, 0x00}, - {0x01, 0xf4}, - {0x02, 0x00}, - {0x03, 0xef}, - {0x04, 0x01}, - {0x05, 0x07}, - {0x06, 0x01}, - {0x07, 0x28}, - {0x08, 0x01}, - {0x09, 0x44}, - {0x0a, 0x01}, - {0x0b, 0x76}, - {0x0c, 0x01}, - {0x0d, 0xa0}, - {0x0e, 0x01}, - {0x0f, 0xe7}, - {0x10, 0x02}, - {0x11, 0x1f}, - {0x12, 0x02}, - {0x13, 0x22}, - {0x14, 0x02}, - {0x15, 0x54}, - {0x16, 0x02}, - {0x17, 0x8b}, - {0x18, 0x02}, - {0x19, 0xaf}, - {0x1a, 0x02}, - {0x1b, 0xe0}, - {0x1c, 0x03}, - {0x1d, 0x01}, - {0x1e, 0x03}, - {0x1f, 0x2d}, - {0x20, 0x03}, - {0x21, 0x39}, - {0x22, 0x03}, - {0x23, 0x47}, - {0x24, 0x03}, - {0x25, 0x57}, - {0x26, 0x03}, - {0x27, 0x65}, - {0x28, 0x03}, - {0x29, 0x77}, - {0x2a, 0x03}, - {0x2b, 0x85}, - {0x2d, 0x03}, - {0x2f, 0x8f}, - {0x30, 0x03}, - {0x31, 0xcb}, - {0x32, 0x00}, - {0x33, 0xbb}, - {0x34, 0x00}, - {0x35, 0xc0}, - {0x36, 0x00}, - {0x37, 0xcc}, - {0x38, 0x00}, - {0x39, 0xd6}, - {0x3a, 0x00}, - {0x3b, 0xe1}, - {0x3d, 0x00}, - {0x3f, 0xea}, - {0x40, 0x00}, - {0x41, 0xf4}, - {0x42, 0x00}, - {0x43, 0xfe}, - {0x44, 0x01}, - {0x45, 0x07}, - {0x46, 0x01}, - {0x47, 0x28}, - {0x48, 0x01}, - {0x49, 0x44}, - {0x4a, 0x01}, - {0x4b, 0x76}, - {0x4c, 0x01}, - {0x4d, 0xa0}, - {0x4e, 0x01}, - {0x4f, 0xe7}, - {0x50, 0x02}, - {0x51, 0x1f}, - {0x52, 0x02}, - {0x53, 0x22}, - {0x54, 0x02}, - {0x55, 0x54}, - {0x56, 0x02}, - {0x58, 0x8b}, - {0x59, 0x02}, - {0x5a, 0xaf}, - {0x5b, 0x02}, - {0x5c, 0xe0}, - {0x5d, 0x03}, - {0x5e, 0x01}, - {0x5f, 0x03}, - {0x60, 0x2d}, - {0x61, 0x03}, - {0x62, 0x39}, - {0x63, 0x03}, - {0x64, 0x47}, - {0x65, 0x03}, - {0x66, 0x57}, - {0x67, 0x03}, - {0x68, 0x65}, - {0x69, 0x03}, - {0x6a, 0x77}, - {0x6b, 0x03}, - {0x6c, 0x85}, - {0x6d, 0x03}, - {0x6e, 0x8f}, - {0x6f, 0x03}, - {0x70, 0xcb}, - {0x71, 0x00}, - {0x72, 0x00}, - {0x73, 0x00}, - {0x74, 0x21}, - {0x75, 0x00}, - {0x76, 0x4c}, - {0x77, 0x00}, - {0x78, 0x6b}, - {0x79, 0x00}, - {0x7a, 0x85}, - {0x7b, 0x00}, - {0x7c, 0x9a}, - {0x7d, 0x00}, - {0x7e, 0xad}, - {0x7f, 0x00}, - {0x80, 0xbe}, - {0x81, 0x00}, - {0x82, 0xcd}, - {0x83, 0x01}, - {0x84, 0x01}, - {0x85, 0x01}, - {0x86, 0x29}, - {0x87, 0x01}, - {0x88, 0x68}, - {0x89, 0x01}, - {0x8a, 0x98}, - {0x8b, 0x01}, - {0x8c, 0xe5}, - {0x8d, 0x02}, - {0x8e, 0x1e}, - {0x8f, 0x02}, - {0x90, 0x30}, - {0x91, 0x02}, - {0x92, 0x52}, - {0x93, 0x02}, - {0x94, 0x88}, - {0x95, 0x02}, - {0x96, 0xaa}, - {0x97, 0x02}, - {0x98, 0xd7}, - {0x99, 0x02}, - {0x9a, 0xf7}, - {0x9b, 0x03}, - {0x9c, 0x21}, - {0x9d, 0x03}, - {0x9e, 0x2e}, - {0x9f, 0x03}, - {0xa0, 0x3d}, - {0xa2, 0x03}, - {0xa3, 0x4c}, - {0xa4, 0x03}, - {0xa5, 0x5e}, - {0xa6, 0x03}, - {0xa7, 0x71}, - {0xa9, 0x03}, - {0xaa, 0x86}, - {0xab, 0x03}, - {0xac, 0x94}, - {0xad, 0x03}, - {0xae, 0xfa}, - {0xaf, 
0x00}, - {0xb0, 0x00}, - {0xb1, 0x00}, - {0xb2, 0x21}, - {0xb3, 0x00}, - {0xb4, 0x4c}, - {0xb5, 0x00}, - {0xb6, 0x6b}, - {0xb7, 0x00}, - {0xb8, 0x85}, - {0xb9, 0x00}, - {0xba, 0x9a}, - {0xbb, 0x00}, - {0xbc, 0xad}, - {0xbd, 0x00}, - {0xbe, 0xbe}, - {0xbf, 0x00}, - {0xc0, 0xcd}, - {0xc1, 0x01}, - {0xc2, 0x01}, - {0xc3, 0x01}, - {0xc4, 0x29}, - {0xc5, 0x01}, - {0xc6, 0x68}, - {0xc7, 0x01}, - {0xc8, 0x98}, - {0xc9, 0x01}, - {0xca, 0xe5}, - {0xcb, 0x02}, - {0xcc, 0x1e}, - {0xcd, 0x02}, - {0xce, 0x20}, - {0xcf, 0x02}, - {0xd0, 0x52}, - {0xd1, 0x02}, - {0xd2, 0x88}, - {0xd3, 0x02}, - {0xd4, 0xaa}, - {0xd5, 0x02}, - {0xd6, 0xd7}, - {0xd7, 0x02}, - {0xd8, 0xf7}, - {0xd9, 0x03}, - {0xda, 0x21}, - {0xdb, 0x03}, - {0xdc, 0x2e}, - {0xdd, 0x03}, - {0xde, 0x3d}, - {0xdf, 0x03}, - {0xe0, 0x4c}, - {0xe1, 0x03}, - {0xe2, 0x5e}, - {0xe3, 0x03}, - {0xe4, 0x71}, - {0xe5, 0x03}, - {0xe6, 0x86}, - {0xe7, 0x03}, - {0xe8, 0x94}, - {0xe9, 0x03}, - {0xea, 0xfa}, + {0xfb, {0x01}, 0x01}, + {0x00, {0x00}, 0x01}, + {0x01, {0xf4}, 0x01}, + {0x02, {0x00}, 0x01}, + {0x03, {0xef}, 0x01}, + {0x04, {0x01}, 0x01}, + {0x05, {0x07}, 0x01}, + {0x06, {0x01}, 0x01}, + {0x07, {0x28}, 0x01}, + {0x08, {0x01}, 0x01}, + {0x09, {0x44}, 0x01}, + {0x0a, {0x01}, 0x01}, + {0x0b, {0x76}, 0x01}, + {0x0c, {0x01}, 0x01}, + {0x0d, {0xa0}, 0x01}, + {0x0e, {0x01}, 0x01}, + {0x0f, {0xe7}, 0x01}, + {0x10, {0x02}, 0x01}, + {0x11, {0x1f}, 0x01}, + {0x12, {0x02}, 0x01}, + {0x13, {0x22}, 0x01}, + {0x14, {0x02}, 0x01}, + {0x15, {0x54}, 0x01}, + {0x16, {0x02}, 0x01}, + {0x17, {0x8b}, 0x01}, + {0x18, {0x02}, 0x01}, + {0x19, {0xaf}, 0x01}, + {0x1a, {0x02}, 0x01}, + {0x1b, {0xe0}, 0x01}, + {0x1c, {0x03}, 0x01}, + {0x1d, {0x01}, 0x01}, + {0x1e, {0x03}, 0x01}, + {0x1f, {0x2d}, 0x01}, + {0x20, {0x03}, 0x01}, + {0x21, {0x39}, 0x01}, + {0x22, {0x03}, 0x01}, + {0x23, {0x47}, 0x01}, + {0x24, {0x03}, 0x01}, + {0x25, {0x57}, 0x01}, + {0x26, {0x03}, 0x01}, + {0x27, {0x65}, 0x01}, + {0x28, {0x03}, 0x01}, + {0x29, {0x77}, 0x01}, + {0x2a, {0x03}, 0x01}, + {0x2b, {0x85}, 0x01}, + {0x2d, {0x03}, 0x01}, + {0x2f, {0x8f}, 0x01}, + {0x30, {0x03}, 0x01}, + {0x31, {0xcb}, 0x01}, + {0x32, {0x00}, 0x01}, + {0x33, {0xbb}, 0x01}, + {0x34, {0x00}, 0x01}, + {0x35, {0xc0}, 0x01}, + {0x36, {0x00}, 0x01}, + {0x37, {0xcc}, 0x01}, + {0x38, {0x00}, 0x01}, + {0x39, {0xd6}, 0x01}, + {0x3a, {0x00}, 0x01}, + {0x3b, {0xe1}, 0x01}, + {0x3d, {0x00}, 0x01}, + {0x3f, {0xea}, 0x01}, + {0x40, {0x00}, 0x01}, + {0x41, {0xf4}, 0x01}, + {0x42, {0x00}, 0x01}, + {0x43, {0xfe}, 0x01}, + {0x44, {0x01}, 0x01}, + {0x45, {0x07}, 0x01}, + {0x46, {0x01}, 0x01}, + {0x47, {0x28}, 0x01}, + {0x48, {0x01}, 0x01}, + {0x49, {0x44}, 0x01}, + {0x4a, {0x01}, 0x01}, + {0x4b, {0x76}, 0x01}, + {0x4c, {0x01}, 0x01}, + {0x4d, {0xa0}, 0x01}, + {0x4e, {0x01}, 0x01}, + {0x4f, {0xe7}, 0x01}, + {0x50, {0x02}, 0x01}, + {0x51, {0x1f}, 0x01}, + {0x52, {0x02}, 0x01}, + {0x53, {0x22}, 0x01}, + {0x54, {0x02}, 0x01}, + {0x55, {0x54}, 0x01}, + {0x56, {0x02}, 0x01}, + {0x58, {0x8b}, 0x01}, + {0x59, {0x02}, 0x01}, + {0x5a, {0xaf}, 0x01}, + {0x5b, {0x02}, 0x01}, + {0x5c, {0xe0}, 0x01}, + {0x5d, {0x03}, 0x01}, + {0x5e, {0x01}, 0x01}, + {0x5f, {0x03}, 0x01}, + {0x60, {0x2d}, 0x01}, + {0x61, {0x03}, 0x01}, + {0x62, {0x39}, 0x01}, + {0x63, {0x03}, 0x01}, + {0x64, {0x47}, 0x01}, + {0x65, {0x03}, 0x01}, + {0x66, {0x57}, 0x01}, + {0x67, {0x03}, 0x01}, + {0x68, {0x65}, 0x01}, + {0x69, {0x03}, 0x01}, + {0x6a, {0x77}, 0x01}, + {0x6b, {0x03}, 0x01}, + {0x6c, {0x85}, 0x01}, + {0x6d, {0x03}, 0x01}, + {0x6e, {0x8f}, 0x01}, + {0x6f, {0x03}, 0x01}, + {0x70, 
{0xcb}, 0x01}, + {0x71, {0x00}, 0x01}, + {0x72, {0x00}, 0x01}, + {0x73, {0x00}, 0x01}, + {0x74, {0x21}, 0x01}, + {0x75, {0x00}, 0x01}, + {0x76, {0x4c}, 0x01}, + {0x77, {0x00}, 0x01}, + {0x78, {0x6b}, 0x01}, + {0x79, {0x00}, 0x01}, + {0x7a, {0x85}, 0x01}, + {0x7b, {0x00}, 0x01}, + {0x7c, {0x9a}, 0x01}, + {0x7d, {0x00}, 0x01}, + {0x7e, {0xad}, 0x01}, + {0x7f, {0x00}, 0x01}, + {0x80, {0xbe}, 0x01}, + {0x81, {0x00}, 0x01}, + {0x82, {0xcd}, 0x01}, + {0x83, {0x01}, 0x01}, + {0x84, {0x01}, 0x01}, + {0x85, {0x01}, 0x01}, + {0x86, {0x29}, 0x01}, + {0x87, {0x01}, 0x01}, + {0x88, {0x68}, 0x01}, + {0x89, {0x01}, 0x01}, + {0x8a, {0x98}, 0x01}, + {0x8b, {0x01}, 0x01}, + {0x8c, {0xe5}, 0x01}, + {0x8d, {0x02}, 0x01}, + {0x8e, {0x1e}, 0x01}, + {0x8f, {0x02}, 0x01}, + {0x90, {0x30}, 0x01}, + {0x91, {0x02}, 0x01}, + {0x92, {0x52}, 0x01}, + {0x93, {0x02}, 0x01}, + {0x94, {0x88}, 0x01}, + {0x95, {0x02}, 0x01}, + {0x96, {0xaa}, 0x01}, + {0x97, {0x02}, 0x01}, + {0x98, {0xd7}, 0x01}, + {0x99, {0x02}, 0x01}, + {0x9a, {0xf7}, 0x01}, + {0x9b, {0x03}, 0x01}, + {0x9c, {0x21}, 0x01}, + {0x9d, {0x03}, 0x01}, + {0x9e, {0x2e}, 0x01}, + {0x9f, {0x03}, 0x01}, + {0xa0, {0x3d}, 0x01}, + {0xa2, {0x03}, 0x01}, + {0xa3, {0x4c}, 0x01}, + {0xa4, {0x03}, 0x01}, + {0xa5, {0x5e}, 0x01}, + {0xa6, {0x03}, 0x01}, + {0xa7, {0x71}, 0x01}, + {0xa9, {0x03}, 0x01}, + {0xaa, {0x86}, 0x01}, + {0xab, {0x03}, 0x01}, + {0xac, {0x94}, 0x01}, + {0xad, {0x03}, 0x01}, + {0xae, {0xfa}, 0x01}, + {0xaf, {0x00}, 0x01}, + {0xb0, {0x00}, 0x01}, + {0xb1, {0x00}, 0x01}, + {0xb2, {0x21}, 0x01}, + {0xb3, {0x00}, 0x01}, + {0xb4, {0x4c}, 0x01}, + {0xb5, {0x00}, 0x01}, + {0xb6, {0x6b}, 0x01}, + {0xb7, {0x00}, 0x01}, + {0xb8, {0x85}, 0x01}, + {0xb9, {0x00}, 0x01}, + {0xba, {0x9a}, 0x01}, + {0xbb, {0x00}, 0x01}, + {0xbc, {0xad}, 0x01}, + {0xbd, {0x00}, 0x01}, + {0xbe, {0xbe}, 0x01}, + {0xbf, {0x00}, 0x01}, + {0xc0, {0xcd}, 0x01}, + {0xc1, {0x01}, 0x01}, + {0xc2, {0x01}, 0x01}, + {0xc3, {0x01}, 0x01}, + {0xc4, {0x29}, 0x01}, + {0xc5, {0x01}, 0x01}, + {0xc6, {0x68}, 0x01}, + {0xc7, {0x01}, 0x01}, + {0xc8, {0x98}, 0x01}, + {0xc9, {0x01}, 0x01}, + {0xca, {0xe5}, 0x01}, + {0xcb, {0x02}, 0x01}, + {0xcc, {0x1e}, 0x01}, + {0xcd, {0x02}, 0x01}, + {0xce, {0x20}, 0x01}, + {0xcf, {0x02}, 0x01}, + {0xd0, {0x52}, 0x01}, + {0xd1, {0x02}, 0x01}, + {0xd2, {0x88}, 0x01}, + {0xd3, {0x02}, 0x01}, + {0xd4, {0xaa}, 0x01}, + {0xd5, {0x02}, 0x01}, + {0xd6, {0xd7}, 0x01}, + {0xd7, {0x02}, 0x01}, + {0xd8, {0xf7}, 0x01}, + {0xd9, {0x03}, 0x01}, + {0xda, {0x21}, 0x01}, + {0xdb, {0x03}, 0x01}, + {0xdc, {0x2e}, 0x01}, + {0xdd, {0x03}, 0x01}, + {0xde, {0x3d}, 0x01}, + {0xdf, {0x03}, 0x01}, + {0xe0, {0x4c}, 0x01}, + {0xe1, {0x03}, 0x01}, + {0xe2, {0x5e}, 0x01}, + {0xe3, {0x03}, 0x01}, + {0xe4, {0x71}, 0x01}, + {0xe5, {0x03}, 0x01}, + {0xe6, {0x86}, 0x01}, + {0xe7, {0x03}, 0x01}, + {0xe8, {0x94}, 0x01}, + {0xe9, {0x03}, 0x01}, + {0xea, {0xfa}, 0x01}, /* Select CMD2 Page0 (Undocumented) */ - {0xff, 0x01}, + {0xff, {0x01}, 0x01}, /* Reload CMD1: Don't reload default value to register */ - {0xfb, 0x01}, + {0xfb, {0x01}, 0x01}, /* Select CMD2 Page1 (Undocumented) */ - {0xff, 0x02}, + {0xff, {0x02}, 0x01}, /* Reload CMD1: Don't reload default value to register */ - {0xfb, 0x01}, + {0xfb, {0x01}, 0x01}, /* Select CMD2 Page3 (Undocumented) */ - {0xff, 0x04}, + {0xff, {0x04}, 0x01}, /* Reload CMD1: Don't reload default value to register */ - {0xfb, 0x01}, + {0xfb, {0x01}, 0x01}, /* Select CMD1 */ - {0xff, 0x00}, - {0xd3, 0x22}, /* RGBMIPICTRL: VSYNC back porch = 34 */ - {0xd4, 0x04}, /* RGBMIPICTRL: VSYNC 
front porch = 4 */ + {0xff, {0x00}, 0x01}, + {0xd3, {0x22}, 0x01}, /* RGBMIPICTRL: VSYNC back porch = 34 */ + {0xd4, {0x04}, 0x01}, /* RGBMIPICTRL: VSYNC front porch = 4 */ +}; + +struct khadas_ts050_panel_data ts050_panel_data = { + .init_code = (struct khadas_ts050_panel_cmd *)ts050_init_code, + .len = ARRAY_SIZE(ts050_init_code) +}; + +struct khadas_ts050_panel_data ts050v2_panel_data = { + .init_code = (struct khadas_ts050_panel_cmd *)ts050v2_init_code, + .len = ARRAY_SIZE(ts050v2_init_code) }; static inline @@ -613,10 +671,11 @@ static int khadas_ts050_panel_prepare(struct drm_panel *panel) msleep(100); - for (i = 0; i < ARRAY_SIZE(init_code); i++) { + for (i = 0; i < khadas_ts050->panel_data->len; i++) { err = mipi_dsi_dcs_write(khadas_ts050->link, - init_code[i].cmd, - &init_code[i].data, 1); + khadas_ts050->panel_data->init_code[i].cmd, + &khadas_ts050->panel_data->init_code[i].data, + khadas_ts050->panel_data->init_code[i].size); if (err < 0) { dev_err(panel->dev, "failed write cmds: %d\n", err); goto poweroff; @@ -762,7 +821,8 @@ static const struct drm_panel_funcs khadas_ts050_panel_funcs = { }; static const struct of_device_id khadas_ts050_of_match[] = { - { .compatible = "khadas,ts050", }, + { .compatible = "khadas,ts050", .data = &ts050_panel_data, }, + { .compatible = "khadas,ts050v2", .data = &ts050v2_panel_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, khadas_ts050_of_match); @@ -806,6 +866,13 @@ static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi) struct khadas_ts050_panel *khadas_ts050; int err; + const void *data = of_device_get_match_data(&dsi->dev); + + if (!data) { + dev_err(&dsi->dev, "No matching data\n"); + return -ENODEV; + } + dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | @@ -816,6 +883,7 @@ static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi) if (!khadas_ts050) return -ENOMEM; + khadas_ts050->panel_data = (struct khadas_ts050_panel_data *)data; mipi_dsi_set_drvdata(dsi, khadas_ts050); khadas_ts050->link = dsi; diff --git a/drivers/gpu/drm/panel/panel-lg-sw43408.c b/drivers/gpu/drm/panel/panel-lg-sw43408.c new file mode 100644 index 0000000000..67a98ac508 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-lg-sw43408.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019-2024 Linaro Ltd + * Author: Sumit Semwal + * Dmitry Baryshkov + */ + +#include +#include +#include +#include +#include +#include + +#include